Mirror of https://github.com/openfaas/faasd.git, synced 2025-06-18 12:06:36 +00:00
Compare commits (186 commits)
.gitattributes (vendored, new file): 2 lines added
@@ -0,0 +1,2 @@
vendor/** linguist-generated=true
Gopkg.lock linguist-generated=true
.github/CODEOWNERS (vendored, new file): 1 line added
@@ -0,0 +1 @@
@alexellis
.github/ISSUE_TEMPLATE.md (vendored, new file): 41 lines added
@@ -0,0 +1,41 @@
<!--- Provide a general summary of the issue in the Title above -->

## Expected Behaviour
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->

## Current Behaviour
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->

## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.

## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Your Environment

* OS and architecture:

* Versions:

```sh
go version

containerd -version

uname -a

cat /etc/os-release
```
.github/PULL_REQUEST_TEMPLATE.md (vendored, new file): 40 lines added
@@ -0,0 +1,40 @@
<!--- Provide a general summary of your changes in the Title above -->

## Description
<!--- Describe your changes in detail -->

## Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
- [ ] I have raised an issue to propose this change **this is required**


## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, and the tests you ran to -->
<!--- see how your change affects other areas of the code, etc. -->


## Types of changes
<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)

## Checklist:

Commits:

- [ ] I've read the [CONTRIBUTION](https://github.com/openfaas/faas/blob/master/CONTRIBUTING.md) guide
- [ ] My commit message has a body and describe how this was tested and why it is required.
- [ ] I have signed-off my commits with `git commit -s` for the Developer Certificate of Origin (DCO)

Code:

- [ ] My code follows the code style of this project.
- [ ] I have added tests to cover my changes.

Docs:

- [ ] My change requires a change to the documentation.
- [ ] I have updated the documentation accordingly.
.github/workflows/build.yaml (vendored, new file): 36 lines added
@@ -0,0 +1,36 @@
name: build

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:
    env:
      GO111MODULE: off
    strategy:
      matrix:
        go-version: [1.13.x]
        os: [ubuntu-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@master
        with:
          fetch-depth: 1
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}

      - name: test
        run: make test
      - name: dist
        run: make dist
      - name: prepare-test
        run: make prepare-test
      - name: test e2e
        run: make test-e2e
.github/workflows/publish.yaml (vendored, new file): 96 lines added
@@ -0,0 +1,96 @@
name: publish

on:
  push:
    tags:
      - '*'

jobs:
  publish:
    env:
      GO111MODULE: off
    strategy:
      matrix:
        go-version: [1.13.x]
        os: [ubuntu-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@master
        with:
          fetch-depth: 1
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: test
        run: make test
      - name: dist
        run: make dist
      - name: hashgen
        run: make hashgen

      - name: Create Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: Release ${{ github.ref }}
          draft: false
          prerelease: false

      - name: Upload Release faasd
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ./bin/faasd
          asset_name: faasd
          asset_content_type: application/binary
      - name: Upload Release faasd-armhf
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ./bin/faasd-armhf
          asset_name: faasd-armhf
          asset_content_type: application/binary
      - name: Upload Release faasd-arm64
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ./bin/faasd-arm64
          asset_name: faasd-arm64
          asset_content_type: application/binary
      - name: Upload Release faasd.sha256
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ./bin/faasd.sha256
          asset_name: faasd.sha256
          asset_content_type: text/plain
      - name: Upload Release faasd-armhf.sha256
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ./bin/faasd-armhf.sha256
          asset_name: faasd-armhf.sha256
          asset_content_type: text/plain
      - name: Upload Release faasd-arm64.sha256
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ./bin/faasd-arm64.sha256
          asset_name: faasd-arm64.sha256
          asset_content_type: text/plain
.gitignore (vendored): 7 lines changed
@@ -1,3 +1,10 @@
/faasd
hosts
/resolv.conf
.idea/

basic-auth-user
basic-auth-password
/bin
/secrets
.vscode
.travis.yml (deleted): 20 lines removed
@@ -1,20 +0,0 @@
sudo: required
language: go
go:
- '1.12'
script:
- make dist
deploy:
  provider: releases
  api_key:
    secure: bccOSB+Mbk5ZJHyJfX82Xg/3/7mxiAYHx7P5m5KS1ncDuRpJBFjDV8Nx2PWYg341b5SMlCwsS3IJ9NkoGvRSKK+3YqeNfTeMabVNdKC2oL1i+4pdxGlbl57QXkzT4smqE8AykZEo4Ujk42rEr3e0gSHT2rXkV+Xt0xnoRVXn2tSRUDwsmwANnaBj6KpH2SjJ/lsfTifxrRB65uwcePaSjkqwR6htFraQtpONC9xYDdek6EoVQmoft/ONZJqi7HR+OcA1yhSt93XU6Vaf3678uLlPX9c/DxgIU9UnXRaOd0UUEiTHaMMWDe/bJSrKmgL7qY05WwbGMsXO/RdswwO1+zwrasrwf86SjdGX/P9AwobTW3eTEiBqw2J77UVbvLzDDoyJ5KrkbHRfPX8aIPO4OG9eHy/e7C3XVx4qv9bJBXQ3qD9YJtei9jmm8F/MCdPWuVYC0hEvHtuhP/xMm4esNUjFM5JUfDucvAuLL34NBYHBDP2XNuV4DkgQQPakfnlvYBd7OqyXCU6pzyWSasXpD1Rz8mD/x8aTUl2Ya4bnXQ8qAa5cnxfPqN2ADRlTw1qS7hl6LsXzNQ6r1mbuh/uFi67ybElIjBTfuMEeJOyYHkkLUHIBpooKrPyr0luAbf0By2D2N/eQQnM/RpixHNfZG/mvXx8ZCrs+wxgvG1Rm7rM=
  file:
  - ./bin/faasd
  - ./bin/faasd-armhf
  - ./bin/faasd-arm64
  skip_cleanup: true
  on:
    tags: true

env:
- GO111MODULE=off
Gopkg.lock (generated, deleted): 475 lines removed
@@ -1,475 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
# (Generated dep lockfile, collapsed: [[projects]] blocks pinning github.com/Microsoft/go-winio v0.4.14,
# github.com/Microsoft/hcsshim, github.com/alexellis/go-execute 0.3.0, github.com/alexellis/k3sup 0.7.1,
# github.com/containerd/containerd v1.3.2, github.com/containerd/continuity, github.com/containerd/fifo,
# github.com/containerd/ttrpc, github.com/containerd/typeurl, github.com/docker/distribution,
# github.com/docker/go-events, github.com/gogo/googleapis v1.2.0, github.com/gogo/protobuf v1.2.1,
# github.com/golang/groupcache, github.com/golang/protobuf v1.3.2, github.com/inconshreveable/mousetrap v1.0,
# github.com/konsorten/go-windows-terminal-sequences v1.0.2, github.com/morikuni/aec v1.0.0,
# github.com/opencontainers/go-digest, github.com/opencontainers/image-spec v1.0.1,
# github.com/opencontainers/runc v1.0.0-rc9, github.com/opencontainers/runtime-spec, github.com/pkg/errors v0.8.1,
# github.com/sirupsen/logrus v1.4.1, github.com/spf13/cobra v0.0.5, github.com/spf13/pflag v1.0.5,
# github.com/syndtr/gocapability, github.com/vishvananda/netlink v1.0.0, github.com/vishvananda/netns,
# go.opencensus.io v0.22.2, golang.org/x/net, golang.org/x/sync, golang.org/x/sys, golang.org/x/text v0.3.2,
# google.golang.org/genproto, and google.golang.org/grpc v1.23.0, plus the [solve-meta] block for the
# gps-cdcl solver and the dep analyzer.)
Gopkg.toml (deleted): 23 lines removed
@@ -1,23 +0,0 @@
[[constraint]]
  name = "github.com/containerd/containerd"
  version = "1.3.2"

[[constraint]]
  name = "github.com/morikuni/aec"
  version = "1.0.0"

[[constraint]]
  name = "github.com/spf13/cobra"
  version = "0.0.5"

[[constraint]]
  name = "github.com/alexellis/k3sup"
  version = "0.7.1"

[[constraint]]
  name = "github.com/alexellis/go-execute"
  version = "0.3.0"

[prune]
  go-tests = true
  unused-packages = true
LICENSE: 4 lines changed
@@ -1,6 +1,8 @@
MIT License

Copyright (c) 2019 Alex Ellis
Copyright (c) 2020 Alex Ellis
Copyright (c) 2020 OpenFaaS Ltd
Copyright (c) 2020 OpenFaas Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Makefile: 57 lines changed
@@ -1,15 +1,62 @@
Version := $(shell git describe --tags --dirty)
GitCommit := $(shell git rev-parse HEAD)
LDFLAGS := "-s -w -X pkg.Version=$(Version) -X pkg.GitCommit=$(GitCommit)"
LDFLAGS := "-s -w -X main.Version=$(Version) -X main.GitCommit=$(GitCommit)"
CONTAINERD_VER := 1.3.4
CNI_VERSION := v0.8.6
ARCH := amd64

export GO111MODULE=on

.PHONY: all
all: local

local:
	CGO_ENABLED=0 GOOS=linux go build -o bin/faasd
	CGO_ENABLED=0 GOOS=linux go build -mod=vendor -o bin/faasd

.PHONY: test
test:
	CGO_ENABLED=0 GOOS=linux go test -mod=vendor -ldflags $(LDFLAGS) ./...

.PHONY: dist
dist:
	CGO_ENABLED=0 GOOS=linux go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd
	CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-armhf
	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-arm64
	CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd
	CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-armhf
	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-arm64

.PHONY: hashgen
hashgen:
	for f in bin/faasd*; do shasum -a 256 $$f > $$f.sha256; done

.PHONY: prepare-test
prepare-test:
	curl -sLSf https://github.com/containerd/containerd/releases/download/v$(CONTAINERD_VER)/containerd-$(CONTAINERD_VER).linux-amd64.tar.gz > /tmp/containerd.tar.gz && sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
	curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.2/containerd.service | sudo tee /etc/systemd/system/containerd.service
	sudo systemctl daemon-reload && sudo systemctl start containerd
	sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
	sudo mkdir -p /opt/cni/bin
	curl -sSL https://github.com/containernetworking/plugins/releases/download/$(CNI_VERSION)/cni-plugins-linux-$(ARCH)-$(CNI_VERSION).tgz | sudo tar -xz -C /opt/cni/bin
	sudo cp bin/faasd /usr/local/bin/
	sudo /usr/local/bin/faasd install
	sudo systemctl status -l containerd --no-pager
	sudo journalctl -u faasd-provider --no-pager
	sudo systemctl status -l faasd-provider --no-pager
	sudo systemctl status -l faasd --no-pager
	curl -sSLf https://cli.openfaas.com | sudo sh
	echo "Sleeping for 2m" && sleep 120 && sudo journalctl -u faasd --no-pager

.PHONY: test-e2e
test-e2e:
	sudo cat /var/lib/faasd/secrets/basic-auth-password | /usr/local/bin/faas-cli login --password-stdin
	/usr/local/bin/faas-cli store deploy figlet --env write_timeout=1s --env read_timeout=1s --label testing=true
	sleep 5
	/usr/local/bin/faas-cli list -v
	/usr/local/bin/faas-cli describe figlet | grep testing
	uname | /usr/local/bin/faas-cli invoke figlet
	uname | /usr/local/bin/faas-cli invoke figlet --async
	sleep 10
	/usr/local/bin/faas-cli list -v
	/usr/local/bin/faas-cli remove figlet
	sleep 3
	/usr/local/bin/faas-cli list
	sleep 3
	/usr/local/bin/faas-cli logs figlet --follow=false | grep Forking
README.md: 371 lines changed
@@ -1,153 +1,278 @@
# faasd - serverless with containerd
# faasd - a lightweight & portable faas engine

[](https://travis-ci.com/alexellis/faasd)
[](https://github.com/openfaas/faasd/actions)
[](https://opensource.org/licenses/MIT)
[](https://www.openfaas.com)

faasd is a Golang supervisor that bundles OpenFaaS for use with containerd instead of a container orchestrator like Kubernetes or Docker Swarm.
faasd is [OpenFaaS](https://github.com/openfaas/) reimagined, but without the cost and complexity of Kubernetes. It runs on a single host with very modest requirements, making it fast and easy to manage. Under the hood it uses [containerd](https://containerd.io/) and [Container Networking Interface (CNI)](https://github.com/containernetworking/cni) along with the same core OpenFaaS components from the main project.

## About faasd:
## When should you use faasd over OpenFaaS on Kubernetes?

* faasd is a single Golang binary
* faasd is multi-arch, so works on `x86_64`, armhf and arm64
* faasd downloads, starts and supervises the core components to run OpenFaaS
* You have a cost-sensitive project - run faasd on a 5-10 USD VPS or on your Raspberry Pi
* When you just need a few functions or microservices, without the cost of a cluster
* When you don't have the bandwidth to learn or manage Kubernetes
* To deploy embedded apps in IoT and edge use-cases
* To shrink-wrap applications for use with a customer or client

faasd does not create the same maintenance burden you'll find with maintaining, upgrading, and securing a Kubernetes cluster. You can deploy it and walk away; in the worst case, just deploy a new VM and deploy your functions again.

## About faasd

* is a single Golang binary
* uses the same core components and ecosystem of OpenFaaS
* is multi-arch, so works on Intel `x86_64` and ARM out of the box
* can be set up and left alone to run your applications

> Demo of faasd running in KVM

## Tutorials

### Get started on DigitalOcean, or any other IaaS

If your IaaS supports `user_data` aka "cloud-init", then this guide is for you. If not, then check out the approach and feel free to run each step manually.

* [Build a Serverless appliance with faasd](https://blog.alexellis.io/deploy-serverless-faasd-with-cloud-init/)

### Run locally on MacOS, Linux, or Windows with multipass

* [Get up and running with your own faasd installation on your Mac/Ubuntu or Windows with cloud-config](/docs/MULTIPASS.md)

### Get started on armhf / Raspberry Pi

You can run this tutorial on your Raspberry Pi, or adapt the steps for a regular Linux VM/VPS host.

* [faasd - lightweight Serverless for your Raspberry Pi](https://blog.alexellis.io/faasd-for-lightweight-serverless/)

### Terraform for DigitalOcean

Automate everything within < 60 seconds and get a public URL and IP address back. Customise as required, or adapt to your preferred cloud such as AWS EC2.

* [Provision faasd 0.9.5 on DigitalOcean with Terraform 0.12.0](docs/bootstrap/README.md)

* [Provision faasd on DigitalOcean with built-in TLS support](docs/bootstrap/digitalocean-terraform/README.md)

## Operational concerns

### A note on private repos / registries

To use private image repos, `~/.docker/config.json` needs to be copied to `/var/lib/faasd/.docker/config.json`.
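For example, a minimal sketch of that copy step, assuming you have already run `docker login` on the host and that the credentials were written to `~/.docker/config.json` rather than to a desktop credential helper:

```bash
# Seed faasd with an existing registry login (sketch; paths assume a default install)
sudo mkdir -p /var/lib/faasd/.docker
sudo cp ~/.docker/config.json /var/lib/faasd/.docker/config.json
```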
If you'd like to set up your own private registry, [see this tutorial](https://blog.alexellis.io/get-a-tls-enabled-docker-registry-in-5-minutes/).

Beware that running `docker login` on MacOS and Windows may create an empty file with your credentials stored in the system helper.

Alternatively, you can use the `registry-login` command from the OpenFaaS Cloud bootstrap tool (ofc-bootstrap):

```bash
curl -sLSf https://raw.githubusercontent.com/openfaas-incubator/ofc-bootstrap/master/get.sh | sudo sh

ofc-bootstrap registry-login --username <your-registry-username> --password-stdin
# (then enter your password and hit return)
```

The file will be created in `./credentials/`

> Note: for the GitHub Container Registry, use `ghcr.io` and not the previous generation "Docker Package Registry". [See notes on migrating](https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry/migrating-to-github-container-registry-for-docker-images)

### Logs for functions

You can view the logs of functions using `journalctl`:

```bash
journalctl -t openfaas-fn:FUNCTION_NAME

faas-cli store deploy figlet
journalctl -t openfaas-fn:figlet -f &
echo logs | faas-cli invoke figlet
```

### Logs for the core services

Core services as defined in the docker-compose.yaml file are deployed as containers by faasd.

View the logs for a component by giving its NAME:

```bash
journalctl -t default:NAME

journalctl -t default:gateway

journalctl -t default:queue-worker
```

You can also use `-f` to follow the logs, `--lines` to tail a number of lines, or `--since` to give a timeframe.
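For instance, those flags can be combined; the values below are only illustrative:

```bash
# Follow the gateway logs, starting from the last 100 lines of the past hour
journalctl -t default:gateway -f --lines 100 --since "1 hour ago"
```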
### Exposing core services

The OpenFaaS stack is made up of several core services including NATS and Prometheus. You can expose these through the `docker-compose.yaml` file located at `/var/lib/faasd`.

Expose the gateway to all adapters:

```yaml
gateway:
  ports:
    - "8080:8080"
```

Expose Prometheus only to 127.0.0.1:

```yaml
prometheus:
  ports:
    - "127.0.0.1:9090:9090"
```

### Upgrading faasd

To upgrade `faasd` either re-create your VM using Terraform, or simply replace the faasd binary with a newer one.

```bash
systemctl stop faasd-provider
systemctl stop faasd

# Replace /usr/local/bin/faasd with the desired release

# Replace /var/lib/faasd/docker-compose.yaml with the matching version for
# that release.
# Remember to keep any custom patches you make such as exposing additional
# ports, or updating timeout values

systemctl start faasd
systemctl start faasd-provider
```

You could also perform this task over SSH, or use a configuration management tool.

> Note: if you are using Caddy or Let's Encrypt for free SSL certificates, you may hit rate-limits for generating new certificates if you do this too often within a given week.

### Memory limits for functions

Memory limits for functions are supported. When the limit is exceeded the function will be killed.

Example:

```yaml
functions:
  figlet:
    skip_build: true
    image: functions/figlet:latest
    limits:
      memory: 20Mi
```

## What does faasd deploy?

* [faas-containerd](https://github.com/alexellis/faas-containerd/)
* [Prometheus](https://github.com/prometheus/prometheus)
* [the OpenFaaS gateway](https://github.com/openfaas/faas/tree/master/gateway)
* faasd - itself, and its [faas-provider](https://github.com/openfaas/faas-provider) for containerd - CRUD for functions and services, implements the OpenFaaS REST API
* [Prometheus](https://github.com/prometheus/prometheus) - for monitoring of services, metrics, scaling and dashboards
* [OpenFaaS Gateway](https://github.com/openfaas/faas/tree/master/gateway) - the UI portal, CLI, and other OpenFaaS tooling can talk to this.
* [OpenFaaS queue-worker for NATS](https://github.com/openfaas/nats-queue-worker) - run your invocations in the background without adding any code. See also: [asynchronous invocations](https://docs.openfaas.com/reference/triggers/#async-nats-streaming)
* [NATS](https://nats.io) for asynchronous processing and queues

You can use the standard [faas-cli](https://github.com/openfaas/faas-cli) with faasd along with pre-packaged functions in the Function Store, or build your own with the template store.
You'll also need:

### faas-containerd supports:
* [CNI](https://github.com/containernetworking/plugins)
* [containerd](https://github.com/containerd/containerd)
* [runc](https://github.com/opencontainers/runc)

You can use the standard [faas-cli](https://github.com/openfaas/faas-cli) along with pre-packaged functions from *the Function Store*, or build your own using any OpenFaaS template.

### Manual / developer instructions

See [here for manual / developer instructions](docs/DEV.md)

## Getting help

### Docs

The [OpenFaaS docs](https://docs.openfaas.com/) provide a wealth of information and are kept up to date with new features.

### Function and template store

For community functions see `faas-cli store --help`

For templates built by the community see `faas-cli template store list`; you can also use the `dockerfile` template if you just want to migrate an existing service without the benefits of using a template.
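As a quick sketch of that workflow (the template and function names below are examples from the public stores, not requirements):

```bash
# Browse community functions and templates
faas-cli store list
faas-cli template store list

# Scaffold a function from a community template, or wrap an existing
# service using the dockerfile template
faas-cli template store pull golang-http
faas-cli new my-api --lang golang-http
faas-cli new my-legacy-service --lang dockerfile
```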
### Training and courses

#### LinuxFoundation training course

The founder of faasd and OpenFaaS has written a training course for the LinuxFoundation which also covers how to use OpenFaaS on Kubernetes. Many of the same concepts can be applied to faasd, and the course is free:

* [Introduction to Serverless on Kubernetes](https://www.edx.org/course/introduction-to-serverless-on-kubernetes)

#### Community workshop

[The OpenFaaS workshop](https://github.com/openfaas/workshop/) is a set of 12 self-paced labs and provides a great starting point for learning the features of OpenFaaS. Not all features will be available or usable with faasd.

### Community support

An active community of almost 3000 users awaits you on Slack. Over 250 of those users are also contributors and help maintain the code.

* [Join Slack](https://slack.openfaas.io/)

## Roadmap

### Supported operations

* `faas login`
* `faas up`
* `faas list`
* `faas describe`
* `faas describe`
* `faas deploy --update=true --replace=false`
* `faas invoke`
* `faas invoke --async`
* `faas invoke`
* `faas rm`
* `faas store list/deploy/inspect`
* `faas version`
* `faas namespace`
* `faas secret`
* `faas logs`

Other operations are pending development in the provider.
Scale from and to zero is also supported. On a Dell XPS with a small, pre-pulled image, unpausing an existing task took 0.19s and starting a task for a killed function took 0.39s. There may be further optimizations to be gained.

### Pre-reqs
Other operations are pending development in the provider such as:

* Linux - ideally Ubuntu, which is used for testing
* Installation steps as per [faas-containerd](https://github.com/alexellis/faas-containerd) for building and for development
* [netns](https://github.com/genuinetools/netns/releases) binary in `$PATH`
* [containerd v1.3.2](https://github.com/containerd/containerd)
* [faas-cli](https://github.com/openfaas/faas-cli) (optional)
* `faas auth` - supported for Basic Authentication, but OAuth2 & OIDC require a patch

## Backlog
### Backlog

Pending:

* [ ] Configure `basic_auth` to protect the OpenFaaS gateway and faas-containerd HTTP API
* [ ] Use CNI to create network namespaces and adapters
* [ ] [Store and retrieve annotations in function spec](https://github.com/openfaas/faasd/pull/86) - in progress
* [ ] Offer live rolling-updates, with zero downtime - requires moving to IDs vs. names for function containers
* [ ] An installer for faasd and dependencies - runc, containerd
* [ ] Monitor and restart any of the core components at runtime if the container stops
* [ ] Bundle/package/automate installation of containerd - [see bootstrap from k3s](https://github.com/rancher/k3s)
* [ ] Provide ufw rules / example for blocking access to everything but a reverse proxy to the gateway container
* [ ] Multiple replicas per function

Done:
### Known-issues

### Completed

* [x] Provide a cloud-init configuration for faasd bootstrap
* [x] Configure core services from a docker-compose.yaml file
* [x] Store and fetch logs from the journal
* [x] Add support for using container images in third-party public registries
* [x] Add support for using container images in private third-party registries
* [x] Provide a cloud-config.txt file for automated deployments of `faasd`
* [x] Inject / manage IPs between core components for service to service communication - i.e. so Prometheus can scrape the OpenFaaS gateway - done via `/etc/hosts` mount
* [x] Add queue-worker and NATS
* [x] Create faasd.service and faas-containerd.service
* [x] Create faasd.service and faasd-provider.service
* [x] Self-install / create systemd service via `faasd install`
* [x] Restart containers upon restart of faasd
* [x] Clear / remove containers and tasks with SIGTERM / SIGINT
* [x] Determine armhf/arm64 containers to run for gateway
* [x] Configure `basic_auth` to protect the OpenFaaS gateway and faasd-provider HTTP API
* [x] Setup custom working directory for faasd `/var/lib/faasd/`
* [x] Use CNI to create network namespaces and adapters
* [x] Optionally expose core services from the docker-compose.yaml file, locally or to all adapters.
* [x] ~~[containerd can't pull image from Github Docker Package Registry](https://github.com/containerd/containerd/issues/3291)~~ ghcr.io support
* [x] Provide [simple Caddyfile example](https://blog.alexellis.io/https-inlets-local-endpoints/) in the README showing how to expose the faasd proxy on port 80/443 with TLS
* [x] Annotation support
* [x] Hard memory limits for functions
* [ ] Terraform for DigitalOcean

## Hacking (build from source)

First run faas-containerd

```sh
cd $GOPATH/src/github.com/alexellis/faas-containerd

# You'll need to install containerd and its pre-reqs first
# https://github.com/alexellis/faas-containerd/

sudo ./faas-containerd
```

Then run faasd, which brings up the gateway and Prometheus as containers

```sh
cd $GOPATH/src/github.com/alexellis/faasd
go build

# Install with systemd
# sudo ./faasd install

# Or run interactively
# sudo ./faasd up
```

### Build and run (binaries)

```sh
# For x86_64
sudo curl -fSLs "https://github.com/alexellis/faasd/releases/download/0.2.4/faasd" \
    -o "/usr/local/bin/faasd" \
    && sudo chmod a+x "/usr/local/bin/faasd"

# armhf
sudo curl -fSLs "https://github.com/alexellis/faasd/releases/download/0.2.4/faasd-armhf" \
    -o "/usr/local/bin/faasd" \
    && sudo chmod a+x "/usr/local/bin/faasd"

# arm64
sudo curl -fSLs "https://github.com/alexellis/faasd/releases/download/0.2.4/faasd-arm64" \
    -o "/usr/local/bin/faasd" \
    && sudo chmod a+x "/usr/local/bin/faasd"
```

### At run-time

Look in `hosts` in the current working folder to get the IP for the gateway or Prometheus

```sh
127.0.0.1   localhost
172.19.0.1  faas-containerd
172.19.0.2  prometheus

172.19.0.3  gateway
172.19.0.4  nats
172.19.0.5  queue-worker
```

Since faas-containerd uses containerd heavily it is not running as a container, but as a stand-alone process. Its port is available via the bridge interface, i.e. netns0.

* Prometheus will run on the Prometheus IP plus port 9090, i.e. http://172.19.0.2:9090/targets

* faas-containerd runs on 172.19.0.1:8081

* Now go to the gateway's IP address as shown above on port 8080, i.e. http://172.19.0.3:8080 - you can also use this address to deploy OpenFaaS Functions via the `faas-cli`.
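For example, pointing `faas-cli` at that gateway address (the IP comes from the sample `hosts` file above and may differ on your machine; run `faas-cli login` first if basic auth is enabled):

```bash
export OPENFAAS_URL=http://172.19.0.3:8080

# Deploy a sample function from the store and invoke it
faas-cli store deploy figlet
echo faasd | faas-cli invoke figlet
```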
#### Installation with systemd

* `faasd install` - install faasd and containerd with systemd, run in `$GOPATH/src/github.com/alexellis/faasd`
* `journalctl -u faasd` - faasd systemd logs
* `journalctl -u faas-containerd` - faas-containerd systemd logs

### Appendix

Removing containers:

```sh
echo faas-containerd gateway prometheus | xargs sudo ctr task rm -f

echo faas-containerd gateway prometheus | xargs sudo ctr container rm

echo faas-containerd gateway prometheus | xargs sudo ctr snapshot rm
```

## Links

https://github.com/renatofq/ctrofb/blob/31968e4b4893f3603e9998f21933c4131523bb5d/cmd/network.go

https://github.com/renatofq/catraia/blob/c4f62c86bddbfadbead38cd2bfe6d920fba26dce/catraia-net/network.go

https://github.com/containernetworking/plugins

https://github.com/containerd/go-cni
WIP:

* [ ] Terraform for AWS
cloud-config.txt (new file): 29 lines added
@@ -0,0 +1,29 @@
#cloud-config
ssh_authorized_keys:
## Note: Replace with your own public key
  - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local

package_update: true

packages:
  - runc

runcmd:
  - curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
  - curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
  - systemctl daemon-reload && systemctl start containerd
  - systemctl enable containerd
  - /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
  - mkdir -p /opt/cni/bin
  - curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
  - mkdir -p /go/src/github.com/openfaas/
  - cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.9.5 https://github.com/openfaas/faasd
  - curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.9.5/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
  - cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
  - systemctl status -l containerd --no-pager
  - journalctl -u faasd-provider --no-pager
  - systemctl status -l faasd-provider --no-pager
  - systemctl status -l faasd --no-pager
  - curl -sSLf https://cli.openfaas.com | sh
  - sleep 60 && journalctl -u faasd --no-pager
  - cat /var/lib/faasd/secrets/basic-auth-password | /usr/local/bin/faas-cli login --password-stdin
cmd/collect.go (new file): 60 lines added
@@ -0,0 +1,60 @@
package cmd

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"sync"

	"github.com/containerd/containerd/runtime/v2/logging"
	"github.com/coreos/go-systemd/journal"
	"github.com/spf13/cobra"
)

func CollectCommand() *cobra.Command {
	return collectCmd
}

var collectCmd = &cobra.Command{
	Use:   "collect",
	Short: "Collect logs to the journal",
	RunE:  runCollect,
}

func runCollect(_ *cobra.Command, _ []string) error {
	logging.Run(logStdio)
	return nil
}

// logStdio copied from
// https://github.com/containerd/containerd/pull/3085
// https://github.com/stellarproject/orbit
func logStdio(ctx context.Context, config *logging.Config, ready func() error) error {
	// construct any log metadata for the container
	vars := map[string]string{
		"SYSLOG_IDENTIFIER": fmt.Sprintf("%s:%s", config.Namespace, config.ID),
	}
	var wg sync.WaitGroup
	wg.Add(2)
	// forward both stdout and stderr to the journal
	go copy(&wg, config.Stdout, journal.PriInfo, vars)
	go copy(&wg, config.Stderr, journal.PriErr, vars)
	// signal that we are ready and setup for the container to be started
	if err := ready(); err != nil {
		return err
	}
	wg.Wait()
	return nil
}

func copy(wg *sync.WaitGroup, r io.Reader, pri journal.Priority, vars map[string]string) {
	defer wg.Done()
	s := bufio.NewScanner(r)
	for s.Scan() {
		if s.Err() != nil {
			return
		}
		journal.Send(s.Text(), pri, vars)
	}
}
@ -2,10 +2,12 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
systemd "github.com/alexellis/faasd/pkg/systemd"
|
||||
systemd "github.com/openfaas/faasd/pkg/systemd"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@ -16,29 +18,52 @@ var installCmd = &cobra.Command{
|
||||
RunE: runInstall,
|
||||
}
|
||||
|
||||
const workingDirectoryPermission = 0644
|
||||
|
||||
const faasdwd = "/var/lib/faasd"
|
||||
|
||||
const faasdProviderWd = "/var/lib/faasd-provider"
|
||||
|
||||
func runInstall(_ *cobra.Command, _ []string) error {
|
||||
|
||||
err := binExists("/usr/local/bin/", "faas-containerd")
|
||||
if err := ensureWorkingDir(path.Join(faasdwd, "secrets")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ensureWorkingDir(faasdProviderWd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if basicAuthErr := makeBasicAuthFiles(path.Join(faasdwd, "secrets")); basicAuthErr != nil {
|
||||
return errors.Wrap(basicAuthErr, "cannot create basic-auth-* files")
|
||||
}
|
||||
|
||||
if err := cp("docker-compose.yaml", faasdwd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cp("prometheus.yml", faasdwd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cp("resolv.conf", faasdwd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := binExists("/usr/local/bin/", "faasd")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = binExists("/usr/local/bin/", "faasd")
|
||||
err = systemd.InstallUnit("faasd-provider", map[string]string{
|
||||
"Cwd": faasdProviderWd,
|
||||
"SecretMountPath": path.Join(faasdwd, "secrets")})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = binExists("/usr/local/bin/", "netns")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.InstallUnit("faas-containerd")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.InstallUnit("faasd")
|
||||
err = systemd.InstallUnit("faasd", map[string]string{"Cwd": faasdwd})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -48,7 +73,7 @@ func runInstall(_ *cobra.Command, _ []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.Enable("faas-containerd")
|
||||
err = systemd.Enable("faasd-provider")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -58,7 +83,7 @@ func runInstall(_ *cobra.Command, _ []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.Start("faas-containerd")
|
||||
err = systemd.Start("faasd-provider")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -68,6 +93,9 @@ func runInstall(_ *cobra.Command, _ []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(`Login with:
|
||||
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s`)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -78,3 +106,33 @@ func binExists(folder, name string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ensureWorkingDir(folder string) error {
|
||||
if _, err := os.Stat(folder); err != nil {
|
||||
err = os.MkdirAll(folder, workingDirectoryPermission)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func cp(source, destFolder string) error {
|
||||
file, err := os.Open(source)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
out, err := os.Create(path.Join(destFolder, source))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
_, err = io.Copy(out, file)
|
||||
|
||||
return err
|
||||
}
|
||||
|
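As a rough sanity check after `faasd install`, the directories, files, and systemd units created by `runInstall` above can be inspected directly; the paths come from the `faasdwd` and `faasdProviderWd` constants:

```sh
# Working directories prepared by ensureWorkingDir
ls -ld /var/lib/faasd /var/lib/faasd/secrets /var/lib/faasd-provider

# Files copied in by cp(): compose file, Prometheus config and resolv.conf
ls -l /var/lib/faasd/docker-compose.yaml /var/lib/faasd/prometheus.yml /var/lib/faasd/resolv.conf

# Units installed, enabled and started by the installer
systemctl status faasd-provider faasd --no-pager
```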
116
cmd/provider.go
Normal file
@ -0,0 +1,116 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
bootstrap "github.com/openfaas/faas-provider"
|
||||
"github.com/openfaas/faas-provider/logs"
|
||||
"github.com/openfaas/faas-provider/proxy"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
faasdlogs "github.com/openfaas/faasd/pkg/logs"
|
||||
"github.com/openfaas/faasd/pkg/provider/config"
|
||||
"github.com/openfaas/faasd/pkg/provider/handlers"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func makeProviderCmd() *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "provider",
|
||||
Short: "Run the faasd-provider",
|
||||
}
|
||||
|
||||
command.Flags().String("pull-policy", "Always", `Set to "Always" to force a pull of images upon deployment, or "IfNotPresent" to try to use a cached image.`)
|
||||
|
||||
command.RunE = func(_ *cobra.Command, _ []string) error {
|
||||
|
||||
pullPolicy, flagErr := command.Flags().GetString("pull-policy")
|
||||
if flagErr != nil {
|
||||
return flagErr
|
||||
}
|
||||
|
||||
alwaysPull := false
|
||||
if pullPolicy == "Always" {
|
||||
alwaysPull = true
|
||||
}
|
||||
|
||||
config, providerConfig, err := config.ReadFromEnv(types.OsEnv{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("faasd-provider starting..\tService Timeout: %s\n", config.WriteTimeout.String())
|
||||
printVersion()
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writeHostsErr := ioutil.WriteFile(path.Join(wd, "hosts"),
|
||||
[]byte(`127.0.0.1 localhost`), workingDirectoryPermission)
|
||||
|
||||
if writeHostsErr != nil {
|
||||
return fmt.Errorf("cannot write hosts file: %s", writeHostsErr)
|
||||
}
|
||||
|
||||
writeResolvErr := ioutil.WriteFile(path.Join(wd, "resolv.conf"),
|
||||
[]byte(`nameserver 8.8.8.8`), workingDirectoryPermission)
|
||||
|
||||
if writeResolvErr != nil {
|
||||
return fmt.Errorf("cannot write resolv.conf file: %s", writeResolvErr)
|
||||
}
|
||||
|
||||
cni, err := cninetwork.InitNetwork()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := containerd.New(providerConfig.Sock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer client.Close()
|
||||
|
||||
invokeResolver := handlers.NewInvokeResolver(client)
|
||||
|
||||
userSecretPath := path.Join(wd, "secrets")
|
||||
|
||||
bootstrapHandlers := types.FaaSHandlers{
|
||||
FunctionProxy: proxy.NewHandlerFunc(*config, invokeResolver),
|
||||
DeleteHandler: handlers.MakeDeleteHandler(client, cni),
|
||||
DeployHandler: handlers.MakeDeployHandler(client, cni, userSecretPath, alwaysPull),
|
||||
FunctionReader: handlers.MakeReadHandler(client),
|
||||
ReplicaReader: handlers.MakeReplicaReaderHandler(client),
|
||||
ReplicaUpdater: handlers.MakeReplicaUpdateHandler(client, cni),
|
||||
UpdateHandler: handlers.MakeUpdateHandler(client, cni, userSecretPath, alwaysPull),
|
||||
HealthHandler: func(w http.ResponseWriter, r *http.Request) {},
|
||||
InfoHandler: handlers.MakeInfoHandler(Version, GitCommit),
|
||||
ListNamespaceHandler: listNamespaces(),
|
||||
SecretHandler: handlers.MakeSecretHandler(client, userSecretPath),
|
||||
LogHandler: logs.NewLogHandlerFunc(faasdlogs.New(), config.ReadTimeout),
|
||||
}
|
||||
|
||||
log.Printf("Listening on TCP port: %d\n", *config.TCPPort)
|
||||
bootstrap.Serve(&bootstrapHandlers, config)
|
||||
return nil
|
||||
}
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func listNamespaces() func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
list := []string{""}
|
||||
out, _ := json.Marshal(list)
|
||||
w.Write(out)
|
||||
}
|
||||
}
|
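For reference, the `--pull-policy` flag registered above toggles the `alwaysPull` behaviour passed to the deploy and update handlers. A minimal usage sketch, run as root from the provider's working directory:

```sh
sudo -i
cd /var/lib/faasd-provider

# Default: always pull images on deployment
faasd provider

# Reuse images already present in containerd where possible
faasd provider --pull-policy=IfNotPresent
```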
94
cmd/root.go
@ -3,19 +3,10 @@ package cmd
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/alexellis/faasd/pkg"
|
||||
"github.com/morikuni/aec"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
// Version as per git repo
|
||||
Version string
|
||||
|
||||
// GitCommit as per git repo
|
||||
GitCommit string
|
||||
)
|
||||
|
||||
// WelcomeMessage to introduce ofc-bootstrap
|
||||
const WelcomeMessage = "Welcome to faasd"
|
||||
|
||||
@ -23,41 +14,22 @@ func init() {
|
||||
rootCommand.AddCommand(versionCmd)
|
||||
rootCommand.AddCommand(upCmd)
|
||||
rootCommand.AddCommand(installCmd)
|
||||
rootCommand.AddCommand(makeProviderCmd())
|
||||
rootCommand.AddCommand(collectCmd)
|
||||
}
|
||||
|
||||
var rootCommand = &cobra.Command{
|
||||
Use: "faasd",
|
||||
Short: "Start faasd",
|
||||
Long: `
|
||||
faasd - serverless without Kubernetes
|
||||
`,
|
||||
RunE: runRootCommand,
|
||||
SilenceUsage: true,
|
||||
func RootCommand() *cobra.Command {
|
||||
return rootCommand
|
||||
}
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Display version information.",
|
||||
Run: parseBaseCommand,
|
||||
}
|
||||
|
||||
func getVersion() string {
|
||||
if len(Version) != 0 {
|
||||
return Version
|
||||
}
|
||||
return "dev"
|
||||
}
|
||||
|
||||
func parseBaseCommand(_ *cobra.Command, _ []string) {
|
||||
printLogo()
|
||||
|
||||
fmt.Printf(
|
||||
`faasd
|
||||
Commit: %s
|
||||
Version: %s
|
||||
`, pkg.GitCommit, pkg.GetVersion())
|
||||
}
|
||||
var (
|
||||
// GitCommit Git Commit SHA
|
||||
GitCommit string
|
||||
// Version version of the CLI
|
||||
Version string
|
||||
)
|
||||
|
||||
// Execute faasd
|
||||
func Execute(version, gitCommit string) error {
|
||||
|
||||
// Get Version and GitCommit values from main.go.
|
||||
@ -70,6 +42,16 @@ func Execute(version, gitCommit string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var rootCommand = &cobra.Command{
|
||||
Use: "faasd",
|
||||
Short: "Start faasd",
|
||||
Long: `
|
||||
faasd - serverless without Kubernetes
|
||||
`,
|
||||
RunE: runRootCommand,
|
||||
SilenceUsage: true,
|
||||
}
|
||||
|
||||
func runRootCommand(cmd *cobra.Command, args []string) error {
|
||||
|
||||
printLogo()
|
||||
@ -78,7 +60,39 @@ func runRootCommand(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Display version information.",
|
||||
Run: parseBaseCommand,
|
||||
}
|
||||
|
||||
func parseBaseCommand(_ *cobra.Command, _ []string) {
|
||||
printLogo()
|
||||
|
||||
printVersion()
|
||||
}
|
||||
|
||||
func printVersion() {
|
||||
fmt.Printf("faasd version: %s\tcommit: %s\n", GetVersion(), GitCommit)
|
||||
}
|
||||
|
||||
func printLogo() {
|
||||
logoText := aec.WhiteF.Apply(pkg.Logo)
|
||||
logoText := aec.WhiteF.Apply(Logo)
|
||||
fmt.Println(logoText)
|
||||
}
|
||||
|
||||
// GetVersion get latest version
|
||||
func GetVersion() string {
|
||||
if len(Version) == 0 {
|
||||
return "dev"
|
||||
}
|
||||
return Version
|
||||
}
|
||||
|
||||
// Logo for version and root command
|
||||
const Logo = ` __ _
|
||||
/ _| __ _ __ _ ___ __| |
|
||||
| |_ / _` + "`" + ` |/ _` + "`" + ` / __|/ _` + "`" + ` |
|
||||
| _| (_| | (_| \__ \ (_| |
|
||||
|_| \__,_|\__,_|___/\__,_|
|
||||
`
|
||||
|
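A quick way to confirm the wiring above is `faasd version`, which prints the logo via `printLogo` and then the version and commit via `printVersion`, falling back to `dev` when no version was injected at build time. The values shown below are placeholders and depend on the build:

```sh
faasd version
# (ASCII logo)
# faasd version: dev    commit: <commit-sha>
```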
214
cmd/up.go
@ -2,6 +2,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
@ -10,39 +11,56 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/alexellis/faasd/pkg"
|
||||
"github.com/alexellis/k3sup/pkg/env"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sethvargo/go-password/password"
|
||||
"github.com/spf13/cobra"
|
||||
flag "github.com/spf13/pflag"
|
||||
|
||||
"github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
// upConfig are the CLI flags used by the `faasd up` command to deploy the faasd service
|
||||
type upConfig struct {
|
||||
// composeFilePath is the path to the compose file specifying the faasd service configuration
|
||||
// See https://compose-spec.io/ for more information about the spec,
|
||||
//
|
||||
// currently, this must be the name of a file in workingDir, which is set to the value of
|
||||
// `faasdwd = /var/lib/faasd`
|
||||
composeFilePath string
|
||||
|
||||
// working directory to assume the compose file is in, should be faasdwd.
|
||||
// this is not configurable but may be in the future.
|
||||
workingDir string
|
||||
}
|
||||
|
||||
func init() {
|
||||
configureUpFlags(upCmd.Flags())
|
||||
}
|
||||
|
||||
var upCmd = &cobra.Command{
|
||||
Use: "up",
|
||||
Short: "Start faasd",
|
||||
RunE: runUp,
|
||||
}
|
||||
|
||||
func runUp(_ *cobra.Command, _ []string) error {
|
||||
func runUp(cmd *cobra.Command, _ []string) error {
|
||||
|
||||
clientArch, clientOS := env.GetClientArch()
|
||||
printVersion()
|
||||
|
||||
if clientOS != "Linux" {
|
||||
return fmt.Errorf("You can only use faasd on Linux")
|
||||
}
|
||||
clientSuffix := ""
|
||||
switch clientArch {
|
||||
case "x86_64":
|
||||
clientSuffix = ""
|
||||
break
|
||||
case "armhf":
|
||||
case "armv7l":
|
||||
clientSuffix = "-armhf"
|
||||
break
|
||||
case "arm64":
|
||||
case "aarch64":
|
||||
clientSuffix = "-arm64"
|
||||
cfg, err := parseUpFlags(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
services := makeServiceDefinitions(clientSuffix)
|
||||
services, err := loadServiceDefinition(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
basicAuthErr := makeBasicAuthFiles(path.Join(cfg.workingDir, "secrets"))
|
||||
if basicAuthErr != nil {
|
||||
return errors.Wrap(basicAuthErr, "cannot create basic-auth-* files")
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
supervisor, err := pkg.NewSupervisor("/run/containerd/containerd.sock")
|
||||
@ -53,18 +71,15 @@ func runUp(_ *cobra.Command, _ []string) error {
|
||||
log.Printf("Supervisor created in: %s\n", time.Since(start).String())
|
||||
|
||||
start = time.Now()
|
||||
|
||||
err = supervisor.Start(services)
|
||||
|
||||
if err != nil {
|
||||
if err := supervisor.Start(services); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer supervisor.Close()
|
||||
|
||||
log.Printf("Supervisor init done in: %s\n", time.Since(start).String())
|
||||
|
||||
shutdownTimeout := time.Second * 1
|
||||
timeout := time.Second * 60
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
@ -80,68 +95,111 @@ func runUp(_ *cobra.Command, _ []string) error {
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
// TODO: close proxies
|
||||
time.AfterFunc(shutdownTimeout, func() {
|
||||
wg.Done()
|
||||
})
|
||||
}()
|
||||
|
||||
localResolver := pkg.NewLocalResolver(path.Join(cfg.workingDir, "hosts"))
|
||||
go localResolver.Start()
|
||||
|
||||
proxies := map[uint32]*pkg.Proxy{}
|
||||
for _, svc := range services {
|
||||
for _, port := range svc.Ports {
|
||||
|
||||
listenPort := port.Port
|
||||
if _, ok := proxies[listenPort]; ok {
|
||||
return fmt.Errorf("port %d already allocated", listenPort)
|
||||
}
|
||||
|
||||
hostIP := "0.0.0.0"
|
||||
if len(port.HostIP) > 0 {
|
||||
hostIP = port.HostIP
|
||||
}
|
||||
|
||||
upstream := fmt.Sprintf("%s:%d", svc.Name, port.TargetPort)
|
||||
proxies[listenPort] = pkg.NewProxy(upstream, listenPort, hostIP, timeout, localResolver)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: track proxies for later cancellation when receiving sigint/term
|
||||
for _, v := range proxies {
|
||||
go v.Start()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeServiceDefinitions(archSuffix string) []pkg.Service {
|
||||
wd, _ := os.Getwd()
|
||||
func makeBasicAuthFiles(wd string) error {
|
||||
|
||||
return []pkg.Service{
|
||||
pkg.Service{
|
||||
Name: "nats",
|
||||
Env: []string{""},
|
||||
Image: "docker.io/library/nats-streaming:0.11.2",
|
||||
Caps: []string{},
|
||||
Args: []string{"/nats-streaming-server", "-m", "8222", "--store=memory", "--cluster_id=faas-cluster"},
|
||||
},
|
||||
pkg.Service{
|
||||
Name: "prometheus",
|
||||
Env: []string{},
|
||||
Image: "docker.io/prom/prometheus:v2.14.0",
|
||||
Mounts: []pkg.Mount{
|
||||
pkg.Mount{
|
||||
Src: path.Join(wd, "prometheus.yml"),
|
||||
Dest: "/etc/prometheus/prometheus.yml",
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
pkg.Service{
|
||||
Name: "gateway",
|
||||
Env: []string{
|
||||
"basic_auth=false",
|
||||
"functions_provider_url=http://faas-containerd:8081/",
|
||||
"direct_functions=false",
|
||||
"read_timeout=60s",
|
||||
"write_timeout=60s",
|
||||
"upstream_timeout=65s",
|
||||
"faas_nats_address=nats",
|
||||
"faas_nats_port=4222",
|
||||
},
|
||||
Image: "docker.io/openfaas/gateway:0.18.8" + archSuffix,
|
||||
Mounts: []pkg.Mount{},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
pkg.Service{
|
||||
Name: "queue-worker",
|
||||
Env: []string{
|
||||
"faas_nats_address=nats",
|
||||
"faas_nats_port=4222",
|
||||
"gateway_invoke=true",
|
||||
"faas_gateway_address=gateway",
|
||||
"ack_wait=5m5s",
|
||||
"max_inflight=1",
|
||||
"write_debug=false",
|
||||
},
|
||||
Image: "docker.io/openfaas/queue-worker:0.9.0",
|
||||
Mounts: []pkg.Mount{},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
pwdFile := path.Join(wd, "basic-auth-password")
|
||||
authPassword, err := password.Generate(63, 10, 0, false, true)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = makeFile(pwdFile, authPassword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
userFile := path.Join(wd, "basic-auth-user")
|
||||
err = makeFile(userFile, "admin")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeFile will create a file with the specified content if it does not exist yet.
|
||||
// if the file already exists, the method is a noop.
|
||||
func makeFile(filePath, fileContents string) error {
|
||||
_, err := os.Stat(filePath)
|
||||
if err == nil {
|
||||
log.Printf("File exists: %q\n", filePath)
|
||||
return nil
|
||||
} else if os.IsNotExist(err) {
|
||||
log.Printf("Writing to: %q\n", filePath)
|
||||
return ioutil.WriteFile(filePath, []byte(fileContents), workingDirectoryPermission)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// load the docker compose file and then parse it as supervisor Services
|
||||
// the logic for loading the compose file comes from the compose reference implementation
|
||||
// https://github.com/compose-spec/compose-ref/blob/master/compose-ref.go#L353
|
||||
func loadServiceDefinition(cfg upConfig) ([]pkg.Service, error) {
|
||||
|
||||
serviceConfig, err := pkg.LoadComposeFile(cfg.workingDir, cfg.composeFilePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pkg.ParseCompose(serviceConfig)
|
||||
}
|
||||
|
||||
// ConfigureUpFlags will define the flags for the `faasd up` command. The flag struct, configure, and
|
||||
// parse are split like this to simplify testability.
|
||||
func configureUpFlags(flags *flag.FlagSet) {
|
||||
flags.StringP("file", "f", "docker-compose.yaml", "compose file specifying the faasd service configuration")
|
||||
}
|
||||
|
||||
// ParseUpFlags will load the flag values into an upFlags object. Errors will be underlying
|
||||
// Get errors from the pflag library.
|
||||
func parseUpFlags(cmd *cobra.Command) (upConfig, error) {
|
||||
parsed := upConfig{}
|
||||
path, err := cmd.Flags().GetString("file")
|
||||
if err != nil {
|
||||
return parsed, errors.Wrap(err, "can not parse compose file path flag")
|
||||
}
|
||||
|
||||
parsed.composeFilePath = path
|
||||
parsed.workingDir = faasdwd
|
||||
return parsed, err
|
||||
}
|
||||
|
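The `--file`/`-f` flag declared in `configureUpFlags` selects which compose file `loadServiceDefinition` reads from the faasd working directory (`/var/lib/faasd`). A minimal sketch, where `custom-compose.yaml` is only an illustrative name for a file you have placed in that directory:

```sh
sudo -i
cd /var/lib/faasd

# Default: ./docker-compose.yaml
faasd up

# Point at another compose file in the same working directory
faasd up -f custom-compose.yaml
```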
98
docker-compose.yaml
Normal file
@ -0,0 +1,98 @@
|
||||
version: "3.7"
|
||||
services:
|
||||
basic-auth-plugin:
|
||||
image: "docker.io/openfaas/basic-auth-plugin:0.18.18${ARCH_SUFFIX}"
|
||||
environment:
|
||||
- port=8080
|
||||
- secret_mount_path=/run/secrets
|
||||
- user_filename=basic-auth-user
|
||||
- pass_filename=basic-auth-password
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
|
||||
nats:
|
||||
image: docker.io/library/nats-streaming:0.11.2
|
||||
command:
|
||||
- "/nats-streaming-server"
|
||||
- "-m"
|
||||
- "8222"
|
||||
- "--store=memory"
|
||||
- "--cluster_id=faas-cluster"
|
||||
# ports:
|
||||
# - "127.0.0.1:8222:8222"
|
||||
|
||||
prometheus:
|
||||
image: docker.io/prom/prometheus:v2.14.0
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./prometheus.yml
|
||||
target: /etc/prometheus/prometheus.yml
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
ports:
|
||||
- "127.0.0.1:9090:9090"
|
||||
|
||||
gateway:
|
||||
image: "docker.io/openfaas/gateway:0.19.1${ARCH_SUFFIX}"
|
||||
environment:
|
||||
- basic_auth=true
|
||||
- functions_provider_url=http://faasd-provider:8081/
|
||||
- direct_functions=false
|
||||
- read_timeout=60s
|
||||
- write_timeout=60s
|
||||
- upstream_timeout=65s
|
||||
- faas_nats_address=nats
|
||||
- faas_nats_port=4222
|
||||
- auth_proxy_url=http://basic-auth-plugin:8080/validate
|
||||
- auth_proxy_pass_body=false
|
||||
- secret_mount_path=/run/secrets
|
||||
- scale_from_zero=true
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
depends_on:
|
||||
- basic-auth-plugin
|
||||
- nats
|
||||
- prometheus
|
||||
ports:
|
||||
- "8080:8080"
|
||||
|
||||
queue-worker:
|
||||
image: docker.io/openfaas/queue-worker:0.11.2
|
||||
environment:
|
||||
- faas_nats_address=nats
|
||||
- faas_nats_port=4222
|
||||
- gateway_invoke=true
|
||||
- faas_gateway_address=gateway
|
||||
- ack_wait=5m5s
|
||||
- max_inflight=1
|
||||
- write_debug=false
|
||||
- basic_auth=true
|
||||
- secret_mount_path=/run/secrets
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
depends_on:
|
||||
- nats
|
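The bind mounts above assume a specific layout under `/var/lib/faasd` before `faasd up` starts these services; the secrets and Prometheus config are normally created by `faasd install`. A quick check of that layout:

```sh
# Secrets mounted into basic-auth-plugin, gateway and queue-worker
ls -l /var/lib/faasd/secrets/basic-auth-password /var/lib/faasd/secrets/basic-auth-user

# Prometheus configuration mounted into the prometheus service
ls -l /var/lib/faasd/prometheus.yml
```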
357
docs/DEV.md
Normal file
@ -0,0 +1,357 @@
|
||||
## Manual installation of faasd for development
|
||||
|
||||
> Note: if you're just wanting to try out faasd, then it's likely that you're on the wrong page. This is a detailed set of instructions for those wanting to contribute or customise faasd. Feel free to go back to the homepage and pick a tutorial instead.
|
||||
|
||||
### Pre-reqs
|
||||
|
||||
* Linux
|
||||
|
||||
PC / Cloud - any Linux that containerd works on should be fair game, but faasd is tested with Ubuntu 18.04
|
||||
|
||||
For Raspberry Pi, Raspbian Stretch or newer also works fine
|
||||
|
||||
For MacOS users, try [multipass.run](https://multipass.run) or [Vagrant](https://www.vagrantup.com/)
|
||||
|
||||
For Windows users, install [Git Bash](https://git-scm.com/downloads) along with multipass or vagrant. You can also use WSL1 or WSL2 which provides a Linux environment.
|
||||
|
||||
You will also need [containerd v1.3.5](https://github.com/containerd/containerd) and the [CNI plugins v0.8.5](https://github.com/containernetworking/plugins)
|
||||
|
||||
[faas-cli](https://github.com/openfaas/faas-cli) is optional, but recommended.
|
||||
|
||||
If you're using multipass, then allocate sufficient resources:
|
||||
|
||||
```sh
|
||||
multipass launch \
|
||||
--mem 4G \
|
||||
-c 2 \
|
||||
-n faasd
|
||||
|
||||
# Then access its shell
|
||||
multipass shell faasd
|
||||
```
|
||||
|
||||
### Get runc
|
||||
|
||||
```sh
|
||||
sudo apt update \
|
||||
&& sudo apt install -qy \
|
||||
runc \
|
||||
bridge-utils \
|
||||
make
|
||||
```
|
||||
|
||||
### Get faas-cli (optional)
|
||||
|
||||
Having `faas-cli` on your dev machine is useful for testing and debugging.
|
||||
|
||||
```bash
|
||||
curl -sLS https://cli.openfaas.com | sudo sh
|
||||
```
|
||||
|
||||
#### Install the CNI plugins:
|
||||
|
||||
* For PC run `export ARCH=amd64`
|
||||
* For RPi/armhf run `export ARCH=arm`
|
||||
* For arm64 run `export ARCH=arm64`
|
||||
|
||||
Then run:
|
||||
|
||||
```sh
|
||||
export ARCH=amd64
|
||||
export CNI_VERSION=v0.8.5
|
||||
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
curl -sSL https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz | sudo tar -xz -C /opt/cni/bin
|
||||
|
||||
# Make a config folder for CNI definitions
|
||||
sudo mkdir -p /etc/cni/net.d
|
||||
|
||||
# Make an initial loopback configuration
|
||||
sudo sh -c 'cat >/etc/cni/net.d/99-loopback.conf <<-EOF
|
||||
{
|
||||
"cniVersion": "0.3.1",
|
||||
"type": "loopback"
|
||||
}
|
||||
EOF'
|
||||
```
|
||||
|
||||
### Get containerd
|
||||
|
||||
You have three options: binaries for PC, binaries for armhf, or building from source.
|
||||
|
||||
* Install containerd `x86_64` only
|
||||
|
||||
```sh
|
||||
export VER=1.3.5
|
||||
curl -sSL https://github.com/containerd/containerd/releases/download/v$VER/containerd-$VER-linux-amd64.tar.gz > /tmp/containerd.tar.gz \
|
||||
&& sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
|
||||
containerd -version
|
||||
```
|
||||
|
||||
* Or get my containerd binaries for Raspberry Pi (armhf)
|
||||
|
||||
Building `containerd` on armhf is extremely slow, so I've provided binaries for you.
|
||||
|
||||
```sh
|
||||
curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.3.5/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
|
||||
```
|
||||
|
||||
* Or clone / build / install [containerd](https://github.com/containerd/containerd) from source:
|
||||
|
||||
```sh
|
||||
export GOPATH=$HOME/go/
|
||||
mkdir -p $GOPATH/src/github.com/containerd
|
||||
cd $GOPATH/src/github.com/containerd
|
||||
git clone https://github.com/containerd/containerd
|
||||
cd containerd
|
||||
git fetch origin --tags
|
||||
git checkout v1.3.5
|
||||
|
||||
make
|
||||
sudo make install
|
||||
|
||||
containerd --version
|
||||
```
|
||||
|
||||
#### Ensure containerd is running
|
||||
|
||||
```sh
|
||||
curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service > /tmp/containerd.service
|
||||
|
||||
# Extend the timeouts for low-performance VMs
|
||||
echo "[Manager]" | tee -a /tmp/containerd.service
|
||||
echo "DefaultTimeoutStartSec=3m" | tee -a /tmp/containerd.service
|
||||
|
||||
sudo cp /tmp/containerd.service /lib/systemd/system/
|
||||
sudo systemctl enable containerd
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart containerd
|
||||
```
|
||||
|
||||
Or run ad-hoc. This step can be useful for exploring why containerd might fail to start.
|
||||
|
||||
```sh
|
||||
sudo containerd &
|
||||
```
|
||||
|
||||
#### Enable forwarding
|
||||
|
||||
> This is required to allow containers in containerd to access the Internet via your computer's primary network interface.
|
||||
|
||||
```sh
|
||||
sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
```
|
||||
|
||||
Make the setting permanent:
|
||||
|
||||
```sh
|
||||
echo "net.ipv4.conf.all.forwarding=1" | sudo tee -a /etc/sysctl.conf
|
||||
```
|
||||
|
||||
### Hacking (build from source)
|
||||
|
||||
#### Get build packages
|
||||
|
||||
```sh
|
||||
sudo apt update \
|
||||
&& sudo apt install -qy \
|
||||
runc \
|
||||
bridge-utils \
|
||||
make
|
||||
```
|
||||
|
||||
You may find alternative package names for CentOS and other Linux distributions.
|
||||
|
||||
#### Install Go 1.13 (x86_64)
|
||||
|
||||
```sh
|
||||
curl -sSLf https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz > /tmp/go.tgz
|
||||
sudo rm -rf /usr/local/go/
|
||||
sudo mkdir -p /usr/local/go/
|
||||
sudo tar -xvf /tmp/go.tgz -C /usr/local/go/ --strip-components=1
|
||||
|
||||
export GOPATH=$HOME/go/
|
||||
export PATH=$PATH:/usr/local/go/bin/
|
||||
|
||||
go version
|
||||
```
|
||||
|
||||
You should also add the following to `~/.bash_profile`:
|
||||
|
||||
```sh
|
||||
echo "export GOPATH=\$HOME/go/" | tee -a $HOME/.bash_profile
|
||||
echo "export PATH=\$PATH:/usr/local/go/bin/" | tee -a $HOME/.bash_profile
|
||||
```
|
||||
|
||||
#### Or on Raspberry Pi (armhf)
|
||||
|
||||
```sh
|
||||
curl -SLsf https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz > go.tgz
|
||||
sudo rm -rf /usr/local/go/
|
||||
sudo mkdir -p /usr/local/go/
|
||||
sudo tar -xvf go.tgz -C /usr/local/go/ --strip-components=1
|
||||
|
||||
export GOPATH=$HOME/go/
|
||||
export PATH=$PATH:/usr/local/go/bin/
|
||||
|
||||
go version
|
||||
```
|
||||
|
||||
#### Clone faasd and its systemd unit files
|
||||
|
||||
```sh
|
||||
mkdir -p $GOPATH/src/github.com/openfaas/
|
||||
cd $GOPATH/src/github.com/openfaas/
|
||||
git clone https://github.com/openfaas/faasd
|
||||
```
|
||||
|
||||
#### Build `faasd` from source (optional)
|
||||
|
||||
```sh
|
||||
cd $GOPATH/src/github.com/openfaas/faasd
|
||||
make local
|
||||
|
||||
# Install the binary
|
||||
sudo cp bin/faasd /usr/local/bin
|
||||
```
|
||||
|
||||
#### Or, download and run `faasd` (binaries)
|
||||
|
||||
```sh
|
||||
# For x86_64
|
||||
export SUFFIX=""
|
||||
|
||||
# armhf
|
||||
export SUFFIX="-armhf"
|
||||
|
||||
# arm64
|
||||
export SUFFIX="-arm64"
|
||||
|
||||
# Then download
|
||||
curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.9.5/faasd$SUFFIX" \
|
||||
-o "/tmp/faasd" \
|
||||
&& chmod +x "/tmp/faasd"
|
||||
sudo mv /tmp/faasd /usr/local/bin/
|
||||
```
|
||||
|
||||
#### Install `faasd`
|
||||
|
||||
This step installs faasd as a systemd unit file, creates files in `/var/lib/faasd`, and writes out networking configuration for the CNI bridge networking plugin.
|
||||
|
||||
```sh
|
||||
sudo faasd install
|
||||
|
||||
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-password"
|
||||
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-user"
|
||||
Login with:
|
||||
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s
|
||||
```
|
||||
|
||||
You can now log in, either from this machine or from a remote machine, using the OpenFaaS UI or CLI.
|
||||
|
||||
Check that faasd is ready:
|
||||
|
||||
```
|
||||
sudo journalctl -u faasd
|
||||
```
|
||||
|
||||
You should see output like:
|
||||
|
||||
```
|
||||
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 Starting faasd proxy on 8080
|
||||
Feb 17 17:46:35 gold-survive faasd[4140]: Gateway: 10.62.0.5:8080
|
||||
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 [proxy] Wait for done
|
||||
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 [proxy] Begin listen on 8080
|
||||
```
|
||||
|
||||
To get the CLI for the command above run:
|
||||
|
||||
```sh
|
||||
curl -sSLf https://cli.openfaas.com | sudo sh
|
||||
```
|
||||
|
||||
#### Make a change to `faasd`
|
||||
|
||||
There are two components you can hack on:
|
||||
|
||||
For function CRUD you will work on `faasd provider` which is started from `cmd/provider.go`
|
||||
|
||||
For faasd itself, you will work on the code from `faasd up`, which is started from `cmd/up.go`
|
||||
|
||||
Before working on either, stop the systemd services:
|
||||
|
||||
```
|
||||
sudo systemctl stop faasd          # up command
|
||||
sudo systemctl stop faasd-provider # provider command
|
||||
```
|
||||
|
||||
Here is a workflow you can use for each code change:
|
||||
|
||||
Enter the directory of the source code, and build a new binary:
|
||||
|
||||
```bash
|
||||
cd $GOPATH/src/github.com/openfaas/faasd
|
||||
go build
|
||||
```
|
||||
|
||||
Copy that binary to `/usr/local/bin/`
|
||||
|
||||
```bash
|
||||
cp faasd /usr/local/bin/
|
||||
```
|
||||
|
||||
To run `faasd up`, run it from its working directory as root
|
||||
|
||||
```bash
|
||||
sudo -i
|
||||
cd /var/lib/faasd
|
||||
|
||||
faasd up
|
||||
```
|
||||
|
||||
Now to run `faasd provider`, run it from its working directory:
|
||||
|
||||
```bash
|
||||
sudo -i
|
||||
cd /var/lib/faasd-provider
|
||||
|
||||
faasd provider
|
||||
```
|
||||
|
||||
#### At run-time
|
||||
|
||||
Look in `hosts` in the current working folder or in `/var/lib/faasd/` to get the IP for the gateway or Prometheus
|
||||
|
||||
```sh
|
||||
127.0.0.1 localhost
|
||||
10.62.0.1 faasd-provider
|
||||
|
||||
10.62.0.2 prometheus
|
||||
10.62.0.3 gateway
|
||||
10.62.0.4 nats
|
||||
10.62.0.5 queue-worker
|
||||
```
|
||||
|
||||
The IP addresses are dynamic and may change on every launch.
|
||||
|
||||
Since faasd-provider uses containerd heavily, it is not run as a container, but as a stand-alone process. Its port is available via the bridge interface, i.e. `openfaas0`.
|
||||
|
||||
* Prometheus will run on the Prometheus IP plus port 9090, i.e. http://[prometheus_ip]:9090/targets
|
||||
|
||||
* faasd-provider runs on 10.62.0.1:8081, i.e. directly on the host, and accessible via the bridge interface from CNI.
|
||||
|
||||
* Now go to the gateway's IP address as shown above on port 8080, i.e. http://[gateway_ip]:8080 - you can also use this address to deploy OpenFaaS Functions via the `faas-cli`.
|
||||
|
||||
* basic-auth
|
||||
|
||||
You will then need to get the basic-auth password; it is written to `/var/lib/faasd/secrets/basic-auth-password` if you followed the above instructions.
|
||||
The default Basic Auth username is `admin`, which is written to `/var/lib/faasd/secrets/basic-auth-user`. If you wish to use a non-standard user, create this file and add your username (no newlines or other characters), as in the sketch below.
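A minimal sketch of setting a non-standard user before starting faasd; the username `alex` is only an example:

```sh
# Write the username with no trailing newline
printf '%s' 'alex' | sudo tee /var/lib/faasd/secrets/basic-auth-user

# The generated password lives alongside it
sudo cat /var/lib/faasd/secrets/basic-auth-password
```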
|
||||
|
||||
#### Installation with systemd
|
||||
|
||||
* `faasd install` - install faasd and containerd with systemd; this must be run from `$GOPATH/src/github.com/openfaas/faasd`
|
||||
* `journalctl -u faasd -f` - faasd service logs
|
||||
* `journalctl -u faasd-provider -f` - faasd-provider service logs
|
141
docs/MULTIPASS.md
Normal file
@ -0,0 +1,141 @@
|
||||
# Tutorial - faasd with multipass
|
||||
|
||||
## Get up and running with your own faasd installation on your Mac
|
||||
|
||||
[multipass from Canonical](https://multipass.run) is like Docker Desktop, but for getting Ubuntu instead of a Docker daemon. It works on MacOS, Linux, and Windows with the same consistent UX. It's not fully open-source, and uses some proprietary add-ons / binaries, but is free to use.
|
||||
|
||||
For Linux using Ubuntu, you can install the packages directly, or use `sudo snap install multipass --classic` and follow this tutorial. For Raspberry Pi, [see my tutorial here](https://blog.alexellis.io/faasd-for-lightweight-serverless/).
|
||||
|
||||
John McCabe has also tested faasd on Windows with multipass, [see his tweet](https://twitter.com/mccabejohn/status/1221899154672308224).
|
||||
|
||||
## Use-case:
|
||||
|
||||
Try out [faasd](https://github.com/openfaas/faasd) in a single command using a cloud-config file to get a VM which has:
|
||||
|
||||
* port 22 for administration and
|
||||
* port 8080 for the OpenFaaS REST API.
|
||||
|
||||

|
||||
|
||||
The above screenshot is [from my tweet](https://twitter.com/alexellisuk/status/1221408788395298819/); feel free to comment there.
|
||||
|
||||
It took me about 2-3 minutes to run through everything after installing multipass.
|
||||
|
||||
## Let's start the tutorial
|
||||
|
||||
* Get [multipass.run](https://multipass.run)
|
||||
|
||||
* Get my cloud-config.txt file
|
||||
|
||||
```sh
|
||||
curl -sSLO https://raw.githubusercontent.com/openfaas/faasd/master/cloud-config.txt
|
||||
```
|
||||
|
||||
* Update the SSH key to match your own, edit `cloud-config.txt`:
|
||||
|
||||
Replace the 2nd line with the contents of `~/.ssh/id_rsa.pub`:
|
||||
|
||||
```
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local
|
||||
```
|
||||
|
||||
* Boot the VM
|
||||
|
||||
```sh
|
||||
multipass launch --cloud-init cloud-config.txt --name faasd
|
||||
```
|
||||
|
||||
* Get the VM's IP and connect with `ssh`
|
||||
|
||||
```sh
|
||||
multipass info faasd
|
||||
Name: faasd
|
||||
State: Running
|
||||
IPv4: 192.168.64.14
|
||||
Release: Ubuntu 18.04.3 LTS
|
||||
Image hash: a720c34066dc (Ubuntu 18.04 LTS)
|
||||
Load: 0.79 0.19 0.06
|
||||
Disk usage: 1.1G out of 4.7G
|
||||
Memory usage: 145.6M out of 985.7M
|
||||
```
|
||||
|
||||
Set the variable `IP`:
|
||||
|
||||
```
|
||||
export IP="192.168.64.14"
|
||||
```
|
||||
|
||||
You can also try to use `jq` to get the IP into a variable:
|
||||
|
||||
```sh
|
||||
export IP=$(multipass info faasd --format json| jq '.info.faasd.ipv4[0]' | tr -d '\"')
|
||||
```
|
||||
|
||||
Connect to the IP listed:
|
||||
|
||||
```sh
|
||||
ssh ubuntu@$IP
|
||||
```
|
||||
|
||||
Log out once you know it works.
|
||||
|
||||
* Let's capture the authentication password into a file for use with `faas-cli`
|
||||
|
||||
```
|
||||
ssh ubuntu@$IP "sudo cat /var/lib/faasd/secrets/basic-auth-password" > basic-auth-password
|
||||
```
|
||||
|
||||
## Try faasd (OpenFaaS)
|
||||
|
||||
* Login from your laptop (the host)
|
||||
|
||||
```
|
||||
export OPENFAAS_URL=http://$IP:8080
|
||||
cat basic-auth-password | faas-cli login -s
|
||||
```
|
||||
|
||||
* Deploy a function and invoke it
|
||||
|
||||
```
|
||||
faas-cli store deploy figlet --env write_timeout=1s
|
||||
echo "faasd" | faas-cli invoke figlet
|
||||
|
||||
faas-cli describe figlet
|
||||
|
||||
# Run async
|
||||
curl -i -d "faasd-async" $OPENFAAS_URL/async-function/figlet
|
||||
|
||||
# Run async with a callback
|
||||
|
||||
curl -i -d "faasd-async" -H "X-Callback-Url: http://some-request-bin.com/path" $OPENFAAS_URL/async-function/figlet
|
||||
```
|
||||
|
||||
You can also check out the other store functions: `faas-cli store list`
|
||||
|
||||
* Try the UI
|
||||
|
||||
Head over to the UI from your laptop and remember that your password is in the `basic-auth-password` file. The username is `admin`:
|
||||
|
||||
```
|
||||
echo http://$IP:8080
|
||||
```
|
||||
|
||||
* Stop/start the instance
|
||||
|
||||
```sh
|
||||
multipass stop faasd
|
||||
```
|
||||
|
||||
* Delete, if you want to:
|
||||
|
||||
```
|
||||
multipass delete --purge faasd
|
||||
```
|
||||
|
||||
You now have a faasd appliance on your Mac. You can also use this cloud-init file with a public cloud like AWS or DigitalOcean.
|
||||
|
||||
* If you want a public IP for your faasd VM, then just head over to [inlets.dev](https://inlets.dev/)
|
||||
* Try my more complete walk-through / tutorial with Raspberry Pi, or run the same steps on your multipass VM, including how to develop your own functions and services - https://blog.alexellis.io/faasd-for-lightweight-serverless/
|
||||
* You might also like [Building containers without Docker](https://blog.alexellis.io/building-containers-without-docker/)
|
||||
* Star/fork [faasd](https://github.com/openfaas/faasd) on GitHub
|
3
docs/bootstrap/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
/.terraform/
|
||||
/terraform.tfstate
|
||||
/terraform.tfstate.backup
|
20
docs/bootstrap/README.md
Normal file
@ -0,0 +1,20 @@
|
||||
# Bootstrap faasd on Digitalocean
|
||||
|
||||
1) [Sign up to DigitalOcean](https://www.digitalocean.com/?refcode=2962aa9e56a1&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=CopyPaste)
|
||||
2) [Download Terraform](https://www.terraform.io)
|
||||
3) Clone this gist using the URL from the address bar
|
||||
4) Run `terraform init`
|
||||
5) Run `terraform apply -var="do_token=$(cat $HOME/digitalocean-access-token)"`
|
||||
6) View the output for the login command and gateway URL i.e.
|
||||
|
||||
```
|
||||
gateway_url = http://178.128.39.201:8080/
|
||||
login_cmd = faas-cli login -g http://178.128.39.201:8080/ -p rvIU49CEcFcHmqxj
|
||||
password = rvIU49CEcFcHmqxj
|
||||
```
|
||||
|
||||
Note that the user-data may take a couple of minutes to come up since it will be pulling in various components and preparing the machine.
|
||||
|
||||
A single host with 1GB of RAM will be deployed for you; to remove it at a later date, simply use `terraform destroy`.
|
||||
|
||||
If required, you can remove the VM via `terraform destroy -var="do_token=$(cat $HOME/digitalocean-access-token)"`
|
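For convenience, the steps above can be run end to end as follows; `terraform output` re-prints the values from step 6 at any later time:

```sh
terraform init
terraform apply -var="do_token=$(cat $HOME/digitalocean-access-token)"

# Re-print the login command and gateway URL later on
terraform output login_cmd
terraform output gateway_url

# Tear the droplet down again when finished
terraform destroy -var="do_token=$(cat $HOME/digitalocean-access-token)"
```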
30
docs/bootstrap/cloud-config.tpl
Normal file
@ -0,0 +1,30 @@
|
||||
#cloud-config
|
||||
ssh_authorized_keys:
|
||||
## Note: Replace with your own public key
|
||||
- ${ssh_key}
|
||||
|
||||
package_update: true
|
||||
|
||||
packages:
|
||||
- runc
|
||||
|
||||
runcmd:
|
||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||
- systemctl daemon-reload && systemctl start containerd
|
||||
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
- mkdir -p /opt/cni/bin
|
||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||
- mkdir -p /go/src/github.com/openfaas/
|
||||
- mkdir -p /var/lib/faasd/secrets/
|
||||
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||
- echo admin > /var/lib/faasd/secrets/basic-auth-user
|
||||
- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.9.5 https://github.com/openfaas/faasd
|
||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.9.5/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||
- systemctl status -l containerd --no-pager
|
||||
- journalctl -u faasd-provider --no-pager
|
||||
- systemctl status -l faasd-provider --no-pager
|
||||
- systemctl status -l faasd --no-pager
|
||||
- curl -sSLf https://cli.openfaas.com | sh
|
||||
- sleep 5 && journalctl -u faasd --no-pager
|
3
docs/bootstrap/digitalocean-terraform/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
/.terraform/
|
||||
/terraform.tfstate
|
||||
/terraform.tfstate.backup
|
38
docs/bootstrap/digitalocean-terraform/README.md
Normal file
@ -0,0 +1,38 @@
|
||||
# Bootstrap faasd with TLS support on Digitalocean
|
||||
|
||||
1) [Sign up to DigitalOcean](https://www.digitalocean.com/?refcode=2962aa9e56a1&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=CopyPaste)
|
||||
2) [Download Terraform](https://www.terraform.io)
|
||||
3) Clone this gist using the URL from the address bar
|
||||
4) Run `terraform init`
|
||||
5) Configure terraform variables as needed by updating the `main.tfvars` file:
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------ | ------------------- | --------------- |
|
||||
| `do_token` | Digitalocean API token | None |
|
||||
| `do_domain` | Public domain used for the faasd gateway | None |
|
||||
| `do_subdomain` | Public subdomain used for the faasd gateway | `faasd` |
|
||||
| `letsencrypt_email` | Email used when ordering a TLS certificate from Letsencrypt | `""` |
|
||||
| `do_create_record` | When set to `true`, a new DNS record will be created. This works only if your domain (`do_domain`) is managed by Digitalocean | `false` |
|
||||
| `do_region` | Digitalocean region for creating the droplet | `fra1` |
|
||||
| `ssh_key_file` | Path to public SSH key file |`~/.ssh/id_rsa.pub` |
|
||||
|
||||
> Environment variables can also be used to set terraform variables when running the `terraform apply` command, using the format `TF_VAR_name` (see the example at the end of this README).
|
||||
|
||||
6) Run `terraform apply`
|
||||
1) Add `-var-file=main.tfvars` if you have set the variables in `main.tfvars`.
|
||||
2) OR [use environment variables](https://www.terraform.io/docs/commands/environment-variables.html#tf_var_name) for setting the terraform variables when running the `apply` command
|
||||
|
||||
7) View the output for the login command and gateway URL i.e.
|
||||
|
||||
```
|
||||
droplet_ip = 178.128.39.201
|
||||
gateway_url = https://faasd.example.com/
|
||||
login_cmd = faas-cli login -g https://faasd.example.com/ -p rvIU49CEcFcHmqxj
|
||||
password = rvIU49CEcFcHmqxj
|
||||
```
|
||||
8) Use your browser to access the OpenFaaS interface
|
||||
|
||||
Note that the user-data may take a couple of minutes to come up since it will be pulling in various components and preparing the machine.
|
||||
Also take into consideration the DNS propagation time for the new DNS record.
|
||||
|
||||
A single host with 1GB of RAM will be deployed for you; to remove it at a later date, simply use `terraform destroy`.
|
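As referenced in the note above, a minimal sketch of setting the same variables through the environment instead of `main.tfvars`; every value shown is a placeholder:

```sh
export TF_VAR_do_token="$(cat $HOME/digitalocean-access-token)"
export TF_VAR_do_domain="example.com"
export TF_VAR_do_subdomain="faasd"
export TF_VAR_letsencrypt_email="you@example.com"

terraform apply
```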
57
docs/bootstrap/digitalocean-terraform/cloud-config.tpl
Normal file
@ -0,0 +1,57 @@
|
||||
#cloud-config
|
||||
ssh_authorized_keys:
|
||||
- ${ssh_key}
|
||||
|
||||
groups:
|
||||
- caddy
|
||||
|
||||
users:
|
||||
- name: caddy
|
||||
gecos: Caddy web server
|
||||
primary_group: caddy
|
||||
groups: caddy
|
||||
shell: /usr/sbin/nologin
|
||||
homedir: /var/lib/caddy
|
||||
|
||||
write_files:
|
||||
- content: |
|
||||
{
|
||||
email ${letsencrypt_email}
|
||||
}
|
||||
|
||||
${faasd_domain_name} {
|
||||
reverse_proxy 127.0.0.1:8080
|
||||
}
|
||||
|
||||
path: /etc/caddy/Caddyfile
|
||||
|
||||
package_update: true
|
||||
|
||||
packages:
|
||||
- runc
|
||||
|
||||
runcmd:
|
||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||
- systemctl daemon-reload && systemctl start containerd
|
||||
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
- mkdir -p /opt/cni/bin
|
||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||
- mkdir -p /go/src/github.com/openfaas/
|
||||
- mkdir -p /var/lib/faasd/secrets/
|
||||
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||
- echo admin > /var/lib/faasd/secrets/basic-auth-user
|
||||
- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.9.5 https://github.com/openfaas/faasd
|
||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.9.5/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||
- systemctl status -l containerd --no-pager
|
||||
- journalctl -u faasd-provider --no-pager
|
||||
- systemctl status -l faasd-provider --no-pager
|
||||
- systemctl status -l faasd --no-pager
|
||||
- curl -sSLf https://cli.openfaas.com | sh
|
||||
- sleep 5 && journalctl -u faasd --no-pager
|
||||
- wget https://github.com/caddyserver/caddy/releases/download/v2.1.1/caddy_2.1.1_linux_amd64.tar.gz -O /tmp/caddy.tar.gz && tar -zxvf /tmp/caddy.tar.gz -C /usr/bin/ caddy
|
||||
- wget https://raw.githubusercontent.com/caddyserver/dist/master/init/caddy.service -O /etc/systemd/system/caddy.service
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable caddy
|
||||
- systemctl start caddy
|
86
docs/bootstrap/digitalocean-terraform/main.tf
Normal file
@ -0,0 +1,86 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
}
|
||||
|
||||
variable "do_token" {
|
||||
description = "Digitalocean API token"
|
||||
}
|
||||
variable "do_domain" {
|
||||
description = "Your public domain"
|
||||
}
|
||||
variable "do_subdomain" {
|
||||
description = "Your public subdomain"
|
||||
default = "faasd"
|
||||
}
|
||||
variable "letsencrypt_email" {
|
||||
description = "Email used to order a certificate from Letsencrypt"
|
||||
}
|
||||
variable "do_create_record" {
|
||||
default = false
|
||||
description = "Whether to create a DNS record on Digitalocean"
|
||||
}
|
||||
variable "do_region" {
|
||||
default = "fra1"
|
||||
description = "The Digitalocean region where the faasd droplet will be created."
|
||||
}
|
||||
variable "ssh_key_file" {
|
||||
default = "~/.ssh/id_rsa.pub"
|
||||
description = "Path to the SSH public key file"
|
||||
}
|
||||
|
||||
provider "digitalocean" {
|
||||
token = var.do_token
|
||||
}
|
||||
|
||||
data "local_file" "ssh_key"{
|
||||
filename = pathexpand(var.ssh_key_file)
|
||||
}
|
||||
|
||||
resource "random_password" "password" {
|
||||
length = 16
|
||||
special = true
|
||||
override_special = "_-#"
|
||||
}
|
||||
|
||||
data "template_file" "cloud_init" {
|
||||
template = "${file("cloud-config.tpl")}"
|
||||
vars = {
|
||||
gw_password=random_password.password.result,
|
||||
ssh_key=data.local_file.ssh_key.content,
|
||||
faasd_domain_name="${var.do_subdomain}.${var.do_domain}"
|
||||
letsencrypt_email=var.letsencrypt_email
|
||||
}
|
||||
}
|
||||
|
||||
resource "digitalocean_droplet" "faasd" {
|
||||
region = var.do_region
|
||||
image = "ubuntu-18-04-x64"
|
||||
name = "faasd"
|
||||
size = "s-1vcpu-1gb"
|
||||
user_data = data.template_file.cloud_init.rendered
|
||||
}
|
||||
|
||||
resource "digitalocean_record" "faasd" {
|
||||
domain = var.do_domain
|
||||
type = "A"
|
||||
name = "faasd"
|
||||
value = digitalocean_droplet.faasd.ipv4_address
|
||||
# Only creates record if do_create_record is true
|
||||
count = var.do_create_record == true ? 1 : 0
|
||||
}
|
||||
|
||||
output "droplet_ip" {
|
||||
value = digitalocean_droplet.faasd.ipv4_address
|
||||
}
|
||||
|
||||
output "gateway_url" {
|
||||
value = "https://${var.do_subdomain}.${var.do_domain}/"
|
||||
}
|
||||
|
||||
output "password" {
|
||||
value = random_password.password.result
|
||||
}
|
||||
|
||||
output "login_cmd" {
|
||||
value = "faas-cli login -g https://${var.do_subdomain}.${var.do_domain}/ -p ${random_password.password.result}"
|
||||
}
|
4
docs/bootstrap/digitalocean-terraform/main.tfvars
Normal file
@ -0,0 +1,4 @@
|
||||
do_token = ""
|
||||
do_domain = ""
|
||||
do_subdomain = ""
|
||||
letsencrypt_email = ""
|
56
docs/bootstrap/main.tf
Normal file
@ -0,0 +1,56 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
}
|
||||
|
||||
variable "do_token" {}
|
||||
|
||||
variable "ssh_key_file" {
|
||||
default = "~/.ssh/id_rsa.pub"
|
||||
description = "Path to the SSH public key file"
|
||||
}
|
||||
|
||||
provider "digitalocean" {
|
||||
token = var.do_token
|
||||
}
|
||||
|
||||
resource "random_password" "password" {
|
||||
length = 16
|
||||
special = true
|
||||
override_special = "_-#"
|
||||
}
|
||||
|
||||
data "local_file" "ssh_key"{
|
||||
filename = pathexpand(var.ssh_key_file)
|
||||
}
|
||||
|
||||
data "template_file" "cloud_init" {
|
||||
template = "${file("cloud-config.tpl")}"
|
||||
vars = {
|
||||
gw_password=random_password.password.result,
|
||||
ssh_key=data.local_file.ssh_key.content,
|
||||
}
|
||||
}
|
||||
|
||||
resource "digitalocean_droplet" "faasd" {
|
||||
|
||||
region = "lon1"
|
||||
image = "ubuntu-18-04-x64"
|
||||
name = "faasd"
|
||||
# Plans: https://developers.digitalocean.com/documentation/changelog/api-v2/new-size-slugs-for-droplet-plan-changes/
|
||||
#size = "512mb"
|
||||
size = "s-1vcpu-1gb"
|
||||
user_data = data.template_file.cloud_init.rendered
|
||||
}
|
||||
|
||||
output "password" {
|
||||
value = random_password.password.result
|
||||
}
|
||||
|
||||
output "gateway_url" {
|
||||
value = "http://${digitalocean_droplet.faasd.ipv4_address}:8080/"
|
||||
}
|
||||
|
||||
output "login_cmd" {
|
||||
value = "faas-cli login -g http://${digitalocean_droplet.faasd.ipv4_address}:8080/ -p ${random_password.password.result}"
|
||||
}
|
||||
|
47
go.mod
Normal file
@ -0,0 +1,47 @@
|
||||
module github.com/openfaas/faasd
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92 // indirect
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05
|
||||
github.com/containerd/containerd v1.3.2
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02 // indirect
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c // indirect
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75
|
||||
github.com/containerd/ttrpc v1.0.0 // indirect
|
||||
github.com/containerd/typeurl v1.0.0 // indirect
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
||||
github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.3 // indirect
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad // indirect
|
||||
github.com/gogo/googleapis v1.2.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
|
||||
github.com/gorilla/mux v1.7.3
|
||||
github.com/imdario/mergo v0.3.9 // indirect
|
||||
github.com/morikuni/aec v1.0.0
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||
github.com/opencontainers/runc v1.0.0-rc9 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559
|
||||
github.com/openfaas/faas v0.0.0-20191227175319-80b6976c1063
|
||||
github.com/openfaas/faas-provider v0.15.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/sethvargo/go-password v0.1.3
|
||||
github.com/spf13/cobra v0.0.5
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
|
||||
github.com/vishvananda/netlink v1.1.0
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
go.opencensus.io v0.22.2 // indirect
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
|
||||
golang.org/x/sys v0.0.0-20191219235734-af0d71d358ab
|
||||
google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f // indirect
|
||||
google.golang.org/grpc v1.23.0 // indirect
|
||||
k8s.io/apimachinery v0.18.9
|
||||
)
|
314
go.sum
Normal file
@ -0,0 +1,314 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92 h1:LbNLS5KKzW/L4K2scH5rzTA5MvRWiUfcWv4Qh/6D3wY=
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92/go.mod h1:nvUXb1s75kCTKHWZ1FGlut+PBI9D9EoQHCKcil2Cyps=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alexellis/go-execute v0.0.0-20191207085904-961405ea7544/go.mod h1:zfRbgnPVxXCSpiKrg1CE72hNUWInqxExiaz2D9ppTts=
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e h1:0cv4CUENL7e67/ZlNrvExWqa6oKH/9iv0KQn0/+hYaY=
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e/go.mod h1:zfRbgnPVxXCSpiKrg1CE72hNUWInqxExiaz2D9ppTts=
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f h1:GYVsHjM9/lyu0QBfNp1RThZNDrmRJncpSYtaektq9t0=
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f/go.mod h1:slULyLX94hIIP3/eVeZqQAqOwpLYK9Pljr8SfaR2nWI=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05 h1:3CoRXflT4IAdIpsFTqZBlLuHOkHYhBvW0iijkQKm4QY=
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05/go.mod h1:y75QUr1jcR5aFNf3Tj3dhwnujABGz6UaRrZ5qZwF1cc=
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v0.0.0-20190214164719-faec567304bb/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02 h1:tN9D97v5A5QuKdcKHKt+UMKrkQ5YXUnD8iM7IAAjEfI=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75 h1:5Q5C6jDObSVpjeX8CuZ5yac8d/KIYuPzUHbUzdL+NFw=
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75/go.mod h1:0mg8r6FCdbxvLDqCXwAx2rO+KA37QICjKL8+wHOG5OE=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/ttrpc v0.0.0-20180920185216-2a805f718635/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v1.0.0 h1:NY8Zk2i7TpkLxrkOASo+KTFq9iNCEmMH2/ZG9OuOw6k=
|
||||
github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containerd/typeurl v1.0.0 h1:7LMH7LfEmpWeCkGcIputvd4P0Rnd0LrIv1Jk2s5oobs=
|
||||
github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=
|
||||
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d h1:SknEFm9d070Wn2GeX8dyl7bMrX07cp3UMXuZ2Ct02Kw=
|
||||
github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE=
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible h1:m+SEbBCq0i1e399zUpu70L8AYiTT5UiF7O0IO+5AorM=
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad h1:VXIse57M5C6ezDuCPyq6QmMvEJ2xclYKZ35SfkXdm3E=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/gogo/googleapis v1.2.0 h1:Z0v3OJDotX9ZBpdz2V+AI7F4fITSZhVE5mg6GQppwMM=
|
||||
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
|
||||
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
|
||||
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.3.1 h1:cCBH2gTD2K0OtLlv/Y5H01VQCqmlDxz30kS5Y5bqfLA=
|
||||
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2 h1:2C93eP55foV5f0eNmXbidhKzwUZbs/Gk4PRp1zfeffs=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
|
||||
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v0.0.0-20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559 h1:Cef96rKLuXxeGzERI/0ve9yAzIeTpx0qz9JKFDZALYw=
|
||||
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||
github.com/openfaas/faas v0.0.0-20191227175319-80b6976c1063 h1:9dxY2GyAGl6j3+g4RN0gHsDIk3dEHuKmqlB2mkGlJ7c=
|
||||
github.com/openfaas/faas v0.0.0-20191227175319-80b6976c1063/go.mod h1:E0m2rLup0Vvxg53BKxGgaYAGcZa3Xl+vvL7vSi5yQ14=
|
||||
github.com/openfaas/faas-provider v0.15.1 h1:5M+DrGbuKlZxhN3/otFgLgBUnPQcYm6XosQDSbKXh30=
|
||||
github.com/openfaas/faas-provider v0.15.1/go.mod h1:ChVioeB3snmfYxBTu7SrWGYLoYN9fZkQlnuEQ86a6l0=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sethvargo/go-password v0.1.2/go.mod h1:qKHfdSjT26DpHQWHWWR5+X4BI45jT31dg6j4RI2TEb0=
|
||||
github.com/sethvargo/go-password v0.1.3 h1:18KkbGDkw8SuzeohAbWqBLNSfRQblVwEHOLbPa0PvWM=
|
||||
github.com/sethvargo/go-password v0.1.3/go.mod h1:2tyaaoHK/AlXwh5WWQDYjqQbHcq4cjPj5qb/ciYvu/Q=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191219235734-af0d71d358ab h1:j8r8g0V3tVdbo274kyTmC+yEsChru2GfvdiV84wm5T8=
|
||||
golang.org/x/sys v0.0.0-20191219235734-af0d71d358ab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f h1:0RYv5T9ZdroAqqfM2taEB0nJrArv0X1JpIdgUmY4xg8=
|
||||
google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/apimachinery v0.18.9 h1:3ZABKQx3F3xPWlsGhCfUl8W+JXRRblV6Wo2A3zn0pvY=
|
||||
k8s.io/apimachinery v0.18.9/go.mod h1:PF5taHbXgTEJLU+xMypMmYTXTWPJ5LaW8bfsisxnEXk=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
@@ -1,11 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
export ARCH="armv6l"
|
||||
echo "Downloading Go"
|
||||
|
||||
curl -SLsf https://dl.google.com/go/go1.12.14.linux-armv6l.tar.gz > go.tgz
|
||||
curl -SLsf https://dl.google.com/go/go1.12.14.linux-$ARCH.tar.gz --output /tmp/go.tgz
|
||||
sudo rm -rf /usr/local/go/
|
||||
sudo mkdir -p /usr/local/go/
|
||||
sudo tar -xvf go.tgz -C /usr/local/go/ --strip-components=1
|
||||
sudo tar -xvf /tmp/go.tgz -C /usr/local/go/ --strip-components=1
|
||||
|
||||
export GOPATH=$HOME/go/
|
||||
export PATH=$PATH:/usr/local/go/bin/
|
||||
|
@@ -1,11 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
export ARCH="amd64"
|
||||
echo "Downloading Go"
|
||||
|
||||
curl -SLsf https://dl.google.com/go/go1.12.14.linux-amd64.tar.gz > go.tgz
|
||||
curl -SLsf https://dl.google.com/go/go1.12.14.linux-$ARCH.tar.gz --output /tmp/go.tgz
|
||||
sudo rm -rf /usr/local/go/
|
||||
sudo mkdir -p /usr/local/go/
|
||||
sudo tar -xvf go.tgz -C /usr/local/go/ --strip-components=1
|
||||
sudo tar -xvf /tmp/go.tgz -C /usr/local/go/ --strip-components=1
|
||||
|
||||
export GOPATH=$HOME/go/
|
||||
export PATH=$PATH:/usr/local/go/bin/
|
||||
|
@@ -1,12 +0,0 @@
|
||||
[Unit]
|
||||
Description=faasd-containerd
|
||||
|
||||
[Service]
|
||||
MemoryLimit=500M
|
||||
ExecStart=/usr/local/bin/faas-containerd
|
||||
Restart=on-failure
|
||||
RestartSec=10s
|
||||
WorkingDirectory=/usr/local/bin/
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
14
hack/faasd-provider.service
Normal file
@@ -0,0 +1,14 @@
|
||||
[Unit]
|
||||
Description=faasd-provider
|
||||
|
||||
[Service]
|
||||
MemoryLimit=500M
|
||||
Environment="secret_mount_path={{.SecretMountPath}}"
|
||||
Environment="basic_auth=true"
|
||||
ExecStart=/usr/local/bin/faasd provider
|
||||
Restart=on-failure
|
||||
RestartSec=10s
|
||||
WorkingDirectory={{.Cwd}}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
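Note: the unit file above is a Go template; the {{.SecretMountPath}} and {{.Cwd}} placeholders are presumably substituted when faasd installs the service. Below is a minimal, hypothetical sketch of that kind of substitution using Go's text/template package -- the unitValues struct, the example values and the output path are illustrative assumptions, not faasd's actual installer code.

package main

import (
	"os"
	"text/template"
)

// unitValues carries the fields referenced by the unit file template.
// The struct name and the values below are assumptions for this sketch only.
type unitValues struct {
	SecretMountPath string
	Cwd             string
}

func main() {
	tmpl, err := template.ParseFiles("hack/faasd-provider.service")
	if err != nil {
		panic(err)
	}

	// Writing to /etc/systemd/system normally requires root.
	out, err := os.Create("/etc/systemd/system/faasd-provider.service")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Fill in the {{.SecretMountPath}} and {{.Cwd}} placeholders.
	if err := tmpl.Execute(out, unitValues{
		SecretMountPath: "/var/lib/faasd/secrets",
		Cwd:             "/var/lib/faasd",
	}); err != nil {
		panic(err)
	}
}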
@@ -1,6 +1,6 @@
|
||||
[Unit]
|
||||
Description=faasd
|
||||
After=faas-containerd.service
|
||||
After=faasd-provider.service
|
||||
|
||||
[Service]
|
||||
MemoryLimit=500M
|
||||
|
141
hack/install.sh
Normal file
@@ -0,0 +1,141 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright OpenFaaS Author(s) 2020
|
||||
|
||||
#########################
|
||||
# Repo specific content #
|
||||
#########################
|
||||
|
||||
export OWNER="openfaas"
|
||||
export REPO="faasd"
|
||||
|
||||
version=""
|
||||
|
||||
echo "Finding latest version from GitHub"
|
||||
version=$(curl -sI https://github.com/$OWNER/$REPO/releases/latest | grep -i location | awk -F"/" '{ printf "%s", $NF }' | tr -d '\r')
|
||||
echo "$version"
|
||||
|
||||
if [ -z "$version" ]; then
|
||||
echo "Failed while attempting to get latest version"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SUDO=sudo
|
||||
if [ "$(id -u)" -eq 0 ]; then
|
||||
SUDO=
|
||||
fi
|
||||
|
||||
verify_system() {
|
||||
if ! [ -d /run/systemd ]; then
|
||||
fatal 'Can not find systemd to use as a process supervisor for faasd'
|
||||
fi
|
||||
}
|
||||
|
||||
has_yum() {
|
||||
[ -n "$(command -v yum)" ]
|
||||
}
|
||||
|
||||
has_apt_get() {
|
||||
[ -n "$(command -v apt-get)" ]
|
||||
}
|
||||
|
||||
install_required_packages() {
|
||||
if has_apt_get; then
|
||||
$SUDO apt-get update -y
|
||||
$SUDO apt-get install -y curl runc bridge-utils
|
||||
elif has_yum; then
|
||||
$SUDO yum check-update -y
|
||||
$SUDO yum install -y curl runc
|
||||
else
|
||||
fatal "Could not find apt-get or yum. Cannot install dependencies on this OS."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
install_cni_plugins() {
|
||||
cni_version=v0.8.5
|
||||
suffix=""
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
suffix=amd64
|
||||
;;
|
||||
arm*)
|
||||
suffix=arm
|
||||
;;
|
||||
*)
|
||||
fatal "Unsupported architecture $arch"
|
||||
;;
|
||||
esac
|
||||
|
||||
$SUDO mkdir -p /opt/cni/bin
|
||||
curl -sSL https://github.com/containernetworking/plugins/releases/download/${cni_version}/cni-plugins-linux-${suffix}-${cni_version}.tgz | $SUDO tar -xvz -C /opt/cni/bin
|
||||
}
|
||||
|
||||
install_containerd() {
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.7/containerd-1.3.7-linux-amd64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
armv7l)
|
||||
curl -sSL https://github.com/alexellis/containerd-arm/releases/download/v1.3.5/containerd-1.3.5-linux-armhf.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
aarch64)
|
||||
curl -sSL https://github.com/alexellis/containerd-arm/releases/download/v1.3.5/containerd-1.3.5-linux-arm64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
*)
|
||||
fatal "Unsupported architecture $arch"
|
||||
;;
|
||||
esac
|
||||
|
||||
$SUDO curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service --output /etc/systemd/system/containerd.service
|
||||
|
||||
$SUDO systemctl enable containerd
|
||||
$SUDO systemctl start containerd
|
||||
|
||||
sleep 5
|
||||
}
|
||||
|
||||
install_faasd() {
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
suffix=""
|
||||
;;
|
||||
aarch64)
|
||||
suffix=-arm64
|
||||
;;
|
||||
armv7l)
|
||||
suffix=-armhf
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported architecture $arch"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
$SUDO curl -fSLs "https://github.com/openfaas/faasd/releases/download/${version}/faasd${suffix}" --output "/usr/local/bin/faasd"
|
||||
$SUDO chmod a+x "/usr/local/bin/faasd"
|
||||
|
||||
mkdir -p /tmp/faasd-${version}-installation/hack
|
||||
cd /tmp/faasd-${version}-installation
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/docker-compose.yaml" --output "docker-compose.yaml"
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/prometheus.yml" --output "prometheus.yml"
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/resolv.conf" --output "resolv.conf"
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/hack/faasd-provider.service" --output "hack/faasd-provider.service"
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/hack/faasd.service" --output "hack/faasd.service"
|
||||
$SUDO /usr/local/bin/faasd install
|
||||
|
||||
sleep 5
|
||||
}
|
||||
|
||||
verify_system
|
||||
install_required_packages
|
||||
|
||||
$SUDO /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
echo "net.ipv4.conf.all.forwarding=1" | $SUDO tee -a /etc/sysctl.conf
|
||||
|
||||
install_cni_plugins
|
||||
install_containerd
|
||||
install_faasd
|
30
main.go
@@ -1,16 +1,38 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/alexellis/faasd/cmd"
|
||||
"github.com/alexellis/faasd/pkg"
|
||||
"github.com/openfaas/faasd/cmd"
|
||||
)
|
||||
|
||||
// These values will be injected into these variables at build time.
|
||||
var (
|
||||
// GitCommit Git Commit SHA
|
||||
GitCommit string
|
||||
// Version version of the CLI
|
||||
Version string
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := cmd.Execute(pkg.Version, pkg.GitCommit); err != nil {
|
||||
|
||||
if _, ok := os.LookupEnv("CONTAINER_ID"); ok {
|
||||
collect := cmd.RootCommand()
|
||||
collect.SetArgs([]string{"collect"})
|
||||
collect.SilenceUsage = true
|
||||
collect.SilenceErrors = true
|
||||
|
||||
err := collect.Execute()
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if err := cmd.Execute(Version, GitCommit); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
return
|
||||
|
||||
}
|
||||
|
228
pkg/cninetwork/cni_network.go
Normal file
@@ -0,0 +1,228 @@
|
||||
package cninetwork
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// CNIBinDir describes the directory where the CNI binaries are stored
|
||||
CNIBinDir = "/opt/cni/bin"
|
||||
// CNIConfDir describes the directory where the CNI plugin's configuration is stored
|
||||
CNIConfDir = "/etc/cni/net.d"
|
||||
// NetNSPathFmt gives the path to a process's network namespace, given its pid
|
||||
NetNSPathFmt = "/proc/%d/ns/net"
|
||||
// CNIResultsDir is the directory where CNI stores the allocated IPs for containers
|
||||
CNIResultsDir = "/var/lib/cni/results"
|
||||
// defaultCNIConfFilename is the filename of the default CNI configuration file
|
||||
defaultCNIConfFilename = "10-openfaas.conflist"
|
||||
// defaultNetworkName names the "docker-bridge"-like CNI plugin-chain installed when no other CNI configuration is present.
|
||||
// This value appears in iptables comments created by CNI.
|
||||
defaultNetworkName = "openfaas-cni-bridge"
|
||||
// defaultBridgeName is the default bridge device name used in the defaultCNIConf
|
||||
defaultBridgeName = "openfaas0"
|
||||
// defaultSubnet is the default subnet used in the defaultCNIConf -- this value is chosen so as not to collide with common container networking subnets
|
||||
defaultSubnet = "10.62.0.0/16"
|
||||
)
|
||||
|
||||
// defaultCNIConf is a CNI configuration that enables network access to containers (docker-bridge style)
|
||||
var defaultCNIConf = fmt.Sprintf(`
|
||||
{
|
||||
"cniVersion": "0.4.0",
|
||||
"name": "%s",
|
||||
"plugins": [
|
||||
{
|
||||
"type": "bridge",
|
||||
"bridge": "%s",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "%s",
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" }
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "firewall"
|
||||
}
|
||||
]
|
||||
}
|
||||
`, defaultNetworkName, defaultBridgeName, defaultSubnet)
|
||||
|
||||
// InitNetwork writes configlist file and initializes CNI network
|
||||
func InitNetwork() (gocni.CNI, error) {
|
||||
|
||||
log.Printf("Writing network config...\n")
|
||||
if !dirExists(CNIConfDir) {
|
||||
if err := os.MkdirAll(CNIConfDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("cannot create directory: %s", CNIConfDir)
|
||||
}
|
||||
}
|
||||
|
||||
netConfig := path.Join(CNIConfDir, defaultCNIConfFilename)
|
||||
if err := ioutil.WriteFile(netConfig, []byte(defaultCNIConf), 0644); err != nil {
|
||||
return nil, fmt.Errorf("cannot write network config: %s", defaultCNIConfFilename)
|
||||
|
||||
}
|
||||
// Initialize CNI library
|
||||
cni, err := gocni.New(gocni.WithPluginConfDir(CNIConfDir),
|
||||
gocni.WithPluginDir([]string{CNIBinDir}))
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing cni: %s", err)
|
||||
}
|
||||
|
||||
// Load the cni configuration
|
||||
if err := cni.Load(gocni.WithLoNetwork, gocni.WithConfListFile(filepath.Join(CNIConfDir, defaultCNIConfFilename))); err != nil {
|
||||
return nil, fmt.Errorf("failed to load cni configuration: %v", err)
|
||||
}
|
||||
|
||||
return cni, nil
|
||||
}
|
||||
|
||||
// CreateCNINetwork creates a CNI network interface and attaches it to the context
|
||||
func CreateCNINetwork(ctx context.Context, cni gocni.CNI, task containerd.Task, labels map[string]string) (*gocni.CNIResult, error) {
|
||||
id := netID(task)
|
||||
netns := netNamespace(task)
|
||||
result, err := cni.Setup(ctx, id, netns, gocni.WithLabels(labels))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to setup network for task %q: %v", id, err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// DeleteCNINetwork deletes a CNI network based on task ID and Pid
|
||||
func DeleteCNINetwork(ctx context.Context, cni gocni.CNI, client *containerd.Client, name string) error {
|
||||
container, containerErr := client.LoadContainer(ctx, name)
|
||||
if containerErr == nil {
|
||||
task, err := container.Task(ctx, nil)
|
||||
if err != nil {
|
||||
log.Printf("[Delete] unable to find task for container: %s\n", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf("[Delete] removing CNI network for: %s\n", task.ID())
|
||||
|
||||
id := netID(task)
|
||||
netns := netNamespace(task)
|
||||
|
||||
if err := cni.Remove(ctx, id, netns); err != nil {
|
||||
return errors.Wrapf(err, "Failed to remove network for task: %q, %v", id, err)
|
||||
}
|
||||
log.Printf("[Delete] removed: %s from namespace: %s, ID: %s\n", name, netns, id)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrapf(containerErr, "Unable to find container: %s, error: %s", name, containerErr)
|
||||
}
|
||||
|
||||
// GetIPAddress returns the IP address of the created container
|
||||
func GetIPAddress(result *gocni.CNIResult, task containerd.Task) (net.IP, error) {
|
||||
// Get the IP of the created interface
|
||||
var ip net.IP
|
||||
for ifName, config := range result.Interfaces {
|
||||
if config.Sandbox == netNamespace(task) {
|
||||
for _, ipConfig := range config.IPConfigs {
|
||||
if ifName != "lo" && ipConfig.IP.To4() != nil {
|
||||
ip = ipConfig.IP
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if ip == nil {
|
||||
return nil, fmt.Errorf("unable to get IP address for: %s", task.ID())
|
||||
}
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
func GetIPfromPID(pid int) (*net.IP, error) {
|
||||
// https://github.com/weaveworks/weave/blob/master/net/netdev.go
|
||||
|
||||
peerIDs, err := ConnectedToBridgeVethPeerIds(defaultBridgeName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to find peers on: %s %s", defaultBridgeName, err)
|
||||
}
|
||||
|
||||
addrs, addrsErr := GetNetDevsByVethPeerIds(pid, peerIDs)
|
||||
if addrsErr != nil {
|
||||
return nil, fmt.Errorf("unable to find address for veth pair using: %v %s", peerIDs, addrsErr)
|
||||
}
|
||||
|
||||
if len(addrs) > 0 && len(addrs[0].CIDRs) > 0 {
|
||||
return &addrs[0].CIDRs[0].IP, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no IP found for function")
|
||||
}
|
||||
|
||||
// CNIGateway returns the gateway for default subnet
|
||||
func CNIGateway() (string, error) {
|
||||
ip, _, err := net.ParseCIDR(defaultSubnet)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error formatting gateway for network %s", defaultSubnet)
|
||||
}
|
||||
ip = ip.To4()
|
||||
ip[3] = 1
|
||||
return ip.String(), nil
|
||||
}
|
||||
|
||||
// netID generates the network ID based on the task name and task PID
|
||||
func netID(task containerd.Task) string {
|
||||
return fmt.Sprintf("%s-%d", task.ID(), task.Pid())
|
||||
}
|
||||
|
||||
// netNamespace generates the namespace path based on task PID.
|
||||
func netNamespace(task containerd.Task) string {
|
||||
return fmt.Sprintf(NetNSPathFmt, task.Pid())
|
||||
}
|
||||
|
||||
func dirEmpty(dirname string) (isEmpty bool) {
|
||||
if !dirExists(dirname) {
|
||||
return
|
||||
}
|
||||
|
||||
f, err := os.Open(dirname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
// If the first read returns EOF, the directory is empty
|
||||
if _, err = f.Readdir(1); err == io.EOF {
|
||||
isEmpty = true
|
||||
}
|
||||
return isEmpty
|
||||
}
|
||||
|
||||
func dirExists(dirname string) bool {
|
||||
exists, info := pathExists(dirname)
|
||||
if !exists {
|
||||
return false
|
||||
}
|
||||
|
||||
return info.IsDir()
|
||||
}
|
||||
|
||||
func pathExists(path string) (bool, os.FileInfo) {
|
||||
info, err := os.Stat(path)
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, info
|
||||
}
|
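For orientation, here is a minimal usage sketch chaining the functions defined above. It is illustrative only and not part of this change: it assumes containerd is reachable on its default socket and that a container named "figlet" with a running task already exists in the openfaas-fn namespace.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"

	"github.com/openfaas/faasd/pkg/cninetwork"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "openfaas-fn")

	// Assumption: this container and its task were created elsewhere.
	container, err := client.LoadContainer(ctx, "figlet")
	if err != nil {
		log.Fatal(err)
	}
	task, err := container.Task(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Write 10-openfaas.conflist (if needed) and load the CNI configuration.
	cni, err := cninetwork.InitNetwork()
	if err != nil {
		log.Fatal(err)
	}

	// Attach the task to the openfaas0 bridge, then read back its IP.
	result, err := cninetwork.CreateCNINetwork(ctx, cni, task, map[string]string{})
	if err != nil {
		log.Fatal(err)
	}

	ip, err := cninetwork.GetIPAddress(result, task)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("task %s has IP %s", task.ID(), ip.String())
}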
@@ -3,7 +3,7 @@
|
||||
// Copyright Weaveworks
|
||||
// github.com/weaveworks/weave/net
|
||||
|
||||
package weave
|
||||
package cninetwork
|
||||
|
||||
import (
|
||||
"errors"
|
@@ -1,7 +1,7 @@
|
||||
// Copyright Weaveworks
|
||||
// github.com/weaveworks/weave/net
|
||||
|
||||
package weave
|
||||
package cninetwork
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -18,19 +18,6 @@ type Dev struct {
|
||||
CIDRs []*net.IPNet `json:"CIDRs,omitempty"`
|
||||
}
|
||||
|
||||
func linkToNetDev(link netlink.Link) (Dev, error) {
|
||||
addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
|
||||
if err != nil {
|
||||
return Dev{}, err
|
||||
}
|
||||
|
||||
netDev := Dev{Name: link.Attrs().Name, MAC: link.Attrs().HardwareAddr}
|
||||
for _, addr := range addrs {
|
||||
netDev.CIDRs = append(netDev.CIDRs, addr.IPNet)
|
||||
}
|
||||
return netDev, nil
|
||||
}
|
||||
|
||||
// ConnectedToBridgeVethPeerIds returns peer indexes of veth links connected to
|
||||
// the given bridge. The peer index is used to query from a container netns
|
||||
// whether the container is connected to the bridge.
|
10
pkg/cninetwork/weave_darwin.go
Normal file
@@ -0,0 +1,10 @@
|
||||
// +build darwin
|
||||
|
||||
package cninetwork
|
||||
|
||||
import "github.com/vishvananda/netlink"
|
||||
|
||||
func linkToNetDev(link netlink.Link) (Dev, error) {
|
||||
|
||||
return Dev{}, nil
|
||||
}
|
19
pkg/cninetwork/weave_linux.go
Normal file
@@ -0,0 +1,19 @@
|
||||
// +build linux
|
||||
|
||||
package cninetwork
|
||||
|
||||
import "github.com/vishvananda/netlink"
|
||||
|
||||
func linkToNetDev(link netlink.Link) (Dev, error) {
|
||||
|
||||
addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
|
||||
if err != nil {
|
||||
return Dev{}, err
|
||||
}
|
||||
|
||||
netDev := Dev{Name: link.Attrs().Name, MAC: link.Attrs().HardwareAddr}
|
||||
for _, addr := range addrs {
|
||||
netDev.CIDRs = append(netDev.CIDRs, addr.IPNet)
|
||||
}
|
||||
return netDev, nil
|
||||
}
|
6
pkg/constants.go
Normal file
@@ -0,0 +1,6 @@
|
||||
package pkg
|
||||
|
||||
const (
|
||||
// FunctionNamespace is the default containerd namespace that functions are created in
|
||||
FunctionNamespace = "openfaas-fn"
|
||||
)
|
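A brief, hypothetical sketch of how this constant would typically be used: containerd calls are scoped to the openfaas-fn namespace via a namespaced context. The socket path and the container listing are illustrative assumptions, not code from this change.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"

	"github.com/openfaas/faasd/pkg"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Scope all containerd operations to the namespace faasd uses for functions.
	ctx := namespaces.WithNamespace(context.Background(), pkg.FunctionNamespace)

	containers, err := client.Containers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		log.Println("function container:", c.ID())
	}
}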
106
pkg/depgraph/depgraph.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package depgraph
|
||||
|
||||
import "log"
|
||||
|
||||
// Node represents a node in a Graph with
|
||||
// 0 to many edges
|
||||
type Node struct {
|
||||
Name string
|
||||
Edges []*Node
|
||||
}
|
||||
|
||||
// Graph is a collection of nodes
|
||||
type Graph struct {
|
||||
nodes []*Node
|
||||
}
|
||||
|
||||
func NewDepgraph() *Graph {
|
||||
return &Graph{
|
||||
nodes: []*Node{},
|
||||
}
|
||||
}
|
||||
|
||||
// Nodes returns the nodes within the graph
|
||||
func (g *Graph) Nodes() []*Node {
|
||||
return g.nodes
|
||||
}
|
||||
|
||||
// Contains returns true if the target Node is found
|
||||
// in its list
|
||||
func (g *Graph) Contains(target *Node) bool {
|
||||
for _, g := range g.nodes {
|
||||
if g.Name == target.Name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Add places a Node into the current Graph
|
||||
func (g *Graph) Add(target *Node) {
|
||||
g.nodes = append(g.nodes, target)
|
||||
}
|
||||
|
||||
// Remove deletes a target Node reference from the
|
||||
// list of nodes in the graph
|
||||
func (g *Graph) Remove(target *Node) {
|
||||
var found *int
|
||||
for i, n := range g.nodes {
|
||||
if n == target {
|
||||
found = &i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found != nil {
|
||||
g.nodes = append(g.nodes[:*found], g.nodes[*found+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve returns a list of node names in order of their dependencies.
|
||||
// A use case may be for determining the correct order to install
|
||||
// software packages, or to start services.
|
||||
// Based upon the algorithm described by Ferry Boender in the following article
|
||||
// https://www.electricmonk.nl/log/2008/08/07/dependency-resolving-algorithm/
|
||||
func (g *Graph) Resolve() []string {
|
||||
resolved := &Graph{}
|
||||
unresolved := &Graph{}
|
||||
for _, node := range g.nodes {
|
||||
resolve(node, resolved, unresolved)
|
||||
}
|
||||
|
||||
order := []string{}
|
||||
|
||||
for _, node := range resolved.Nodes() {
|
||||
order = append(order, node.Name)
|
||||
}
|
||||
|
||||
return order
|
||||
}
|
||||
|
||||
// resolve mutates the resolved graph for a given starting
|
||||
// node. The unresolved graph is used to detect a circular
|
||||
// dependency, in which case the function panics. The panic can be
|
||||
// recovered by the caller, for example by running Resolve in a goroutine.
|
||||
func resolve(node *Node, resolved, unresolved *Graph) {
|
||||
unresolved.Add(node)
|
||||
|
||||
for _, edge := range node.Edges {
|
||||
|
||||
if !resolved.Contains(edge) && unresolved.Contains(edge) {
|
||||
log.Panicf("edge: %s may be a circular dependency", edge.Name)
|
||||
}
|
||||
|
||||
resolve(edge, resolved, unresolved)
|
||||
}
|
||||
|
||||
for _, r := range resolved.nodes {
|
||||
if r.Name == node.Name {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
resolved.Add(node)
|
||||
unresolved.Remove(node)
|
||||
}
|
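A small usage sketch for the graph type above (illustrative, not part of this change): nodes are added to a Graph, edges point at dependencies, and Resolve lists dependencies before their dependents. The service names are examples only.

package main

import (
	"log"

	"github.com/openfaas/faasd/pkg/depgraph"
)

func main() {
	gateway := &depgraph.Node{Name: "gateway"}
	nats := &depgraph.Node{Name: "nats"}
	queueWorker := &depgraph.Node{Name: "queue-worker"}

	// gateway and queue-worker both depend on nats.
	gateway.Edges = append(gateway.Edges, nats)
	queueWorker.Edges = append(queueWorker.Edges, nats)

	g := depgraph.NewDepgraph()
	g.Add(gateway)
	g.Add(nats)
	g.Add(queueWorker)

	// Prints the names with dependencies first, e.g. [nats gateway queue-worker].
	log.Println(g.Resolve())
}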
41
pkg/depgraph/depgraph_test.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package depgraph
|
||||
|
||||
import "testing"
|
||||
|
||||
func Test_RemoveMedial(t *testing.T) {
|
||||
g := Graph{nodes: []*Node{}}
|
||||
a := &Node{Name: "A"}
|
||||
b := &Node{Name: "B"}
|
||||
c := &Node{Name: "C"}
|
||||
|
||||
g.nodes = append(g.nodes, a)
|
||||
g.nodes = append(g.nodes, b)
|
||||
g.nodes = append(g.nodes, c)
|
||||
|
||||
g.Remove(b)
|
||||
|
||||
for _, n := range g.nodes {
|
||||
if n.Name == b.Name {
|
||||
t.Fatalf("Found deleted node: %s", n.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_RemoveFinal(t *testing.T) {
|
||||
g := Graph{nodes: []*Node{}}
|
||||
a := &Node{Name: "A"}
|
||||
b := &Node{Name: "B"}
|
||||
c := &Node{Name: "C"}
|
||||
|
||||
g.nodes = append(g.nodes, a)
|
||||
g.nodes = append(g.nodes, b)
|
||||
g.nodes = append(g.nodes, c)
|
||||
|
||||
g.Remove(c)
|
||||
|
||||
for _, n := range g.nodes {
|
||||
if n.Name == c.Name {
|
||||
t.Fatalf("Found deleted node: %s", c.Name)
|
||||
}
|
||||
}
|
||||
}
|
41
pkg/deployment_order.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/openfaas/faasd/pkg/depgraph"
|
||||
)
|
||||
|
||||
func buildDeploymentOrder(svcs []Service) []string {
|
||||
|
||||
graph := buildServiceGraph(svcs)
|
||||
|
||||
order := graph.Resolve()
|
||||
|
||||
log.Printf("Start-up order:\n")
|
||||
for _, node := range order {
|
||||
log.Printf("- %s\n", node)
|
||||
}
|
||||
|
||||
return order
|
||||
}
|
||||
|
||||
func buildServiceGraph(svcs []Service) *depgraph.Graph {
|
||||
graph := depgraph.NewDepgraph()
|
||||
|
||||
nodeMap := map[string]*depgraph.Node{}
|
||||
for _, s := range svcs {
|
||||
n := &depgraph.Node{Name: s.Name}
|
||||
nodeMap[s.Name] = n
|
||||
graph.Add(n)
|
||||
|
||||
}
|
||||
|
||||
for _, s := range svcs {
|
||||
for _, d := range s.DependsOn {
|
||||
nodeMap[s.Name].Edges = append(nodeMap[s.Name].Edges, nodeMap[d])
|
||||
}
|
||||
}
|
||||
|
||||
return graph
|
||||
}
|
224
pkg/deployment_order_test.go
Normal file
@@ -0,0 +1,224 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"log"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_buildDeploymentOrder_ARequiresB(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "A",
|
||||
DependsOn: []string{"B"},
|
||||
},
|
||||
{
|
||||
Name: "B",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
got := order[0]
|
||||
want := "B"
|
||||
if got != want {
|
||||
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrder_ARequiresBAndC(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "A",
|
||||
DependsOn: []string{"B", "C"},
|
||||
},
|
||||
{
|
||||
Name: "B",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
{
|
||||
Name: "C",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
a := indexStr(order, "a")
|
||||
b := indexStr(order, "b")
|
||||
c := indexStr(order, "c")
|
||||
|
||||
if a < b {
|
||||
t.Fatalf("a should be after dependencies")
|
||||
}
|
||||
if a < c {
|
||||
t.Fatalf("a should be after dependencies")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrder_ARequiresBRequiresC(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "A",
|
||||
DependsOn: []string{"B"},
|
||||
},
|
||||
{
|
||||
Name: "B",
|
||||
DependsOn: []string{"C"},
|
||||
},
|
||||
{
|
||||
Name: "C",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
got := order[0]
|
||||
want := "C"
|
||||
if got != want {
|
||||
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||
}
|
||||
got = order[1]
|
||||
want = "B"
|
||||
if got != want {
|
||||
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||
}
|
||||
got = order[2]
|
||||
want = "A"
|
||||
if got != want {
|
||||
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrderCircularARequiresBRequiresA(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "A",
|
||||
DependsOn: []string{"B"},
|
||||
},
|
||||
{
|
||||
Name: "B",
|
||||
DependsOn: []string{"A"},
|
||||
},
|
||||
}
|
||||
|
||||
defer func() { recover() }()
|
||||
|
||||
buildDeploymentOrder(svcs)
|
||||
|
||||
t.Fatalf("did not panic as expected")
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrderComposeFile(t *testing.T) {
|
||||
// svcs := []Service{}
|
||||
file, err := LoadComposeFileWithArch("../", "docker-compose.yaml", func() (string, string) {
|
||||
return "x86_64", "Linux"
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("unable to load compose file: %s", err)
|
||||
}
|
||||
|
||||
svcs, err := ParseCompose(file)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to parse compose file: %s", err)
|
||||
}
|
||||
|
||||
for _, s := range svcs {
|
||||
log.Printf("Service: %s\n", s.Name)
|
||||
for _, d := range s.DependsOn {
|
||||
log.Printf("Link: %s => %s\n", s.Name, d)
|
||||
}
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
queueWorker := indexStr(order, "queue-worker")
|
||||
nats := indexStr(order, "nats")
|
||||
gateway := indexStr(order, "gateway")
|
||||
prometheus := indexStr(order, "prometheus")
|
||||
|
||||
if prometheus > gateway {
|
||||
t.Fatalf("Prometheus order was after gateway, and should be before")
|
||||
}
|
||||
if nats > gateway {
|
||||
t.Fatalf("NATS order was after gateway, and should be before")
|
||||
}
|
||||
if nats > queueWorker {
|
||||
t.Fatalf("NATS order was after queue-worker, and should be before")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrderOpenFaaS(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "queue-worker",
|
||||
DependsOn: []string{"nats"},
|
||||
},
|
||||
{
|
||||
Name: "prometheus",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
{
|
||||
Name: "gateway",
|
||||
DependsOn: []string{"prometheus", "nats", "basic-auth-plugin"},
|
||||
},
|
||||
{
|
||||
Name: "basic-auth-plugin",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
{
|
||||
Name: "nats",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
queueWorker := indexStr(order, "queue-worker")
|
||||
nats := indexStr(order, "nats")
|
||||
gateway := indexStr(order, "gateway")
|
||||
prometheus := indexStr(order, "prometheus")
|
||||
|
||||
if prometheus > gateway {
|
||||
t.Fatalf("Prometheus order was after gateway, and should be before")
|
||||
}
|
||||
if nats > gateway {
|
||||
t.Fatalf("NATS order was after gateway, and should be before")
|
||||
}
|
||||
if nats > queueWorker {
|
||||
t.Fatalf("NATS order was after queue-worker, and should be before")
|
||||
}
|
||||
}
|
||||
|
||||
func indexStr(st []string, t string) int {
|
||||
for n, s := range st {
|
||||
if s == t {
|
||||
return n
|
||||
}
|
||||
|
||||
}
|
||||
return -1
|
||||
}
|
104 pkg/local_resolver.go (Normal file)
@@ -0,0 +1,104 @@
package pkg

import (
    "io/ioutil"
    "log"
    "os"
    "strings"
    "sync"
    "time"
)

// LocalResolver provides hostname to IP look-up for faasd core services
type LocalResolver struct {
    Path  string
    Map   map[string]string
    Mutex *sync.RWMutex
}

// NewLocalResolver creates a new resolver for reading from a hosts file
func NewLocalResolver(path string) Resolver {
    return &LocalResolver{
        Path:  path,
        Mutex: &sync.RWMutex{},
        Map:   make(map[string]string),
    }
}

// Start polling the disk for the hosts file in Path
func (l *LocalResolver) Start() {
    var lastStat os.FileInfo

    for {
        rebuild := false
        if info, err := os.Stat(l.Path); err == nil {
            if lastStat == nil {
                rebuild = true
            } else {
                if !lastStat.ModTime().Equal(info.ModTime()) {
                    rebuild = true
                }
            }
            lastStat = info
        }

        if rebuild {
            log.Printf("Resolver rebuilding map")
            l.rebuild()
        }
        time.Sleep(time.Second * 3)
    }
}

func (l *LocalResolver) rebuild() {
    l.Mutex.Lock()
    defer l.Mutex.Unlock()

    fileData, fileErr := ioutil.ReadFile(l.Path)
    if fileErr != nil {
        log.Printf("resolver rebuild error: %s", fileErr.Error())
        return
    }

    lines := strings.Split(string(fileData), "\n")

    for _, line := range lines {
        index := strings.Index(line, "\t")

        if len(line) > 0 && index > -1 {
            ip := line[:index]
            host := line[index+1:]
            log.Printf("Resolver: %q=%q", host, ip)
            l.Map[host] = ip
        }
    }
}

// Get resolves a hostname to an IP, or times out after the duration has passed
func (l *LocalResolver) Get(upstream string, got chan<- string, timeout time.Duration) {
    start := time.Now()
    for {
        if val := l.get(upstream); len(val) > 0 {
            got <- val
            break
        }

        if time.Now().After(start.Add(timeout)) {
            log.Printf("Timed out after %s getting host %q", timeout.String(), upstream)
            break
        }

        time.Sleep(time.Millisecond * 250)
    }
}

func (l *LocalResolver) get(upstream string) string {
    l.Mutex.RLock()
    defer l.Mutex.RUnlock()

    if val, ok := l.Map[upstream]; ok {
        return val
    }

    return ""
}
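As a quick illustration of the channel-based API above, a caller hands Get a buffered channel and a timeout, then blocks on the channel for the resolved IP. The sketch below is illustrative only and assumes a LocalResolver whose Start loop is already polling a hosts file elsewhere; "gateway" is an example hostname, not a fixed value.

package pkg

import (
    "log"
    "time"
)

// resolverSketch shows how a caller might wait on the Resolver interface for an IP.
// "gateway" is an illustrative hostname; the resolver is assumed to be started elsewhere.
func resolverSketch(resolver Resolver) {
    got := make(chan string, 1)
    go resolver.Get("gateway", got, time.Second*5)

    // Get sends the IP once the host appears in the file; on timeout it logs
    // and gives up without sending, so real callers should guard this receive.
    ip := <-got
    log.Printf("gateway resolves to: %q", ip)
}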
183 pkg/logs/requestor.go (Normal file)
@@ -0,0 +1,183 @@
package logs

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "log"
    "os/exec"
    "strconv"
    "strings"
    "time"

    "github.com/openfaas/faas-provider/logs"

    faasd "github.com/openfaas/faasd/pkg"
)

type requester struct{}

// New returns a new journalctl log Requester
func New() logs.Requester {
    return &requester{}
}

// Query submits a log request to the actual logging system.
func (r *requester) Query(ctx context.Context, req logs.Request) (<-chan logs.Message, error) {
    _, err := exec.LookPath("journalctl")
    if err != nil {
        return nil, fmt.Errorf("can not find journalctl: %w", err)
    }

    cmd := buildCmd(ctx, req)
    stdout, err := cmd.StdoutPipe()
    if err != nil {
        return nil, fmt.Errorf("failed to create journalctl pipe: %w", err)
    }

    stderr, err := cmd.StderrPipe()
    if err != nil {
        return nil, fmt.Errorf("failed to create journalctl err pipe: %w", err)
    }

    err = cmd.Start()
    if err != nil {
        return nil, fmt.Errorf("failed to create journalctl: %w", err)
    }

    // call start and get the stdout prior to streaming so that we can return a meaningful
    // error for as long as possible. If the cmd starts correctly, we are highly likely to
    // succeed anyway
    msgs := make(chan logs.Message)
    go streamLogs(ctx, cmd, stdout, msgs)
    go logErrOut(stderr)

    return msgs, nil
}

// buildCmd returns the equivalent of
//
//   journalctl -t <namespace>:<name> \
//     --output=json \
//     --since=<timestamp> \
//     <--follow> \
func buildCmd(ctx context.Context, req logs.Request) *exec.Cmd {
    // set the cursor position based on req, default to 5m
    since := time.Now().Add(-5 * time.Minute)
    if req.Since != nil && req.Since.Before(time.Now()) {
        since = *req.Since
    }

    namespace := req.Namespace
    if namespace == "" {
        namespace = faasd.FunctionNamespace
    }

    // find the description of the fields here
    // https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html
    // the available fields can vary greatly, the selected fields were determined by
    // trial and error with journalctl in an ubuntu VM (via multipass)
    args := []string{
        "--utc",
        "--no-pager",
        "--output=json",
        "--identifier=" + namespace + ":" + req.Name,
        fmt.Sprintf("--since=%s", since.UTC().Format("2006-01-02 15:04:05")),
    }

    if req.Follow {
        args = append(args, "--follow")
    }

    if req.Tail > 0 {
        args = append(args, fmt.Sprintf("--lines=%d", req.Tail))
    }

    return exec.CommandContext(ctx, "journalctl", args...)
}

// streamLogs copies log entries from the journalctl `cmd`/`out` to `msgs`
// the loop is based on the Decoder example in the docs
// https://golang.org/pkg/encoding/json/#Decoder.Decode
func streamLogs(ctx context.Context, cmd *exec.Cmd, out io.ReadCloser, msgs chan logs.Message) {
    log.Println("starting journal stream using ", cmd.String())

    // will ensure `out` is closed and all related resources cleaned up
    go func() {
        err := cmd.Wait()
        log.Println("wait result", err)
    }()

    defer func() {
        log.Println("closing journal stream")
        close(msgs)
    }()

    dec := json.NewDecoder(out)
    for dec.More() {
        if ctx.Err() != nil {
            log.Println("log stream context cancelled")
            return
        }

        // journalctl outputs all the values as strings, so a struct with json
        // tags won't help much
        entry := map[string]string{}
        err := dec.Decode(&entry)
        if err != nil {
            log.Printf("error decoding journalctl output: %s", err)
            return
        }

        msg, err := parseEntry(entry)
        if err != nil {
            log.Printf("error parsing journalctl output: %s", err)
            return
        }

        msgs <- msg
    }
}

// parseEntry reads the deserialized json from journalctl into a logs.Message
//
// The following fields are parsed from the journal
// - MESSAGE
// - _PID
// - SYSLOG_IDENTIFIER
// - __REALTIME_TIMESTAMP
func parseEntry(entry map[string]string) (logs.Message, error) {
    logMsg := logs.Message{
        Text:     entry["MESSAGE"],
        Instance: entry["_PID"],
    }

    identifier := entry["SYSLOG_IDENTIFIER"]
    parts := strings.Split(identifier, ":")
    if len(parts) != 2 {
        return logMsg, fmt.Errorf("invalid SYSLOG_IDENTIFIER")
    }
    logMsg.Namespace = parts[0]
    logMsg.Name = parts[1]

    ts, ok := entry["__REALTIME_TIMESTAMP"]
    if !ok {
        return logMsg, fmt.Errorf("missing required field __REALTIME_TIMESTAMP")
    }

    ms, err := strconv.ParseInt(ts, 10, 64)
    if err != nil {
        return logMsg, fmt.Errorf("invalid timestamp: %w", err)
    }
    logMsg.Timestamp = time.Unix(0, ms*1000).UTC()

    return logMsg, nil
}

func logErrOut(out io.ReadCloser) {
    defer log.Println("stderr closed")
    defer out.Close()

    io.Copy(log.Writer(), out)
}
73 pkg/logs/requestor_test.go (Normal file)
@@ -0,0 +1,73 @@
package logs

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"
    "testing"
    "time"

    "github.com/openfaas/faas-provider/logs"
)

func Test_parseEntry(t *testing.T) {
    rawEntry := `{ "__CURSOR" : "s=71c4550142d14ace8e2959e3540cc15c;i=133c;b=44864010f0d94baba7b6bf8019f82a56;m=2945cd3;t=5a00d4eb59180;x=8ed47f7f9b3d798", "__REALTIME_TIMESTAMP" : "1583353899094400", "__MONOTONIC_TIMESTAMP" : "43277523", "_BOOT_ID" : "44864010f0d94baba7b6bf8019f82a56", "SYSLOG_IDENTIFIER" : "openfaas-fn:nodeinfo", "_PID" : "2254", "MESSAGE" : "2020/03/04 20:31:39 POST / - 200 OK - ContentLength: 83", "_SOURCE_REALTIME_TIMESTAMP" : "1583353899094372" }`
    expectedEntry := logs.Message{
        Name:      "nodeinfo",
        Namespace: "openfaas-fn",
        Text:      "2020/03/04 20:31:39 POST / - 200 OK - ContentLength: 83",
        Timestamp: time.Unix(0, 1583353899094400*1000).UTC(),
    }

    value := map[string]string{}
    json.Unmarshal([]byte(rawEntry), &value)

    entry, err := parseEntry(value)
    if err != nil {
        t.Fatalf("unexpected error %s", err)
    }

    if entry.Name != expectedEntry.Name {
        t.Fatalf("want Name: %q, got %q", expectedEntry.Name, entry.Name)
    }

    if entry.Namespace != expectedEntry.Namespace {
        t.Fatalf("want Namespace: %q, got %q", expectedEntry.Namespace, entry.Namespace)
    }

    if entry.Timestamp != expectedEntry.Timestamp {
        t.Fatalf("want Timestamp: %q, got %q", expectedEntry.Timestamp, entry.Timestamp)
    }

    if entry.Text != expectedEntry.Text {
        t.Fatalf("want Text: %q, got %q", expectedEntry.Text, entry.Text)
    }
}

func Test_buildCmd(t *testing.T) {
    ctx := context.TODO()
    now := time.Now()
    req := logs.Request{
        Name:      "loggyfunc",
        Namespace: "spacetwo",
        Follow:    true,
        Since:     &now,
        Tail:      5,
    }

    expectedArgs := fmt.Sprintf(
        "--utc --no-pager --output=json --identifier=spacetwo:loggyfunc --since=%s --follow --lines=5",
        now.UTC().Format("2006-01-02 15:04:05"),
    )

    cmd := buildCmd(ctx, req).String()
    wantCmd := "journalctl"
    if !strings.Contains(cmd, wantCmd) {
        t.Fatalf("cmd want: %q, got: %q", wantCmd, cmd)
    }

    if !strings.HasSuffix(cmd, expectedArgs) {
        t.Fatalf("arg want: %q\ngot: %q", expectedArgs, cmd)
    }
}
35 pkg/provider/config/read.go (Normal file)
@@ -0,0 +1,35 @@
package config

import (
    "time"

    types "github.com/openfaas/faas-provider/types"
)

type ProviderConfig struct {
    // Sock is the address of the containerd socket
    Sock string
}

// ReadFromEnv loads the FaaSConfig and the containerd-specific config from the env variables
func ReadFromEnv(hasEnv types.HasEnv) (*types.FaaSConfig, *ProviderConfig, error) {
    config, err := types.ReadConfig{}.Read(hasEnv)
    if err != nil {
        return nil, nil, err
    }

    serviceTimeout := types.ParseIntOrDurationValue(hasEnv.Getenv("service_timeout"), time.Second*60)

    config.EnableHealth = true
    config.ReadTimeout = serviceTimeout
    config.WriteTimeout = serviceTimeout

    port := types.ParseIntValue(hasEnv.Getenv("port"), 8081)
    config.TCPPort = &port

    providerConfig := &ProviderConfig{
        Sock: types.ParseString(hasEnv.Getenv("sock"), "/run/containerd/containerd.sock"),
    }

    return config, providerConfig, nil
}
107 pkg/provider/config/read_test.go (Normal file)
@@ -0,0 +1,107 @@
package config

import (
    "strconv"
    "testing"
)

type EnvBucket struct {
    Items map[string]string
}

func NewEnvBucket() EnvBucket {
    return EnvBucket{
        Items: make(map[string]string),
    }
}

func (e EnvBucket) Getenv(key string) string {
    return e.Items[key]
}

func (e EnvBucket) Setenv(key string, value string) {
    e.Items[key] = value
}

func Test_SetSockByEnv(t *testing.T) {
    defaultSock := "/run/containerd/containerd.sock"
    expectedSock := "/non/default/value.sock"
    env := NewEnvBucket()
    _, config, err := ReadFromEnv(env)
    if err != nil {
        t.Fatalf("unexpected error %s", err)
    }
    if config.Sock != defaultSock {
        t.Fatalf("expected %q, got %q", defaultSock, config.Sock)
    }

    env.Setenv("sock", expectedSock)
    _, config, err = ReadFromEnv(env)
    if err != nil {
        t.Fatalf("unexpected error %s", err)
    }
    if config.Sock != expectedSock {
        t.Fatalf("expected %q, got %q", expectedSock, config.Sock)
    }
}

func Test_SetServiceTimeout(t *testing.T) {
    defaultTimeout := "1m0s"

    env := NewEnvBucket()
    config, _, err := ReadFromEnv(env)
    if err != nil {
        t.Fatalf("unexpected error %s", err)
    }
    if config.ReadTimeout.String() != defaultTimeout {
        t.Fatalf("expected %q, got %q", defaultTimeout, config.ReadTimeout)
    }

    if config.WriteTimeout.String() != defaultTimeout {
        t.Fatalf("expected %q, got %q", defaultTimeout, config.WriteTimeout)
    }

    newTimeout := "30s"
    env.Setenv("service_timeout", newTimeout)
    config, _, err = ReadFromEnv(env)
    if err != nil {
        t.Fatalf("unexpected error %s", err)
    }
    if config.ReadTimeout.String() != newTimeout {
        t.Fatalf("expected %q, got %q", newTimeout, config.ReadTimeout)
    }

    if config.WriteTimeout.String() != newTimeout {
        t.Fatalf("expected %q, got %q", newTimeout, config.WriteTimeout)
    }
}

func Test_SetPort(t *testing.T) {
    defaultPort := 8081

    env := NewEnvBucket()
    config, _, err := ReadFromEnv(env)
    if err != nil {
        t.Fatalf("unexpected error %s", err)
    }
    if config.TCPPort == nil {
        t.Fatal("expected non-nil TCPPort")
    }
    if *config.TCPPort != defaultPort {
        t.Fatalf("expected %d, got %d", defaultPort, *config.TCPPort)
    }

    newPort := 9091
    newPortStr := strconv.Itoa(newPort)
    env.Setenv("port", newPortStr)
    config, _, err = ReadFromEnv(env)
    if err != nil {
        t.Fatalf("unexpected error %s", err)
    }
    if config.TCPPort == nil {
        t.Fatal("expected non-nil TCPPort")
    }
    if *config.TCPPort != newPort {
        t.Fatalf("expected %d, got %d", newPort, *config.TCPPort)
    }
}
73 pkg/provider/handlers/delete.go (Normal file)
@@ -0,0 +1,73 @@
package handlers

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "net/http"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/namespaces"
    gocni "github.com/containerd/go-cni"
    "github.com/openfaas/faas/gateway/requests"

    faasd "github.com/openfaas/faasd/pkg"
    cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
    "github.com/openfaas/faasd/pkg/service"
)

func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {

    return func(w http.ResponseWriter, r *http.Request) {

        if r.Body == nil {
            http.Error(w, "expected a body", http.StatusBadRequest)
            return
        }

        defer r.Body.Close()

        body, _ := ioutil.ReadAll(r.Body)
        log.Printf("[Delete] request: %s\n", string(body))

        req := requests.DeleteFunctionRequest{}
        err := json.Unmarshal(body, &req)
        if err != nil {
            log.Printf("[Delete] error parsing input: %s\n", err)
            http.Error(w, err.Error(), http.StatusBadRequest)

            return
        }

        name := req.FunctionName

        function, err := GetFunction(client, name)
        if err != nil {
            msg := fmt.Sprintf("service %s not found", name)
            log.Printf("[Delete] %s\n", msg)
            http.Error(w, msg, http.StatusNotFound)
            return
        }

        ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)

        // TODO: this needs to still happen if the task is paused
        if function.replicas != 0 {
            err = cninetwork.DeleteCNINetwork(ctx, cni, client, name)
            if err != nil {
                log.Printf("[Delete] error removing CNI network for %s, %s\n", name, err)
            }
        }

        containerErr := service.Remove(ctx, client, name)
        if containerErr != nil {
            log.Printf("[Delete] error removing %s, %s\n", name, containerErr)
            http.Error(w, containerErr.Error(), http.StatusInternalServerError)
            return
        }

        log.Printf("[Delete] deleted %s\n", name)
    }
}
271 pkg/provider/handlers/deploy.go (Normal file)
@@ -0,0 +1,271 @@
package handlers

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
    "os"
    "path"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/cio"
    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/namespaces"
    "github.com/containerd/containerd/oci"
    gocni "github.com/containerd/go-cni"
    "github.com/docker/distribution/reference"
    "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/openfaas/faas-provider/types"
    faasd "github.com/openfaas/faasd/pkg"
    cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
    "github.com/openfaas/faasd/pkg/service"
    "github.com/pkg/errors"
    "k8s.io/apimachinery/pkg/api/resource"
)

const annotationLabelPrefix = "com.openfaas.annotations."

func MakeDeployHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) func(w http.ResponseWriter, r *http.Request) {

    return func(w http.ResponseWriter, r *http.Request) {

        if r.Body == nil {
            http.Error(w, "expected a body", http.StatusBadRequest)
            return
        }

        defer r.Body.Close()

        body, _ := ioutil.ReadAll(r.Body)
        log.Printf("[Deploy] request: %s\n", string(body))

        req := types.FunctionDeployment{}
        err := json.Unmarshal(body, &req)
        if err != nil {
            log.Printf("[Deploy] - error parsing input: %s\n", err)
            http.Error(w, err.Error(), http.StatusBadRequest)

            return
        }

        err = validateSecrets(secretMountPath, req.Secrets)
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }

        name := req.Service
        ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)

        deployErr := deploy(ctx, req, client, cni, secretMountPath, alwaysPull)
        if deployErr != nil {
            log.Printf("[Deploy] error deploying %s, error: %s\n", name, deployErr)
            http.Error(w, deployErr.Error(), http.StatusBadRequest)
            return
        }
    }
}

func deploy(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) error {
    r, err := reference.ParseNormalizedNamed(req.Image)
    if err != nil {
        return err
    }

    imgRef := reference.TagNameOnly(r).String()

    snapshotter := ""
    if val, ok := os.LookupEnv("snapshotter"); ok {
        snapshotter = val
    }

    image, err := service.PrepareImage(ctx, client, imgRef, snapshotter, alwaysPull)
    if err != nil {
        return errors.Wrapf(err, "unable to pull image %s", imgRef)
    }

    size, _ := image.Size(ctx)
    log.Printf("Deploy %s size: %d\n", image.Name(), size)

    envs := prepareEnv(req.EnvProcess, req.EnvVars)
    mounts := getMounts()

    for _, secret := range req.Secrets {
        mounts = append(mounts, specs.Mount{
            Destination: path.Join("/var/openfaas/secrets", secret),
            Type:        "bind",
            Source:      path.Join(secretMountPath, secret),
            Options:     []string{"rbind", "ro"},
        })
    }

    name := req.Service

    labels, err := buildLabels(&req)
    if err != nil {
        return fmt.Errorf("unable to apply labels to container: %s, error: %s", name, err)
    }

    var memory *specs.LinuxMemory
    if req.Limits != nil && len(req.Limits.Memory) > 0 {
        memory = &specs.LinuxMemory{}

        qty, err := resource.ParseQuantity(req.Limits.Memory)
        if err != nil {
            log.Printf("error parsing (%q) as quantity: %s", req.Limits.Memory, err.Error())
        }
        v := qty.Value()
        memory.Limit = &v
    }

    container, err := client.NewContainer(
        ctx,
        name,
        containerd.WithImage(image),
        containerd.WithSnapshotter(snapshotter),
        containerd.WithNewSnapshot(name+"-snapshot", image),
        containerd.WithNewSpec(oci.WithImageConfig(image),
            oci.WithCapabilities([]string{"CAP_NET_RAW"}),
            oci.WithMounts(mounts),
            oci.WithEnv(envs),
            withMemory(memory)),
        containerd.WithContainerLabels(labels),
    )

    if err != nil {
        return fmt.Errorf("unable to create container: %s, error: %s", name, err)
    }

    return createTask(ctx, client, container, cni)

}

func buildLabels(request *types.FunctionDeployment) (map[string]string, error) {
    // Adapted from faas-swarm/handlers/deploy.go:buildLabels
    labels := map[string]string{}

    if request.Labels != nil {
        for k, v := range *request.Labels {
            labels[k] = v
        }
    }

    if request.Annotations != nil {
        for k, v := range *request.Annotations {
            key := fmt.Sprintf("%s%s", annotationLabelPrefix, k)
            if _, ok := labels[key]; !ok {
                labels[key] = v
            } else {
                return nil, errors.New(fmt.Sprintf("Key %s cannot be used as a label due to a conflict with annotation prefix %s", k, annotationLabelPrefix))
            }
        }
    }

    return labels, nil
}

func createTask(ctx context.Context, client *containerd.Client, container containerd.Container, cni gocni.CNI) error {

    name := container.ID()

    task, taskErr := container.NewTask(ctx, cio.BinaryIO("/usr/local/bin/faasd", nil))

    if taskErr != nil {
        return fmt.Errorf("unable to start task: %s, error: %s", name, taskErr)
    }

    log.Printf("Container ID: %s\tTask ID %s:\tTask PID: %d\t\n", name, task.ID(), task.Pid())

    labels := map[string]string{}
    network, err := cninetwork.CreateCNINetwork(ctx, cni, task, labels)

    if err != nil {
        return err
    }

    ip, err := cninetwork.GetIPAddress(network, task)
    if err != nil {
        return err
    }
    log.Printf("%s has IP: %s.\n", name, ip.String())

    _, waitErr := task.Wait(ctx)
    if waitErr != nil {
        return errors.Wrapf(waitErr, "Unable to wait for task to start: %s", name)
    }

    if startErr := task.Start(ctx); startErr != nil {
        return errors.Wrapf(startErr, "Unable to start task: %s", name)
    }
    return nil
}

func prepareEnv(envProcess string, reqEnvVars map[string]string) []string {
    envs := []string{}
    fprocessFound := false
    fprocess := "fprocess=" + envProcess
    if len(envProcess) > 0 {
        fprocessFound = true
    }

    for k, v := range reqEnvVars {
        if k == "fprocess" {
            fprocessFound = true
            fprocess = v
        } else {
            envs = append(envs, k+"="+v)
        }
    }
    if fprocessFound {
        envs = append(envs, fprocess)
    }
    return envs
}

func getMounts() []specs.Mount {
    wd, _ := os.Getwd()
    mounts := []specs.Mount{}
    mounts = append(mounts, specs.Mount{
        Destination: "/etc/resolv.conf",
        Type:        "bind",
        Source:      path.Join(wd, "resolv.conf"),
        Options:     []string{"rbind", "ro"},
    })

    mounts = append(mounts, specs.Mount{
        Destination: "/etc/hosts",
        Type:        "bind",
        Source:      path.Join(wd, "hosts"),
        Options:     []string{"rbind", "ro"},
    })
    return mounts
}

func validateSecrets(secretMountPath string, secrets []string) error {
    for _, secret := range secrets {
        if _, err := os.Stat(path.Join(secretMountPath, secret)); err != nil {
            return fmt.Errorf("unable to find secret: %s", secret)
        }
    }
    return nil
}

func withMemory(mem *specs.LinuxMemory) oci.SpecOpts {
    return func(ctx context.Context, _ oci.Client, c *containers.Container, s *oci.Spec) error {
        if mem != nil {
            if s.Linux == nil {
                s.Linux = &specs.Linux{}
            }
            if s.Linux.Resources == nil {
                s.Linux.Resources = &specs.LinuxResources{}
            }
            if s.Linux.Resources.Memory == nil {
                s.Linux.Resources.Memory = &specs.LinuxMemory{}
            }
            s.Linux.Resources.Memory.Limit = mem.Limit
        }
        return nil
    }
}
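The handlers in this file are plain handler factories, so wiring them onto a router is left to the caller. A minimal sketch of how MakeDeployHandler and MakeDeleteHandler might be registered follows; this is not the project's actual bootstrap wiring, and the package name, route paths, secret mount path, and alwaysPull flag are illustrative assumptions only.

package bootstrap // hypothetical package name, for illustration only

import (
    "net/http"

    "github.com/containerd/containerd"
    gocni "github.com/containerd/go-cni"
    "github.com/gorilla/mux"

    "github.com/openfaas/faasd/pkg/provider/handlers"
)

// newRouter registers the deploy and delete handlers on a gorilla/mux router.
// "/system/functions", the secret mount path, and alwaysPull=false are example values.
func newRouter(client *containerd.Client, cni gocni.CNI) *mux.Router {
    r := mux.NewRouter()
    r.HandleFunc("/system/functions", handlers.MakeDeployHandler(client, cni, "/var/openfaas/secrets", false)).Methods(http.MethodPost)
    r.HandleFunc("/system/functions", handlers.MakeDeleteHandler(client, cni)).Methods(http.MethodDelete)
    return r
}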
77 pkg/provider/handlers/deploy_test.go (Normal file)
@@ -0,0 +1,77 @@
package handlers

import (
    "fmt"
    "reflect"
    "testing"

    "github.com/openfaas/faas-provider/types"
)

func Test_BuildLabels_WithAnnotations(t *testing.T) {
    // Test each combination of nil/non-nil annotation + label
    tables := []struct {
        name       string
        label      map[string]string
        annotation map[string]string
        result     map[string]string
    }{
        {"Empty label and annotations returns empty table map", nil, nil, map[string]string{}},
        {
            "Label with empty annotation returns valid map",
            map[string]string{"L1": "V1"},
            nil,
            map[string]string{"L1": "V1"}},
        {
            "Annotation with empty label returns valid map",
            nil,
            map[string]string{"A1": "V2"},
            map[string]string{fmt.Sprintf("%sA1", annotationLabelPrefix): "V2"}},
        {
            "Label and annotation provided returns valid combined map",
            map[string]string{"L1": "V1"},
            map[string]string{"A1": "V2"},
            map[string]string{
                "L1": "V1",
                fmt.Sprintf("%sA1", annotationLabelPrefix): "V2",
            },
        },
    }

    for _, tc := range tables {

        t.Run(tc.name, func(t *testing.T) {
            request := &types.FunctionDeployment{
                Labels:      &tc.label,
                Annotations: &tc.annotation,
            }

            val, err := buildLabels(request)

            if err != nil {
                t.Fatalf("want: no error got: %v", err)
            }

            if !reflect.DeepEqual(val, tc.result) {
                t.Errorf("Got: %s, expected %s", val, tc.result)
            }
        })
    }
}

func Test_BuildLabels_WithAnnotationCollision(t *testing.T) {
    request := &types.FunctionDeployment{
        Labels: &map[string]string{
            "function_name": "echo",
            fmt.Sprintf("%scurrent-time", annotationLabelPrefix): "Wed 25 Jul 06:41:43 BST 2018",
        },
        Annotations: &map[string]string{"current-time": "Wed 25 Jul 06:41:43 BST 2018"},
    }

    val, err := buildLabels(request)

    if err == nil {
        t.Errorf("Expected an error, got %d values", len(val))
    }

}
118 pkg/provider/handlers/functions.go (Normal file)
@@ -0,0 +1,118 @@
package handlers

import (
    "context"
    "fmt"
    "log"
    "strings"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/namespaces"
    "github.com/openfaas/faasd/pkg/cninetwork"

    faasd "github.com/openfaas/faasd/pkg"
)

type Function struct {
    name        string
    namespace   string
    image       string
    pid         uint32
    replicas    int
    IP          string
    labels      map[string]string
    annotations map[string]string
}

// ListFunctions returns a map of all functions with running tasks on namespace
func ListFunctions(client *containerd.Client) (map[string]Function, error) {
    ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
    functions := make(map[string]Function)

    containers, _ := client.Containers(ctx)
    for _, k := range containers {
        name := k.ID()
        f, err := GetFunction(client, name)
        if err != nil {
            continue
        }
        functions[name] = f
    }
    return functions, nil
}

// GetFunction returns a function that matches name
func GetFunction(client *containerd.Client, name string) (Function, error) {
    ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
    c, err := client.LoadContainer(ctx, name)

    if err == nil {
        image, _ := c.Image(ctx)

        containerName := c.ID()
        allLabels, labelErr := c.Labels(ctx)

        if labelErr != nil {
            log.Printf("cannot list container %s labels: %s", containerName, labelErr.Error())
        }

        labels, annotations := buildLabelsAndAnnotations(allLabels)

        f := Function{
            name:        containerName,
            namespace:   faasd.FunctionNamespace,
            image:       image.Name(),
            labels:      labels,
            annotations: annotations,
        }

        replicas := 0
        task, err := c.Task(ctx, nil)
        if err == nil {
            // Task for container exists
            svc, err := task.Status(ctx)
            if err != nil {
                return Function{}, fmt.Errorf("unable to get task status for container: %s %s", name, err)
            }

            if svc.Status == "running" {
                replicas = 1
                f.pid = task.Pid()

                // Get container IP address
                ip, err := cninetwork.GetIPfromPID(int(task.Pid()))
                if err != nil {
                    return Function{}, err
                }
                f.IP = ip.String()
            }
        } else {
            replicas = 0
        }

        f.replicas = replicas
        return f, nil
    }

    return Function{}, fmt.Errorf("unable to find function: %s, error %s", name, err)
}

func buildLabelsAndAnnotations(ctrLabels map[string]string) (labels map[string]string, annotations map[string]string) {
    for k, v := range ctrLabels {
        if strings.HasPrefix(k, annotationLabelPrefix) {
            if annotations == nil {
                annotations = make(map[string]string)
            }

            annotations[strings.TrimPrefix(k, annotationLabelPrefix)] = v
        } else {
            if labels == nil {
                labels = make(map[string]string)
            }

            labels[k] = v
        }
    }

    return labels, annotations
}
29 pkg/provider/handlers/functions_test.go (Normal file)
@@ -0,0 +1,29 @@
package handlers

import (
    "fmt"
    "testing"
)

func Test_BuildLabelsAndAnnotationsFromServiceSpec_Annotations(t *testing.T) {
    container := map[string]string{
        "qwer": "ty",
        "dvor": "ak",
        fmt.Sprintf("%scurrent-time", annotationLabelPrefix): "5 Nov 20:10:20 PST 1955",
        fmt.Sprintf("%sfuture-time", annotationLabelPrefix):  "21 Oct 20:10:20 PST 2015",
    }

    labels, annotation := buildLabelsAndAnnotations(container)

    if len(labels) != 2 {
        t.Errorf("want: %d labels got: %d", 2, len(labels))
    }

    if len(annotation) != 2 {
        t.Errorf("want: %d annotations got: %d", 2, len(annotation))
    }

    if _, ok := annotation["current-time"]; !ok {
        t.Errorf("want: '%s' entry in annotation map got: key not found", "current-time")
    }
}
44 pkg/provider/handlers/info.go (Normal file)
@@ -0,0 +1,44 @@
package handlers

import (
    "encoding/json"
    "net/http"

    "github.com/openfaas/faas-provider/types"
)

const (
    // OrchestrationIdentifier identifier string for provider orchestration
    OrchestrationIdentifier = "containerd"

    // ProviderName name of the provider
    ProviderName = "faasd"
)

// MakeInfoHandler creates a handler for the /system/info endpoint
func MakeInfoHandler(version, sha string) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if r.Body != nil {
            defer r.Body.Close()
        }

        infoResponse := types.InfoResponse{
            Orchestration: OrchestrationIdentifier,
            Provider:      ProviderName,
            Version: types.ProviderVersion{
                Release: version,
                SHA:     sha,
            },
        }

        jsonOut, marshalErr := json.Marshal(infoResponse)
        if marshalErr != nil {
            w.WriteHeader(http.StatusInternalServerError)
            return
        }

        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusOK)
        w.Write(jsonOut)
    }
}
39 pkg/provider/handlers/info_test.go (Normal file)
@@ -0,0 +1,39 @@
package handlers

import (
    "encoding/json"
    "net/http/httptest"
    "testing"

    "github.com/openfaas/faas-provider/types"
)

func Test_InfoHandler(t *testing.T) {
    sha := "4b825dc642cb6eb9a060e54bf8d69288fbee4904"
    version := "0.0.1"
    handler := MakeInfoHandler(version, sha)
    w := httptest.NewRecorder()
    r := httptest.NewRequest("GET", "/", nil)
    handler(w, r)

    resp := types.InfoResponse{}
    err := json.Unmarshal(w.Body.Bytes(), &resp)
    if err != nil {
        t.Fatalf("unexpected error unmarshalling the response")
    }

    if resp.Provider != ProviderName {
        t.Fatalf("expected provider %q, got %q", ProviderName, resp.Provider)
    }

    if resp.Orchestration != OrchestrationIdentifier {
        t.Fatalf("expected orchestration %q, got %q", OrchestrationIdentifier, resp.Orchestration)
    }

    if resp.Version.SHA != sha {
        t.Fatalf("expected SHA %q, got %q", sha, resp.Version.SHA)
    }

    if resp.Version.Release != version {
        t.Fatalf("expected release %q, got %q", version, resp.Version.Release)
    }
}
39 pkg/provider/handlers/invoke_resolver.go (Normal file)
@@ -0,0 +1,39 @@
package handlers

import (
    "fmt"
    "log"
    "net/url"

    "github.com/containerd/containerd"
)

const watchdogPort = 8080

type InvokeResolver struct {
    client *containerd.Client
}

func NewInvokeResolver(client *containerd.Client) *InvokeResolver {
    return &InvokeResolver{client: client}
}

func (i *InvokeResolver) Resolve(functionName string) (url.URL, error) {
    log.Printf("Resolve: %q\n", functionName)

    function, err := GetFunction(i.client, functionName)
    if err != nil {
        return url.URL{}, fmt.Errorf("%s not found", functionName)
    }

    serviceIP := function.IP

    urlStr := fmt.Sprintf("http://%s:%d", serviceIP, watchdogPort)

    urlRes, err := url.Parse(urlStr)
    if err != nil {
        return url.URL{}, err
    }

    return *urlRes, nil
}
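The resolver maps a function name to the watchdog URL inside that function's container. The short sketch below shows how it might be called directly; this is an illustration only, the package name is hypothetical, "nodeinfo" is an example function name, and the containerd client is assumed to be constructed elsewhere.

package bootstrap // hypothetical package name, for illustration only

import (
    "log"

    "github.com/containerd/containerd"

    "github.com/openfaas/faasd/pkg/provider/handlers"
)

// resolveExample sketches how a caller might look up a function's watchdog endpoint.
// "nodeinfo" is an example function name; error handling is kept minimal.
func resolveExample(client *containerd.Client) {
    resolver := handlers.NewInvokeResolver(client)

    u, err := resolver.Resolve("nodeinfo")
    if err != nil {
        log.Printf("could not resolve function: %s", err)
        return
    }

    log.Printf("invoke via: %s", u.String()) // e.g. http://<container-ip>:8080
}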
41 pkg/provider/handlers/read.go (Normal file)
@@ -0,0 +1,41 @@
package handlers

import (
    "encoding/json"
    "log"
    "net/http"

    "github.com/containerd/containerd"
    "github.com/openfaas/faas-provider/types"
)

func MakeReadHandler(client *containerd.Client) func(w http.ResponseWriter, r *http.Request) {

    return func(w http.ResponseWriter, r *http.Request) {

        res := []types.FunctionStatus{}
        funcs, err := ListFunctions(client)
        if err != nil {
            log.Printf("[Read] error listing functions. Error: %s\n", err)
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        for _, function := range funcs {

            res = append(res, types.FunctionStatus{
                Name:        function.name,
                Image:       function.image,
                Replicas:    uint64(function.replicas),
                Namespace:   function.namespace,
                Labels:      &function.labels,
                Annotations: &function.annotations,
            })
        }

        body, _ := json.Marshal(res)
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusOK)
        w.Write(body)

    }
}
36 pkg/provider/handlers/replicas.go (Normal file)
@@ -0,0 +1,36 @@
package handlers

import (
    "encoding/json"
    "net/http"

    "github.com/containerd/containerd"
    "github.com/gorilla/mux"
    "github.com/openfaas/faas-provider/types"
)

func MakeReplicaReaderHandler(client *containerd.Client) func(w http.ResponseWriter, r *http.Request) {

    return func(w http.ResponseWriter, r *http.Request) {
        vars := mux.Vars(r)
        functionName := vars["name"]

        if f, err := GetFunction(client, functionName); err == nil {
            found := types.FunctionStatus{
                Name:              functionName,
                AvailableReplicas: uint64(f.replicas),
                Replicas:          uint64(f.replicas),
                Namespace:         f.namespace,
                Labels:            &f.labels,
                Annotations:       &f.annotations,
            }

            functionBytes, _ := json.Marshal(found)
            w.Header().Set("Content-Type", "application/json")
            w.WriteHeader(http.StatusOK)
            w.Write(functionBytes)
        } else {
            w.WriteHeader(http.StatusNotFound)
        }
    }
}
129 pkg/provider/handlers/scale.go (Normal file)
@@ -0,0 +1,129 @@
package handlers

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "net/http"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/namespaces"
    gocni "github.com/containerd/go-cni"

    "github.com/openfaas/faas-provider/types"
    faasd "github.com/openfaas/faasd/pkg"
)

func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {

    return func(w http.ResponseWriter, r *http.Request) {

        if r.Body == nil {
            http.Error(w, "expected a body", http.StatusBadRequest)
            return
        }

        defer r.Body.Close()

        body, _ := ioutil.ReadAll(r.Body)
        log.Printf("[Scale] request: %s\n", string(body))

        req := types.ScaleServiceRequest{}
        err := json.Unmarshal(body, &req)

        if err != nil {
            log.Printf("[Scale] error parsing input: %s\n", err)
            http.Error(w, err.Error(), http.StatusBadRequest)

            return
        }

        name := req.ServiceName

        if _, err := GetFunction(client, name); err != nil {
            msg := fmt.Sprintf("service %s not found", name)
            log.Printf("[Scale] %s\n", msg)
            http.Error(w, msg, http.StatusNotFound)
            return
        }

        ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)

        ctr, ctrErr := client.LoadContainer(ctx, name)
        if ctrErr != nil {
            msg := fmt.Sprintf("cannot load service %s, error: %s", name, ctrErr)
            log.Printf("[Scale] %s\n", msg)
            http.Error(w, msg, http.StatusNotFound)
            return
        }

        var taskExists bool
        var taskStatus *containerd.Status

        task, taskErr := ctr.Task(ctx, nil)
        if taskErr != nil {
            msg := fmt.Sprintf("cannot load task for service %s, error: %s", name, taskErr)
            log.Printf("[Scale] %s\n", msg)
            taskExists = false
        } else {
            taskExists = true
            status, statusErr := task.Status(ctx)
            if statusErr != nil {
                msg := fmt.Sprintf("cannot load task status for %s, error: %s", name, statusErr)
                log.Printf("[Scale] %s\n", msg)
                http.Error(w, msg, http.StatusInternalServerError)
                return
            } else {
                taskStatus = &status
            }
        }

        createNewTask := false

        // Scale to zero
        if req.Replicas == 0 {
            // If a task is running, pause it
            if taskExists && taskStatus.Status == containerd.Running {
                if pauseErr := task.Pause(ctx); pauseErr != nil {
                    wrappedPauseErr := fmt.Errorf("error pausing task %s, error: %s", name, pauseErr)
                    log.Printf("[Scale] %s\n", wrappedPauseErr.Error())
                    http.Error(w, wrappedPauseErr.Error(), http.StatusNotFound)
                    return
                }
            }
        }

        if taskExists {
            if taskStatus != nil {
                if taskStatus.Status == containerd.Paused {
                    if resumeErr := task.Resume(ctx); resumeErr != nil {
                        log.Printf("[Scale] error resuming task %s, error: %s\n", name, resumeErr)
                        http.Error(w, resumeErr.Error(), http.StatusBadRequest)
                        return
                    }
                } else if taskStatus.Status == containerd.Stopped {
                    // Stopped tasks cannot be restarted, must be removed, and created again
                    if _, delErr := task.Delete(ctx); delErr != nil {
                        log.Printf("[Scale] error deleting stopped task %s, error: %s\n", name, delErr)
                        http.Error(w, delErr.Error(), http.StatusBadRequest)
                        return
                    }
                    createNewTask = true
                }
            }
        } else {
            createNewTask = true
        }

        if createNewTask {
            deployErr := createTask(ctx, client, ctr, cni)
            if deployErr != nil {
                log.Printf("[Scale] error deploying %s, error: %s\n", name, deployErr)
                http.Error(w, deployErr.Error(), http.StatusBadRequest)
                return
            }
        }
    }
}
122 pkg/provider/handlers/secret.go (Normal file)
@@ -0,0 +1,122 @@
package handlers

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
    "os"
    "path"
    "strings"

    "github.com/containerd/containerd"
    "github.com/openfaas/faas-provider/types"
)

const secretFilePermission = 0644

func MakeSecretHandler(c *containerd.Client, mountPath string) func(w http.ResponseWriter, r *http.Request) {

    err := os.MkdirAll(mountPath, secretFilePermission)
    if err != nil {
        log.Printf("Creating path: %s, error: %s\n", mountPath, err)
    }

    return func(w http.ResponseWriter, r *http.Request) {
        if r.Body != nil {
            defer r.Body.Close()
        }

        switch r.Method {
        case http.MethodGet:
            listSecrets(c, w, r, mountPath)
        case http.MethodPost:
            createSecret(c, w, r, mountPath)
        case http.MethodPut:
            createSecret(c, w, r, mountPath)
        case http.MethodDelete:
            deleteSecret(c, w, r, mountPath)
        default:
            w.WriteHeader(http.StatusBadRequest)
            return
        }

    }
}

func listSecrets(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
    files, err := ioutil.ReadDir(mountPath)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    secrets := []types.Secret{}
    for _, f := range files {
        secrets = append(secrets, types.Secret{Name: f.Name()})
    }

    bytesOut, _ := json.Marshal(secrets)
    w.Write(bytesOut)
}

func createSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
    secret, err := parseSecret(r)
    if err != nil {
        log.Printf("[secret] error %s", err.Error())
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    err = ioutil.WriteFile(path.Join(mountPath, secret.Name), []byte(secret.Value), secretFilePermission)

    if err != nil {
        log.Printf("[secret] error %s", err.Error())
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
}

func deleteSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
    secret, err := parseSecret(r)
    if err != nil {
        log.Printf("[secret] error %s", err.Error())
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    err = os.Remove(path.Join(mountPath, secret.Name))

    if err != nil {
        log.Printf("[secret] error %s", err.Error())
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
}

func parseSecret(r *http.Request) (types.Secret, error) {
    secret := types.Secret{}
    bytesOut, err := ioutil.ReadAll(r.Body)
    if err != nil {
        return secret, err
    }

    err = json.Unmarshal(bytesOut, &secret)
    if err != nil {
        return secret, err
    }

    if isTraversal(secret.Name) {
        return secret, fmt.Errorf(traverseErrorSt)
    }

    return secret, err
}

const traverseErrorSt = "directory traversal found in name"

func isTraversal(name string) bool {
    return strings.Contains(name, fmt.Sprintf("%s", string(os.PathSeparator))) ||
        strings.Contains(name, "..")
}
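Because the secret handler accepts a JSON body matching faas-provider's types.Secret, creating a secret amounts to a single POST against the provider. The sketch below illustrates such a client call; the endpoint URL, secret name, and value are placeholders, authentication is omitted, and this is not presented as the project's own client code.

package bootstrap // hypothetical package name, for illustration only

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"

    "github.com/openfaas/faas-provider/types"
)

// createSecretSketch POSTs a secret to the provider's secret handler.
// The URL and the secret's name/value are example data only.
func createSecretSketch() error {
    payload, _ := json.Marshal(types.Secret{Name: "api-key", Value: "s3cr3t"})

    res, err := http.Post("http://127.0.0.1:8081/system/secrets", "application/json", bytes.NewReader(payload))
    if err != nil {
        return err
    }
    defer res.Body.Close()

    if res.StatusCode != http.StatusOK {
        return fmt.Errorf("unexpected status: %d", res.StatusCode)
    }
    return nil
}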
63 pkg/provider/handlers/secret_test.go (Normal file)
@@ -0,0 +1,63 @@
package handlers

import (
    "bytes"
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/openfaas/faas-provider/types"
)

func Test_parseSecretValidName(t *testing.T) {

    s := types.Secret{Name: "authorized_keys"}
    body, _ := json.Marshal(s)
    reader := bytes.NewReader(body)
    r := httptest.NewRequest(http.MethodPost, "/", reader)
    _, err := parseSecret(r)

    if err != nil {
        t.Fatalf("secret name is valid with no traversal characters")
    }
}

func Test_parseSecretValidNameWithDot(t *testing.T) {

    s := types.Secret{Name: "authorized.keys"}
    body, _ := json.Marshal(s)
    reader := bytes.NewReader(body)
    r := httptest.NewRequest(http.MethodPost, "/", reader)
    _, err := parseSecret(r)

    if err != nil {
        t.Fatalf("secret name is valid with no traversal characters")
    }
}

func Test_parseSecretWithTraversalWithSlash(t *testing.T) {

    s := types.Secret{Name: "/root/.ssh/authorized_keys"}
    body, _ := json.Marshal(s)
    reader := bytes.NewReader(body)
    r := httptest.NewRequest(http.MethodPost, "/", reader)
    _, err := parseSecret(r)

    if err == nil {
        t.Fatalf("secret name should fail due to path traversal")
    }
}

func Test_parseSecretWithTraversalWithDoubleDot(t *testing.T) {

    s := types.Secret{Name: ".."}
    body, _ := json.Marshal(s)
    reader := bytes.NewReader(body)
    r := httptest.NewRequest(http.MethodPost, "/", reader)
    _, err := parseSecret(r)

    if err == nil {
        t.Fatalf("secret name should fail due to path traversal")
    }
}
81 pkg/provider/handlers/update.go (Normal file)
@@ -0,0 +1,81 @@
package handlers

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "net/http"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/namespaces"
    gocni "github.com/containerd/go-cni"
    "github.com/openfaas/faas-provider/types"

    faasd "github.com/openfaas/faasd/pkg"
    "github.com/openfaas/faasd/pkg/cninetwork"
    "github.com/openfaas/faasd/pkg/service"
)

func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) func(w http.ResponseWriter, r *http.Request) {

    return func(w http.ResponseWriter, r *http.Request) {

        if r.Body == nil {
            http.Error(w, "expected a body", http.StatusBadRequest)
            return
        }

        defer r.Body.Close()

        body, _ := ioutil.ReadAll(r.Body)
        log.Printf("[Update] request: %s\n", string(body))

        req := types.FunctionDeployment{}
        err := json.Unmarshal(body, &req)
        if err != nil {
            log.Printf("[Update] error parsing input: %s\n", err)
            http.Error(w, err.Error(), http.StatusBadRequest)

            return
        }
        name := req.Service

        function, err := GetFunction(client, name)
        if err != nil {
            msg := fmt.Sprintf("service %s not found", name)
            log.Printf("[Update] %s\n", msg)
            http.Error(w, msg, http.StatusNotFound)
            return
        }

        err = validateSecrets(secretMountPath, req.Secrets)
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }

        ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
        if function.replicas != 0 {
            err = cninetwork.DeleteCNINetwork(ctx, cni, client, name)
            if err != nil {
                log.Printf("[Update] error removing CNI network for %s, %s\n", name, err)
            }
        }

        containerErr := service.Remove(ctx, client, name)
        if containerErr != nil {
            log.Printf("[Update] error removing %s, %s\n", name, containerErr)
            http.Error(w, containerErr.Error(), http.StatusInternalServerError)
            return
        }

        deployErr := deploy(ctx, req, client, cni, secretMountPath, alwaysPull)
        if deployErr != nil {
            log.Printf("[Update] error deploying %s, error: %s\n", name, deployErr)
            http.Error(w, deployErr.Error(), http.StatusBadRequest)
            return
        }
    }

}
116
pkg/proxy.go
Normal file
116
pkg/proxy.go
Normal file
@ -0,0 +1,116 @@
package pkg

import (
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"strconv"
	"strings"

	"time"
)

// NewProxy creates a HTTP proxy to expose a host
func NewProxy(upstream string, listenPort uint32, hostIP string, timeout time.Duration, resolver Resolver) *Proxy {

	return &Proxy{
		Upstream: upstream,
		Port:     listenPort,
		HostIP:   hostIP,
		Timeout:  timeout,
		Resolver: resolver,
	}
}

// Proxy for exposing a private container
type Proxy struct {
	Timeout time.Duration

	// Port on which to listen to traffic
	Port uint32

	// Upstream is where to send traffic when received
	Upstream string

	// The IP to use to bind locally
	HostIP string

	Resolver Resolver
}

// Start listening and forwarding HTTP to the host
func (p *Proxy) Start() error {

	http.DefaultClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	}
	upstreamHost, upstreamPort, err := getUpstream(p.Upstream, p.Port)
	if err != nil {
		return err
	}

	log.Printf("Looking up IP for: %q", upstreamHost)
	got := make(chan string, 1)

	go p.Resolver.Get(upstreamHost, got, time.Second*5)

	ipAddress := <-got
	close(got)

	upstreamAddr := fmt.Sprintf("%s:%d", ipAddress, upstreamPort)

	localBind := fmt.Sprintf("%s:%d", p.HostIP, p.Port)
	log.Printf("Proxy from: %s, to: %s (%s)\n", localBind, p.Upstream, ipAddress)

	l, err := net.Listen("tcp", localBind)
	if err != nil {
		log.Printf("Error: %s", err.Error())
		return err
	}

	defer l.Close()
	for {
		// Wait for a connection.
		conn, err := l.Accept()
		if err != nil {
			acceptErr := fmt.Errorf("Unable to accept on %d, error: %s",
				p.Port,
				err.Error())
			log.Printf("%s", acceptErr.Error())
			return acceptErr
		}

		upstream, err := net.Dial("tcp", upstreamAddr)

		if err != nil {
			log.Printf("unable to dial to %s, error: %s", upstreamAddr, err.Error())
			return err
		}

		go pipe(conn, upstream)
		go pipe(upstream, conn)
	}
}

func pipe(from net.Conn, to net.Conn) {
	defer from.Close()
	io.Copy(from, to)
}

func getUpstream(val string, defaultPort uint32) (string, uint32, error) {
	upstreamHostname := val
	upstreamPort := defaultPort

	if in := strings.Index(val, ":"); in > -1 {
		upstreamHostname = val[:in]
		port, err := strconv.ParseInt(val[in+1:], 10, 32)
		if err != nil {
			return "", defaultPort, err
		}
		upstreamPort = uint32(port)
	}

	return upstreamHostname, upstreamPort, nil
}
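To make the host/port splitting above concrete, a small illustrative call (not part of the diff; it assumes the same package, where fmt is already imported):

func exampleGetUpstream() {
	host, port, _ := getUpstream("gateway:8080", 8081)
	fmt.Println(host, port) // gateway 8080

	host, port, _ = getUpstream("gateway", 8081)
	fmt.Println(host, port) // gateway 8081 (falls back to the default port)
}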
pkg/proxy_test.go (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Test_Proxy_ToPrivateServer(t *testing.T) {
|
||||
|
||||
wantBodyText := "OK"
|
||||
wantBody := []byte(wantBodyText)
|
||||
upstreamSvr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if r.Body != nil {
|
||||
defer r.Body.Close()
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(wantBody)
|
||||
|
||||
}))
|
||||
|
||||
defer upstreamSvr.Close()
|
||||
port := 8080
|
||||
u, _ := url.Parse(upstreamSvr.URL)
|
||||
log.Println("Host", u.Host)
|
||||
|
||||
upstreamAddr := u.Host
|
||||
proxy := NewProxy(upstreamAddr, 8080, "127.0.0.1", time.Second*1, &mockResolver{})
|
||||
|
||||
gwChan := make(chan string, 1)
|
||||
doneCh := make(chan bool)
|
||||
|
||||
go proxy.Start()
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
gwChan <- u.Host
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d", port), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 1; i < 11; i++ {
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Logf("Try %d, gave error: %s", i, err)
|
||||
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
} else {
|
||||
|
||||
resBody, _ := ioutil.ReadAll(res.Body)
|
||||
if string(resBody) != string(wantBody) {
|
||||
t.Errorf("want %s, but got %s in body", string(wantBody), string(resBody))
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
doneCh <- true
|
||||
}()
|
||||
}
|
||||
|
||||
type mockResolver struct {
|
||||
}
|
||||
|
||||
func (m *mockResolver) Start() {
|
||||
|
||||
}
|
||||
|
||||
func (m *mockResolver) Get(upstream string, got chan<- string, timeout time.Duration) {
|
||||
got <- upstream
|
||||
}
|
pkg/resolver.go (new file, 12 lines)
@@ -0,0 +1,12 @@
package pkg

import "time"

// Resolver resolves an upstream IP address for a given upstream host
type Resolver interface {
	// Start any polling or connections required to resolve
	Start()

	// Get an IP address using an asynchronous operation
	Get(upstream string, got chan<- string, timeout time.Duration)
}
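A hedged usage sketch tying Proxy and Resolver together. It is not part of the diff: echoResolver is a stand-in along the lines of the mockResolver in the test file above, and the addresses and timeout are illustrative.

// echoResolver hands back the upstream name it was given, unresolved.
type echoResolver struct{}

func (echoResolver) Start() {}

func (echoResolver) Get(upstream string, got chan<- string, timeout time.Duration) {
	got <- upstream
}

func runProxyExample() {
	// Forward 127.0.0.1:8080 to gateway:8080 until Start returns.
	p := NewProxy("gateway:8080", 8080, "127.0.0.1", time.Second*60, echoResolver{})
	if err := p.Start(); err != nil {
		log.Printf("proxy exited: %s", err)
	}
}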
pkg/service/service.go (new file, 199 lines)
@@ -0,0 +1,199 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// dockerConfigDir contains "config.json"
|
||||
const dockerConfigDir = "/var/lib/faasd/.docker/"
|
||||
|
||||
// Remove removes a container
|
||||
func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
||||
|
||||
container, containerErr := client.LoadContainer(ctx, name)
|
||||
|
||||
if containerErr == nil {
|
||||
found := true
|
||||
t, err := container.Task(ctx, nil)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
found = false
|
||||
} else {
|
||||
return fmt.Errorf("unable to get task %s: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
status, _ := t.Status(ctx)
|
||||
fmt.Printf("Status of %s is: %s\n", name, status.Status)
|
||||
|
||||
log.Printf("Need to kill %s\n", name)
|
||||
err := killTask(ctx, t)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error killing task %s, %s, %s", container.ID(), name, err)
|
||||
}
|
||||
}
|
||||
|
||||
err = container.Delete(ctx, containerd.WithSnapshotCleanup)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting container %s, %s, %s", container.ID(), name, err)
|
||||
}
|
||||
} else {
|
||||
service := client.SnapshotService("")
|
||||
key := name + "snapshot"
|
||||
if _, err := client.SnapshotService("").Stat(ctx, key); err == nil {
|
||||
service.Remove(ctx, key)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Adapted from Stellar - https://github.com/stellar
|
||||
func killTask(ctx context.Context, task containerd.Task) error {
|
||||
|
||||
killTimeout := 30 * time.Second
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
var err error
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if task != nil {
|
||||
wait, err := task.Wait(ctx)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error waiting on task: %s", err)
|
||||
return
|
||||
}
|
||||
if err := task.Kill(ctx, unix.SIGTERM, containerd.WithKillAll); err != nil {
|
||||
log.Printf("error killing container task: %s", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-wait:
|
||||
task.Delete(ctx)
|
||||
return
|
||||
case <-time.After(killTimeout):
|
||||
if err := task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil {
|
||||
log.Printf("error force killing container task: %s", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func getResolver(ctx context.Context, configFile *configfile.ConfigFile) (remotes.Resolver, error) {
|
||||
// credsFunc is based on https://github.com/moby/buildkit/blob/0b130cca040246d2ddf55117eeff34f546417e40/session/auth/authprovider/authprovider.go#L35
|
||||
credFunc := func(host string) (string, string, error) {
|
||||
if host == "registry-1.docker.io" {
|
||||
host = "https://index.docker.io/v1/"
|
||||
}
|
||||
ac, err := configFile.GetAuthConfig(host)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if ac.IdentityToken != "" {
|
||||
return "", ac.IdentityToken, nil
|
||||
}
|
||||
return ac.Username, ac.Password, nil
|
||||
}
|
||||
authOpts := []docker.AuthorizerOpt{docker.WithAuthCreds(credFunc)}
|
||||
authorizer := docker.NewDockerAuthorizer(authOpts...)
|
||||
opts := docker.ResolverOptions{
|
||||
Hosts: docker.ConfigureDefaultRegistries(docker.WithAuthorizer(authorizer)),
|
||||
}
|
||||
return docker.NewResolver(opts), nil
|
||||
}
|
||||
|
||||
func PrepareImage(ctx context.Context, client *containerd.Client, imageName, snapshotter string, pullAlways bool) (containerd.Image, error) {
|
||||
var (
|
||||
empty containerd.Image
|
||||
resolver remotes.Resolver
|
||||
)
|
||||
|
||||
if _, stErr := os.Stat(filepath.Join(dockerConfigDir, config.ConfigFileName)); stErr == nil {
|
||||
configFile, err := config.Load(dockerConfigDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resolver, err = getResolver(ctx, configFile)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
} else if !os.IsNotExist(stErr) {
|
||||
return empty, stErr
|
||||
}
|
||||
|
||||
var image containerd.Image
|
||||
if pullAlways {
|
||||
img, err := pullImage(ctx, client, resolver, imageName)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
|
||||
image = img
|
||||
} else {
|
||||
|
||||
img, err := client.GetImage(ctx, imageName)
|
||||
if err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
return empty, err
|
||||
}
|
||||
img, err := pullImage(ctx, client, resolver, imageName)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
image = img
|
||||
} else {
|
||||
image = img
|
||||
}
|
||||
}
|
||||
|
||||
unpacked, err := image.IsUnpacked(ctx, snapshotter)
|
||||
if err != nil {
|
||||
return empty, fmt.Errorf("cannot check if unpacked: %s", err)
|
||||
}
|
||||
|
||||
if !unpacked {
|
||||
if err := image.Unpack(ctx, snapshotter); err != nil {
|
||||
return empty, fmt.Errorf("cannot unpack: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return image, nil
|
||||
}
|
||||
|
||||
func pullImage(ctx context.Context, client *containerd.Client, resolver remotes.Resolver, imageName string) (containerd.Image, error) {
|
||||
|
||||
var empty containerd.Image
|
||||
|
||||
rOpts := []containerd.RemoteOpt{
|
||||
containerd.WithPullUnpack,
|
||||
}
|
||||
if resolver != nil {
|
||||
rOpts = append(rOpts, containerd.WithResolver(resolver))
|
||||
}
|
||||
img, err := client.Pull(ctx, imageName, rOpts...)
|
||||
if err != nil {
|
||||
return empty, fmt.Errorf("cannot pull: %s", err)
|
||||
}
|
||||
|
||||
return img, nil
|
||||
}
|
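A hedged sketch of how a caller could combine Remove and PrepareImage from this package during a redeploy. The namespace, image and service name are illustrative only, and the usual imports (context, log, containerd, namespaces and this service package) are assumed.

func redeploySketch(ctx context.Context, client *containerd.Client) error {
	ctx = namespaces.WithNamespace(ctx, "openfaas-fn") // illustrative namespace

	// Tear down any existing task, container and snapshot for the name.
	if err := service.Remove(ctx, client, "figlet"); err != nil {
		return err
	}

	// Ensure the image is present and unpacked for the overlayfs snapshotter.
	img, err := service.PrepareImage(ctx, client, "docker.io/functions/figlet:latest", "overlayfs", false)
	if err != nil {
		return err
	}
	log.Printf("image ready: %s", img.Name())
	return nil
}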
@@ -6,76 +6,103 @@ import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
"sort"
|
||||
|
||||
"github.com/alexellis/faasd/pkg/weave"
|
||||
"github.com/alexellis/k3sup/pkg/env"
|
||||
"github.com/compose-spec/compose-go/loader"
|
||||
compose "github.com/compose-spec/compose-go/types"
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"golang.org/x/sys/unix"
|
||||
"github.com/containerd/containerd/oci"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
const defaultSnapshotter = "overlayfs"
|
||||
const (
|
||||
defaultSnapshotter = "overlayfs"
|
||||
workingDirectoryPermission = 0644
|
||||
// faasdNamespace is the containerd namespace services are created
|
||||
faasdNamespace = "default"
|
||||
faasServicesPullAlways = false
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
Image string
|
||||
Env []string
|
||||
Name string
|
||||
Mounts []Mount
|
||||
Caps []string
|
||||
Args []string
|
||||
DependsOn []string
|
||||
Ports []ServicePort
|
||||
}
|
||||
|
||||
type ServicePort struct {
|
||||
TargetPort uint32
|
||||
Port uint32
|
||||
HostIP string
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
Src string
|
||||
Dest string
|
||||
}
|
||||
|
||||
type Supervisor struct {
|
||||
client *containerd.Client
|
||||
cni gocni.CNI
|
||||
}
|
||||
|
||||
func NewSupervisor(sock string) (*Supervisor, error) {
|
||||
client, err := containerd.New(sock)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cni, err := cninetwork.InitNetwork()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Supervisor{
|
||||
client: client,
|
||||
cni: cni,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Supervisor) Close() {
|
||||
defer s.client.Close()
|
||||
}
|
||||
|
||||
func (s *Supervisor) Remove(svcs []Service) error {
|
||||
ctx := namespaces.WithNamespace(context.Background(), "default")
|
||||
|
||||
for _, svc := range svcs {
|
||||
err := removeContainer(ctx, s.client, svc.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Supervisor) Start(svcs []Service) error {
|
||||
ctx := namespaces.WithNamespace(context.Background(), "default")
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasdNamespace)
|
||||
|
||||
wd, _ := os.Getwd()
|
||||
|
||||
gw, err := cninetwork.CNIGateway()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hosts := fmt.Sprintf(`
|
||||
127.0.0.1 localhost
|
||||
%s faasd-provider`, gw)
|
||||
|
||||
writeHostsErr := ioutil.WriteFile(path.Join(wd, "hosts"),
|
||||
[]byte(`127.0.0.1 localhost
|
||||
172.19.0.1 faas-containerd`), 0644)
|
||||
[]byte(hosts), workingDirectoryPermission)
|
||||
|
||||
if writeHostsErr != nil {
|
||||
return fmt.Errorf("cannot write hosts file: %s", writeHostsErr)
|
||||
}
|
||||
// os.Chown("hosts", 101, 101)
|
||||
|
||||
images := map[string]containerd.Image{}
|
||||
|
||||
for _, svc := range svcs {
|
||||
fmt.Printf("Preparing: %s with image: %s\n", svc.Name, svc.Image)
|
||||
fmt.Printf("Preparing %s with image: %s\n", svc.Name, svc.Image)
|
||||
|
||||
img, err := prepareImage(ctx, s.client, svc.Image)
|
||||
img, err := service.PrepareImage(ctx, s.client, svc.Image, defaultSnapshotter, faasServicesPullAlways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -85,14 +112,28 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
}
|
||||
|
||||
for _, svc := range svcs {
|
||||
fmt.Printf("Reconciling: %s\n", svc.Name)
|
||||
|
||||
image := images[svc.Name]
|
||||
|
||||
containerErr := removeContainer(ctx, s.client, svc.Name)
|
||||
fmt.Printf("Removing old container for: %s\n", svc.Name)
|
||||
containerErr := service.Remove(ctx, s.client, svc.Name)
|
||||
if containerErr != nil {
|
||||
return containerErr
|
||||
}
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
for _, key := range order {
|
||||
|
||||
var svc *Service
|
||||
for _, s := range svcs {
|
||||
if s.Name == key {
|
||||
svc = &s
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Starting: %s\n", svc.Name)
|
||||
|
||||
image := images[svc.Name]
|
||||
|
||||
mounts := []specs.Mount{}
|
||||
if len(svc.Mounts) > 0 {
|
||||
@@ -104,7 +145,6 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
Options: []string{"rbind", "rw"},
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
mounts = append(mounts, specs.Mount{
|
||||
@@ -121,28 +161,7 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
Options: []string{"rbind", "ro"},
|
||||
})
|
||||
|
||||
hook := func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
|
||||
if s.Hooks == nil {
|
||||
s.Hooks = &specs.Hooks{}
|
||||
}
|
||||
netnsPath, err := exec.LookPath("netns")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.Hooks.Prestart = []specs.Hook{
|
||||
{
|
||||
Path: netnsPath,
|
||||
Args: []string{
|
||||
"netns",
|
||||
},
|
||||
Env: os.Environ(),
|
||||
},
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
newContainer, containerCreateErr := s.client.NewContainer(
|
||||
newContainer, err := s.client.NewContainer(
|
||||
ctx,
|
||||
svc.Name,
|
||||
containerd.WithImage(image),
|
||||
@@ -151,47 +170,60 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
oci.WithCapabilities(svc.Caps),
|
||||
oci.WithMounts(mounts),
|
||||
withOCIArgs(svc.Args),
|
||||
hook,
|
||||
oci.WithEnv(svc.Env)),
|
||||
)
|
||||
|
||||
if containerCreateErr != nil {
|
||||
log.Println(containerCreateErr)
|
||||
return containerCreateErr
|
||||
}
|
||||
|
||||
log.Printf("Created container %s\n", newContainer.ID())
|
||||
|
||||
task, err := newContainer.NewTask(ctx, cio.NewCreator(cio.WithStdio))
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
log.Printf("Error creating container: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
ip := getIP(newContainer.ID(), task.Pid())
|
||||
log.Printf("Created container: %s\n", newContainer.ID())
|
||||
|
||||
task, err := newContainer.NewTask(ctx, cio.BinaryIO("/usr/local/bin/faasd", nil))
|
||||
if err != nil {
|
||||
log.Printf("Error creating task: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
labels := map[string]string{}
|
||||
network, err := cninetwork.CreateCNINetwork(ctx, s.cni, task, labels)
|
||||
if err != nil {
|
||||
log.Printf("Error creating CNI for %s: %s", svc.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
ip, err := cninetwork.GetIPAddress(network, task)
|
||||
if err != nil {
|
||||
log.Printf("Error getting IP for %s: %s", svc.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("%s has IP: %s\n", newContainer.ID(), ip.String())
|
||||
|
||||
hosts, _ := ioutil.ReadFile("hosts")
|
||||
|
||||
hosts = []byte(string(hosts) + fmt.Sprintf(`
|
||||
%s %s
|
||||
`, ip, svc.Name))
|
||||
writeErr := ioutil.WriteFile("hosts", hosts, 0644)
|
||||
writeErr := ioutil.WriteFile("hosts", hosts, workingDirectoryPermission)
|
||||
|
||||
if writeErr != nil {
|
||||
log.Println("Error writing hosts file")
|
||||
log.Printf("Error writing file %s %s\n", "hosts", writeErr)
|
||||
}
|
||||
// os.Chown("hosts", 101, 101)
|
||||
|
||||
exitStatusC, err := task.Wait(ctx)
|
||||
_, err = task.Wait(ctx)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
log.Printf("Wait err: %s\n", err)
|
||||
return err
|
||||
}
|
||||
log.Println("Exited: ", exitStatusC)
|
||||
|
||||
// call start on the task to execute the redis server
|
||||
if err := task.Start(ctx); err != nil {
|
||||
log.Println("Task err: ", err)
|
||||
log.Printf("Task: %s\tContainer: %s\n", task.ID(), newContainer.ID())
|
||||
// log.Println("Exited: ", exitStatusC)
|
||||
|
||||
if err = task.Start(ctx); err != nil {
|
||||
log.Printf("Task err: %s\n", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -199,68 +231,26 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func prepareImage(ctx context.Context, client *containerd.Client, imageName string) (containerd.Image, error) {
|
||||
snapshotter := defaultSnapshotter
|
||||
func (s *Supervisor) Close() {
|
||||
defer s.client.Close()
|
||||
}
|
||||
|
||||
var empty containerd.Image
|
||||
image, err := client.GetImage(ctx, imageName)
|
||||
if err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
return empty, err
|
||||
}
|
||||
func (s *Supervisor) Remove(svcs []Service) error {
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasdNamespace)
|
||||
|
||||
img, err := client.Pull(ctx, imageName, containerd.WithPullUnpack)
|
||||
for _, svc := range svcs {
|
||||
err := cninetwork.DeleteCNINetwork(ctx, s.cni, s.client, svc.Name)
|
||||
if err != nil {
|
||||
return empty, fmt.Errorf("cannot pull: %s", err)
|
||||
log.Printf("[Delete] error removing CNI network for %s, %s\n", svc.Name, err)
|
||||
return err
|
||||
}
|
||||
image = img
|
||||
}
|
||||
|
||||
unpacked, err := image.IsUnpacked(ctx, snapshotter)
|
||||
if err != nil {
|
||||
return empty, fmt.Errorf("cannot check if unpacked: %s", err)
|
||||
}
|
||||
|
||||
if !unpacked {
|
||||
if err := image.Unpack(ctx, snapshotter); err != nil {
|
||||
return empty, fmt.Errorf("cannot unpack: %s", err)
|
||||
err = service.Remove(ctx, s.client, svc.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return image, nil
|
||||
}
|
||||
|
||||
func getIP(containerID string, taskPID uint32) string {
|
||||
// https://github.com/weaveworks/weave/blob/master/net/netdev.go
|
||||
|
||||
peerIDs, err := weave.ConnectedToBridgeVethPeerIds("netns0")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
addrs, addrsErr := weave.GetNetDevsByVethPeerIds(int(taskPID), peerIDs)
|
||||
if addrsErr != nil {
|
||||
log.Fatal(addrsErr)
|
||||
}
|
||||
if len(addrs) > 0 {
|
||||
return addrs[0].CIDRs[0].IP.String()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
type Service struct {
|
||||
Image string
|
||||
Env []string
|
||||
Name string
|
||||
Mounts []Mount
|
||||
Caps []string
|
||||
Args []string
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
Src string
|
||||
Dest string
|
||||
return nil
|
||||
}
|
||||
|
||||
func withOCIArgs(args []string) oci.SpecOpts {
|
||||
@@ -269,75 +259,139 @@ func withOCIArgs(args []string) oci.SpecOpts {
|
||||
}
|
||||
|
||||
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// From Stellar
|
||||
func killTask(ctx context.Context, task containerd.Task) error {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
var err error
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if task != nil {
|
||||
wait, err := task.Wait(ctx)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error waiting on task: %s", err)
|
||||
return
|
||||
}
|
||||
if err := task.Kill(ctx, unix.SIGTERM, containerd.WithKillAll); err != nil {
|
||||
log.Printf("error killing container task: %s", err)
|
||||
}
|
||||
select {
|
||||
case <-wait:
|
||||
task.Delete(ctx)
|
||||
return
|
||||
case <-time.After(5 * time.Second):
|
||||
if err := task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil {
|
||||
log.Printf("error force killing container task: %s", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
wg.Wait()
|
||||
// ParseCompose converts a docker-compose Config into a service list that we can
|
||||
// pass to the supervisor client Start.
|
||||
//
|
||||
// The only anticipated error is a failure if the value mounts are not of type `bind`.
|
||||
func ParseCompose(config *compose.Config) ([]Service, error) {
|
||||
services := make([]Service, len(config.Services))
|
||||
for idx, s := range config.Services {
|
||||
// environment is a map[string]*string
|
||||
// but we want a []string
|
||||
|
||||
return err
|
||||
}
|
||||
var env []string
|
||||
|
||||
func removeContainer(ctx context.Context, client *containerd.Client, name string) error {
|
||||
|
||||
container, containerErr := client.LoadContainer(ctx, name)
|
||||
|
||||
if containerErr == nil {
|
||||
found := true
|
||||
t, err := container.Task(ctx, nil)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
found = false
|
||||
envKeys := sortedEnvKeys(s.Environment)
|
||||
for _, name := range envKeys {
|
||||
value := s.Environment[name]
|
||||
if value == nil {
|
||||
env = append(env, fmt.Sprintf(`%s=""`, name))
|
||||
} else {
|
||||
return fmt.Errorf("unable to get task %s: ", err)
|
||||
env = append(env, fmt.Sprintf(`%s=%s`, name, *value))
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
status, _ := t.Status(ctx)
|
||||
fmt.Printf("Status of %s is: %s\n", name, status.Status)
|
||||
|
||||
log.Printf("Need to kill %s\n", name)
|
||||
err := killTask(ctx, t)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error killing task %s, %s, %s", container.ID(), name, err)
|
||||
var mounts []Mount
|
||||
for _, v := range s.Volumes {
|
||||
if v.Type != "bind" {
|
||||
return nil, errors.Errorf("unsupported volume mount type '%s' when parsing service '%s'", v.Type, s.Name)
|
||||
}
|
||||
mounts = append(mounts, Mount{
|
||||
Src: v.Source,
|
||||
Dest: v.Target,
|
||||
})
|
||||
}
|
||||
|
||||
err = container.Delete(ctx, containerd.WithSnapshotCleanup)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting container %s, %s, %s", container.ID(), name, err)
|
||||
services[idx] = Service{
|
||||
Name: s.Name,
|
||||
Image: s.Image,
|
||||
// ShellCommand is just an alias of string slice
|
||||
Args: []string(s.Command),
|
||||
Caps: s.CapAdd,
|
||||
Env: env,
|
||||
Mounts: mounts,
|
||||
DependsOn: s.DependsOn,
|
||||
Ports: convertPorts(s.Ports),
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func convertPorts(ports []compose.ServicePortConfig) []ServicePort {
|
||||
servicePorts := []ServicePort{}
|
||||
for _, p := range ports {
|
||||
servicePorts = append(servicePorts, ServicePort{
|
||||
Port: p.Published,
|
||||
TargetPort: p.Target,
|
||||
HostIP: p.HostIP,
|
||||
})
|
||||
}
|
||||
|
||||
return servicePorts
|
||||
}
|
||||
|
||||
// LoadComposeFile is a helper method for loading a docker-compose file
|
||||
func LoadComposeFile(wd string, file string) (*compose.Config, error) {
|
||||
return LoadComposeFileWithArch(wd, file, env.GetClientArch)
|
||||
}
|
||||
|
||||
// LoadComposeFileWithArch is a helper method for loading a docker-compose file
|
||||
func LoadComposeFileWithArch(wd string, file string, archGetter ArchGetter) (*compose.Config, error) {
|
||||
|
||||
file = path.Join(wd, file)
|
||||
b, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config, err := loader.ParseYAML(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
archSuffix, err := GetArchSuffix(archGetter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var files []compose.ConfigFile
|
||||
files = append(files, compose.ConfigFile{Filename: file, Config: config})
|
||||
|
||||
return loader.Load(compose.ConfigDetails{
|
||||
WorkingDir: wd,
|
||||
ConfigFiles: files,
|
||||
Environment: map[string]string{
|
||||
"ARCH_SUFFIX": archSuffix,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func sortedEnvKeys(env map[string]*string) (keys []string) {
|
||||
for k := range env {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// ArchGetter provides client CPU architecture and
|
||||
// client OS
|
||||
type ArchGetter func() (string, string)
|
||||
|
||||
// GetArchSuffix provides client CPU architecture and
|
||||
// client OS from ArchGetter
|
||||
func GetArchSuffix(getClientArch ArchGetter) (suffix string, err error) {
|
||||
clientArch, clientOS := getClientArch()
|
||||
|
||||
if clientOS != "Linux" {
|
||||
return "", fmt.Errorf("you can only use faasd with Linux")
|
||||
}
|
||||
|
||||
switch clientArch {
|
||||
case "x86_64":
|
||||
// no suffix needed
|
||||
return "", nil
|
||||
case "armhf", "armv7l":
|
||||
return "-armhf", nil
|
||||
case "arm64", "aarch64":
|
||||
return "-arm64", nil
|
||||
default:
|
||||
// unknown, so use the default without suffix for now
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
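The new test file below exercises the compose-loading path end to end; as a compact orientation only, the intended flow is roughly the following sketch (the working directory mirrors the "/var/lib/faasd" convention noted in the compose file comments, and the function name is hypothetical):

func loadServicesSketch() ([]Service, error) {
	// Read docker-compose.yaml, substituting ARCH_SUFFIX for the detected
	// CPU architecture, then flatten it into Services for the Supervisor.
	cfg, err := LoadComposeFile("/var/lib/faasd", "docker-compose.yaml")
	if err != nil {
		return nil, err
	}
	return ParseCompose(cfg)
}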
pkg/supervisor_test.go (new file, 262 lines)
@@ -0,0 +1,262 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_ParseCompose(t *testing.T) {
|
||||
|
||||
wd := "testdata"
|
||||
|
||||
want := map[string]Service{
|
||||
"basic-auth-plugin": {
|
||||
Name: "basic-auth-plugin",
|
||||
Image: "docker.io/openfaas/basic-auth-plugin:0.18.17",
|
||||
Env: []string{
|
||||
"pass_filename=basic-auth-password",
|
||||
"port=8080",
|
||||
"secret_mount_path=/run/secrets",
|
||||
"user_filename=basic-auth-user",
|
||||
},
|
||||
Mounts: []Mount{
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||
},
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
"nats": {
|
||||
Name: "nats",
|
||||
Image: "docker.io/library/nats-streaming:0.11.2",
|
||||
Args: []string{"/nats-streaming-server", "-m", "8222", "--store=memory", "--cluster_id=faas-cluster"},
|
||||
},
|
||||
"prometheus": {
|
||||
Name: "prometheus",
|
||||
Image: "docker.io/prom/prometheus:v2.14.0",
|
||||
Mounts: []Mount{
|
||||
{
|
||||
Src: path.Join(wd, "prometheus.yml"),
|
||||
Dest: "/etc/prometheus/prometheus.yml",
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
"gateway": {
|
||||
Name: "gateway",
|
||||
Env: []string{
|
||||
"auth_proxy_pass_body=false",
|
||||
"auth_proxy_url=http://basic-auth-plugin:8080/validate",
|
||||
"basic_auth=true",
|
||||
"direct_functions=false",
|
||||
"faas_nats_address=nats",
|
||||
"faas_nats_port=4222",
|
||||
"functions_provider_url=http://faasd-provider:8081/",
|
||||
"read_timeout=60s",
|
||||
"scale_from_zero=true",
|
||||
"secret_mount_path=/run/secrets",
|
||||
"upstream_timeout=65s",
|
||||
"write_timeout=60s",
|
||||
},
|
||||
Image: "docker.io/openfaas/gateway:0.18.17",
|
||||
Mounts: []Mount{
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||
},
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
DependsOn: []string{"nats"},
|
||||
},
|
||||
"queue-worker": {
|
||||
Name: "queue-worker",
|
||||
Env: []string{
|
||||
"ack_wait=5m5s",
|
||||
"basic_auth=true",
|
||||
"faas_gateway_address=gateway",
|
||||
"faas_nats_address=nats",
|
||||
"faas_nats_port=4222",
|
||||
"gateway_invoke=true",
|
||||
"max_inflight=1",
|
||||
"secret_mount_path=/run/secrets",
|
||||
"write_debug=false",
|
||||
},
|
||||
Image: "docker.io/openfaas/queue-worker:0.11.2",
|
||||
Mounts: []Mount{
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||
},
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
}
|
||||
|
||||
compose, err := LoadComposeFileWithArch(wd, "docker-compose.yaml", func() (string, string) { return "x86_64", "Linux" })
|
||||
if err != nil {
|
||||
t.Fatalf("can't read docker-compose file: %s", err)
|
||||
}
|
||||
|
||||
services, err := ParseCompose(compose)
|
||||
if err != nil {
|
||||
t.Fatalf("can't parse compose services: %s", err)
|
||||
}
|
||||
|
||||
if len(services) != len(want) {
|
||||
t.Fatalf("want: %d services, got: %d", len(want), len(services))
|
||||
}
|
||||
|
||||
for _, service := range services {
|
||||
exp, ok := want[service.Name]
|
||||
|
||||
if service.Name == "gateway" {
|
||||
if len(service.DependsOn) == 0 {
|
||||
t.Fatalf("gateway should have at least one depends_on entry")
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
t.Fatalf("incorrect service: %s", service.Name)
|
||||
}
|
||||
|
||||
if service.Name != exp.Name {
|
||||
t.Fatalf("incorrect service Name:\n\twant: %s,\n\tgot: %s", exp.Name, service.Name)
|
||||
}
|
||||
|
||||
if service.Image != exp.Image {
|
||||
t.Fatalf("incorrect service Image:\n\twant: %s,\n\tgot: %s", exp.Image, service.Image)
|
||||
}
|
||||
|
||||
equalStringSlice(t, exp.Env, service.Env)
|
||||
equalStringSlice(t, exp.Caps, service.Caps)
|
||||
equalStringSlice(t, exp.Args, service.Args)
|
||||
|
||||
if !reflect.DeepEqual(exp.Mounts, service.Mounts) {
|
||||
t.Fatalf("incorrect service Mounts:\n\twant: %+v,\n\tgot: %+v", exp.Mounts, service.Mounts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func equalStringSlice(t *testing.T, want, found []string) {
|
||||
t.Helper()
|
||||
if (want == nil) != (found == nil) {
|
||||
t.Fatalf("unexpected nil slice: want %+v, got %+v", want, found)
|
||||
}
|
||||
|
||||
if len(want) != len(found) {
|
||||
t.Fatalf("unequal slice length: want %+v, got %+v", want, found)
|
||||
}
|
||||
|
||||
for i := range want {
|
||||
if want[i] != found[i] {
|
||||
t.Fatalf("unexpected value at postition %d: want %s, got %s", i, want[i], found[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func equalMountSlice(t *testing.T, want, found []Mount) {
|
||||
t.Helper()
|
||||
if (want == nil) != (found == nil) {
|
||||
t.Fatalf("unexpected nil slice: want %+v, got %+v", want, found)
|
||||
}
|
||||
|
||||
if len(want) != len(found) {
|
||||
t.Fatalf("unequal slice length: want %+v, got %+v", want, found)
|
||||
}
|
||||
|
||||
for i := range want {
|
||||
if !reflect.DeepEqual(want[i], found[i]) {
|
||||
t.Fatalf("unexpected value at postition %d: want %s, got %s", i, want[i], found[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GetArchSuffix(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
want string
|
||||
foundArch string
|
||||
foundOS string
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "error if os is not linux",
|
||||
foundOS: "mac",
|
||||
err: "you can only use faasd with Linux",
|
||||
},
|
||||
{
|
||||
name: "x86 has no suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "x86_64",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "unknown arch has no suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "anything_else",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "armhf has armhf suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "armhf",
|
||||
want: "-armhf",
|
||||
},
|
||||
{
|
||||
name: "armv7l has armhf suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "armv7l",
|
||||
want: "-armhf",
|
||||
},
|
||||
{
|
||||
name: "arm64 has arm64 suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "arm64",
|
||||
want: "-arm64",
|
||||
},
|
||||
{
|
||||
name: "aarch64 has arm64 suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "aarch64",
|
||||
want: "-arm64",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
suffix, err := GetArchSuffix(testArchGetter(tc.foundArch, tc.foundOS))
|
||||
if tc.err != "" && err == nil {
|
||||
t.Fatalf("want error %s but got nil", tc.err)
|
||||
} else if tc.err != "" && err.Error() != tc.err {
|
||||
t.Fatalf("want error %s, got %s", tc.err, err.Error())
|
||||
} else if tc.err == "" && err != nil {
|
||||
t.Fatalf("unexpected error %s", err.Error())
|
||||
}
|
||||
|
||||
if suffix != tc.want {
|
||||
t.Fatalf("want suffix %s, got %s", tc.want, suffix)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testArchGetter(arch, os string) ArchGetter {
|
||||
return func() (string, string) {
|
||||
return arch, os
|
||||
}
|
||||
}
|
@@ -64,23 +64,21 @@ func DaemonReload() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func InstallUnit(name string) error {
|
||||
func InstallUnit(name string, tokens map[string]string) error {
|
||||
if len(tokens["Cwd"]) == 0 {
|
||||
return fmt.Errorf("key Cwd expected in tokens parameter")
|
||||
}
|
||||
|
||||
tmplName := "./hack/" + name + ".service"
|
||||
tmpl, err := template.ParseFiles()
|
||||
tmpl, err := template.ParseFiles(tmplName)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading template %s, error %s", tmplName, err)
|
||||
}
|
||||
|
||||
wd, _ := os.Getwd()
|
||||
var tpl bytes.Buffer
|
||||
userData := struct {
|
||||
Cwd string
|
||||
}{
|
||||
Cwd: wd,
|
||||
}
|
||||
|
||||
err = tmpl.Execute(&tpl, userData)
|
||||
err = tmpl.Execute(&tpl, tokens)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
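A hedged sketch of how the new tokens parameter might be supplied by a caller. The unit name "faasd-provider" is illustrative, and the sketch assumes it sits in the same package as InstallUnit with os imported.

func installUnitSketch() error {
	wd, err := os.Getwd()
	if err != nil {
		return err
	}
	// "Cwd" is required: InstallUnit returns an error when the token is missing.
	return InstallUnit("faasd-provider", map[string]string{"Cwd": wd})
}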
pkg/testdata/docker-compose.yaml (new file, vendored, 98 lines)
@@ -0,0 +1,98 @@
|
||||
version: "3.7"
|
||||
services:
|
||||
basic-auth-plugin:
|
||||
image: "docker.io/openfaas/basic-auth-plugin:0.18.17${ARCH_SUFFIX}"
|
||||
environment:
|
||||
- port=8080
|
||||
- secret_mount_path=/run/secrets
|
||||
- user_filename=basic-auth-user
|
||||
- pass_filename=basic-auth-password
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
|
||||
nats:
|
||||
image: docker.io/library/nats-streaming:0.11.2
|
||||
command:
|
||||
- "/nats-streaming-server"
|
||||
- "-m"
|
||||
- "8222"
|
||||
- "--store=memory"
|
||||
- "--cluster_id=faas-cluster"
|
||||
ports:
|
||||
- "127.0.0.1:8222:8222"
|
||||
|
||||
prometheus:
|
||||
image: docker.io/prom/prometheus:v2.14.0
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./prometheus.yml
|
||||
target: /etc/prometheus/prometheus.yml
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
ports:
|
||||
- "127.0.0.1:9090:9090"
|
||||
|
||||
gateway:
|
||||
image: "docker.io/openfaas/gateway:0.18.17${ARCH_SUFFIX}"
|
||||
environment:
|
||||
- basic_auth=true
|
||||
- functions_provider_url=http://faasd-provider:8081/
|
||||
- direct_functions=false
|
||||
- read_timeout=60s
|
||||
- write_timeout=60s
|
||||
- upstream_timeout=65s
|
||||
- faas_nats_address=nats
|
||||
- faas_nats_port=4222
|
||||
- auth_proxy_url=http://basic-auth-plugin:8080/validate
|
||||
- auth_proxy_pass_body=false
|
||||
- secret_mount_path=/run/secrets
|
||||
- scale_from_zero=true
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
depends_on:
|
||||
- basic-auth-plugin
|
||||
- nats
|
||||
- prometheus
|
||||
ports:
|
||||
- "8080:8080"
|
||||
|
||||
queue-worker:
|
||||
image: docker.io/openfaas/queue-worker:0.11.2
|
||||
environment:
|
||||
- faas_nats_address=nats
|
||||
- faas_nats_port=4222
|
||||
- gateway_invoke=true
|
||||
- faas_gateway_address=gateway
|
||||
- ack_wait=5m5s
|
||||
- max_inflight=1
|
||||
- write_debug=false
|
||||
- basic_auth=true
|
||||
- secret_mount_path=/run/secrets
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
depends_on:
|
||||
- nats
|
@@ -1,23 +1 @@
|
||||
package pkg
|
||||
|
||||
var (
|
||||
//GitCommit Git Commit SHA
|
||||
GitCommit string
|
||||
//Version version of the CLI
|
||||
Version string
|
||||
)
|
||||
|
||||
//GetVersion get latest version
|
||||
func GetVersion() string {
|
||||
if len(Version) == 0 {
|
||||
return "dev"
|
||||
}
|
||||
return Version
|
||||
}
|
||||
|
||||
const Logo = ` __ _
|
||||
/ _| __ _ __ _ ___ __| |
|
||||
| |_ / _` + "`" + ` |/ _` + "`" + ` / __|/ _` + "`" + ` |
|
||||
| _| (_| | (_| \__ \ (_| |
|
||||
|_| \__,_|\__,_|___/\__,_|
|
||||
`
|
||||
|
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/NOTICE (deleted, generated, vendored, 22 lines)
@@ -1,22 +0,0 @@
|
||||
runhcs is a fork of runc.
|
||||
|
||||
The following is runc's legal notice.
|
||||
|
||||
---
|
||||
|
||||
runc
|
||||
|
||||
Copyright 2012-2015 Docker, Inc.
|
||||
|
||||
This product includes software developed at Docker, Inc. (http://www.docker.com).
|
||||
|
||||
The following is courtesy of our legal counsel:
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the
|
||||
United States and other governments.
|
||||
It is your responsibility to ensure that your use and/or transfer does not
|
||||
violate applicable laws.
|
||||
|
||||
For more information, please see http://www.bis.doc.gov
|
||||
|
||||
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go (deleted, generated, vendored, 943 lines)
@@ -1,943 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
mksyscall_windows generates windows system call bodies
|
||||
|
||||
It parses all files specified on command line containing function
|
||||
prototypes (like syscall_windows.go) and prints system call bodies
|
||||
to standard output.
|
||||
|
||||
The prototypes are marked by lines beginning with "//sys" and read
|
||||
like func declarations if //sys is replaced by func, but:
|
||||
|
||||
* The parameter lists must give a name for each argument. This
|
||||
includes return parameters.
|
||||
|
||||
* The parameter lists must give a type for each argument:
|
||||
the (x, y, z int) shorthand is not allowed.
|
||||
|
||||
* If the return parameter is an error number, it must be named err.
|
||||
|
||||
* If go func name needs to be different from it's winapi dll name,
|
||||
the winapi name could be specified at the end, after "=" sign, like
|
||||
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
|
||||
|
||||
* Each function that returns err needs to supply a condition, that
|
||||
return value of winapi will be tested against to detect failure.
|
||||
This would set err to windows "last-error", otherwise it will be nil.
|
||||
The value can be provided at end of //sys declaration, like
|
||||
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
|
||||
and is [failretval==0] by default.
|
||||
|
||||
Usage:
|
||||
mksyscall_windows [flags] [path ...]
|
||||
|
||||
The flags are:
|
||||
-output
|
||||
Specify output file name (outputs to console if blank).
|
||||
-trace
|
||||
Generate print statement after every syscall.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
var (
|
||||
filename = flag.String("output", "", "output file name (standard output if omitted)")
|
||||
printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
|
||||
systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
|
||||
winio = flag.Bool("winio", false, "import go-winio")
|
||||
)
|
||||
|
||||
func trim(s string) string {
|
||||
return strings.Trim(s, " \t")
|
||||
}
|
||||
|
||||
var packageName string
|
||||
|
||||
func packagename() string {
|
||||
return packageName
|
||||
}
|
||||
|
||||
func syscalldot() string {
|
||||
if packageName == "syscall" {
|
||||
return ""
|
||||
}
|
||||
return "syscall."
|
||||
}
|
||||
|
||||
// Param is function parameter
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
fn *Fn
|
||||
tmpVarIdx int
|
||||
}
|
||||
|
||||
// tmpVar returns temp variable name that will be used to represent p during syscall.
|
||||
func (p *Param) tmpVar() string {
|
||||
if p.tmpVarIdx < 0 {
|
||||
p.tmpVarIdx = p.fn.curTmpVarIdx
|
||||
p.fn.curTmpVarIdx++
|
||||
}
|
||||
return fmt.Sprintf("_p%d", p.tmpVarIdx)
|
||||
}
|
||||
|
||||
// BoolTmpVarCode returns source code for bool temp variable.
|
||||
func (p *Param) BoolTmpVarCode() string {
|
||||
const code = `var %s uint32
|
||||
if %s {
|
||||
%s = 1
|
||||
} else {
|
||||
%s = 0
|
||||
}`
|
||||
tmp := p.tmpVar()
|
||||
return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
|
||||
}
|
||||
|
||||
// SliceTmpVarCode returns source code for slice temp variable.
|
||||
func (p *Param) SliceTmpVarCode() string {
|
||||
const code = `var %s *%s
|
||||
if len(%s) > 0 {
|
||||
%s = &%s[0]
|
||||
}`
|
||||
tmp := p.tmpVar()
|
||||
return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
|
||||
}
|
||||
|
||||
// StringTmpVarCode returns source code for string temp variable.
|
||||
func (p *Param) StringTmpVarCode() string {
|
||||
errvar := p.fn.Rets.ErrorVarName()
|
||||
if errvar == "" {
|
||||
errvar = "_"
|
||||
}
|
||||
tmp := p.tmpVar()
|
||||
const code = `var %s %s
|
||||
%s, %s = %s(%s)`
|
||||
s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
|
||||
if errvar == "-" {
|
||||
return s
|
||||
}
|
||||
const morecode = `
|
||||
if %s != nil {
|
||||
return
|
||||
}`
|
||||
return s + fmt.Sprintf(morecode, errvar)
|
||||
}
|
||||
|
||||
// TmpVarCode returns source code for temp variable.
|
||||
func (p *Param) TmpVarCode() string {
|
||||
switch {
|
||||
case p.Type == "bool":
|
||||
return p.BoolTmpVarCode()
|
||||
case strings.HasPrefix(p.Type, "[]"):
|
||||
return p.SliceTmpVarCode()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// TmpVarHelperCode returns source code for helper's temp variable.
|
||||
func (p *Param) TmpVarHelperCode() string {
|
||||
if p.Type != "string" {
|
||||
return ""
|
||||
}
|
||||
return p.StringTmpVarCode()
|
||||
}
|
||||
|
||||
// SyscallArgList returns source code fragments representing p parameter
|
||||
// in syscall. Slices are translated into 2 syscall parameters: pointer to
|
||||
// the first element and length.
|
||||
func (p *Param) SyscallArgList() []string {
|
||||
t := p.HelperType()
|
||||
var s string
|
||||
switch {
|
||||
case t[0] == '*':
|
||||
s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
|
||||
case t == "bool":
|
||||
s = p.tmpVar()
|
||||
case strings.HasPrefix(t, "[]"):
|
||||
return []string{
|
||||
fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
|
||||
fmt.Sprintf("uintptr(len(%s))", p.Name),
|
||||
}
|
||||
default:
|
||||
s = p.Name
|
||||
}
|
||||
return []string{fmt.Sprintf("uintptr(%s)", s)}
|
||||
}
|
||||
|
||||
// IsError determines if p parameter is used to return error.
|
||||
func (p *Param) IsError() bool {
|
||||
return p.Name == "err" && p.Type == "error"
|
||||
}
|
||||
|
||||
// HelperType returns type of parameter p used in helper function.
|
||||
func (p *Param) HelperType() string {
|
||||
if p.Type == "string" {
|
||||
return p.fn.StrconvType()
|
||||
}
|
||||
return p.Type
|
||||
}
|
||||
|
||||
// join concatenates parameters ps into a string with sep separator.
|
||||
// Each parameter is converted into string by applying fn to it
|
||||
// before conversion.
|
||||
func join(ps []*Param, fn func(*Param) string, sep string) string {
|
||||
if len(ps) == 0 {
|
||||
return ""
|
||||
}
|
||||
a := make([]string, 0)
|
||||
for _, p := range ps {
|
||||
a = append(a, fn(p))
|
||||
}
|
||||
return strings.Join(a, sep)
|
||||
}
|
||||
|
||||
// Rets describes function return parameters.
|
||||
type Rets struct {
|
||||
Name string
|
||||
Type string
|
||||
ReturnsError bool
|
||||
FailCond string
|
||||
}
|
||||
|
||||
// ErrorVarName returns error variable name for r.
|
||||
func (r *Rets) ErrorVarName() string {
|
||||
if r.ReturnsError {
|
||||
return "err"
|
||||
}
|
||||
if r.Type == "error" {
|
||||
return r.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ToParams converts r into slice of *Param.
|
||||
func (r *Rets) ToParams() []*Param {
|
||||
ps := make([]*Param, 0)
|
||||
if len(r.Name) > 0 {
|
||||
ps = append(ps, &Param{Name: r.Name, Type: r.Type})
|
||||
}
|
||||
if r.ReturnsError {
|
||||
ps = append(ps, &Param{Name: "err", Type: "error"})
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
// List returns source code of syscall return parameters.
|
||||
func (r *Rets) List() string {
|
||||
s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
|
||||
if len(s) > 0 {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// PrintList returns source code of trace printing part correspondent
|
||||
// to syscall return values.
|
||||
func (r *Rets) PrintList() string {
|
||||
return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
|
||||
}
|
||||
|
||||
// SetReturnValuesCode returns source code that accepts syscall return values.
|
||||
func (r *Rets) SetReturnValuesCode() string {
|
||||
if r.Name == "" && !r.ReturnsError {
|
||||
return ""
|
||||
}
|
||||
retvar := "r0"
|
||||
if r.Name == "" {
|
||||
retvar = "r1"
|
||||
}
|
||||
errvar := "_"
|
||||
if r.ReturnsError {
|
||||
errvar = "e1"
|
||||
}
|
||||
return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
|
||||
}
|
||||
|
||||
func (r *Rets) useLongHandleErrorCode(retvar string) string {
|
||||
const code = `if %s {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = %sEINVAL
|
||||
}
|
||||
}`
|
||||
cond := retvar + " == 0"
|
||||
if r.FailCond != "" {
|
||||
cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
|
||||
}
|
||||
return fmt.Sprintf(code, cond, syscalldot())
|
||||
}
|
||||
|
||||
// SetErrorCode returns source code that sets return parameters.
|
||||
func (r *Rets) SetErrorCode() string {
|
||||
const code = `if r0 != 0 {
|
||||
%s = %sErrno(r0)
|
||||
}`
|
||||
const hrCode = `if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
}
|
||||
%s = %sErrno(r0)
|
||||
}`
|
||||
if r.Name == "" && !r.ReturnsError {
|
||||
return ""
|
||||
}
|
||||
if r.Name == "" {
|
||||
return r.useLongHandleErrorCode("r1")
|
||||
}
|
||||
if r.Type == "error" {
|
||||
if r.Name == "hr" {
|
||||
return fmt.Sprintf(hrCode, r.Name, syscalldot())
|
||||
} else {
|
||||
return fmt.Sprintf(code, r.Name, syscalldot())
|
||||
}
|
||||
}
|
||||
s := ""
|
||||
switch {
|
||||
case r.Type[0] == '*':
|
||||
s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
|
||||
case r.Type == "bool":
|
||||
s = fmt.Sprintf("%s = r0 != 0", r.Name)
|
||||
default:
|
||||
s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
|
||||
}
|
||||
if !r.ReturnsError {
|
||||
return s
|
||||
}
|
||||
return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
|
||||
}
|
||||
|
||||
// Fn describes syscall function.
|
||||
type Fn struct {
|
||||
Name string
|
||||
Params []*Param
|
||||
Rets *Rets
|
||||
PrintTrace bool
|
||||
confirmproc bool
|
||||
dllname string
|
||||
dllfuncname string
|
||||
src string
|
||||
// TODO: get rid of this field and just use parameter index instead
|
||||
curTmpVarIdx int // insure tmp variables have uniq names
|
||||
}
|
||||
|
||||
// extractParams parses s to extract function parameters.
|
||||
func extractParams(s string, f *Fn) ([]*Param, error) {
|
||||
s = trim(s)
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
a := strings.Split(s, ",")
|
||||
ps := make([]*Param, len(a))
|
||||
for i := range ps {
|
||||
s2 := trim(a[i])
|
||||
b := strings.Split(s2, " ")
|
||||
if len(b) != 2 {
|
||||
b = strings.Split(s2, "\t")
|
||||
if len(b) != 2 {
|
||||
return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
|
||||
}
|
||||
}
|
||||
ps[i] = &Param{
|
||||
Name: trim(b[0]),
|
||||
Type: trim(b[1]),
|
||||
fn: f,
|
||||
tmpVarIdx: -1,
|
||||
}
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// extractSection extracts text out of string s starting after start
|
||||
// and ending just before end. found return value will indicate success,
|
||||
// and prefix, body and suffix will contain correspondent parts of string s.
|
||||
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
|
||||
s = trim(s)
|
||||
if strings.HasPrefix(s, string(start)) {
|
||||
// no prefix
|
||||
body = s[1:]
|
||||
} else {
|
||||
a := strings.SplitN(s, string(start), 2)
|
||||
if len(a) != 2 {
|
||||
return "", "", s, false
|
||||
}
|
||||
prefix = a[0]
|
||||
body = a[1]
|
||||
}
|
||||
a := strings.SplitN(body, string(end), 2)
|
||||
if len(a) != 2 {
|
||||
return "", "", "", false
|
||||
}
|
||||
return prefix, a[0], a[1], true
|
||||
}
|
||||
|
||||
// newFn parses string s and return created function Fn.
|
||||
func newFn(s string) (*Fn, error) {
|
||||
s = trim(s)
|
||||
f := &Fn{
|
||||
Rets: &Rets{},
|
||||
src: s,
|
||||
PrintTrace: *printTraceFlag,
|
||||
}
|
||||
// function name and args
|
||||
prefix, body, s, found := extractSection(s, '(', ')')
|
||||
if !found || prefix == "" {
|
||||
return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
|
||||
}
|
||||
f.Name = prefix
|
||||
var err error
|
||||
f.Params, err = extractParams(body, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// return values
|
||||
_, body, s, found = extractSection(s, '(', ')')
|
||||
if found {
|
||||
r, err := extractParams(body, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(r) {
|
||||
case 0:
|
||||
case 1:
|
||||
if r[0].IsError() {
|
||||
f.Rets.ReturnsError = true
|
||||
} else {
|
||||
f.Rets.Name = r[0].Name
|
||||
f.Rets.Type = r[0].Type
|
||||
}
|
||||
case 2:
|
||||
if !r[1].IsError() {
|
||||
return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
|
||||
}
|
||||
f.Rets.ReturnsError = true
|
||||
f.Rets.Name = r[0].Name
|
||||
f.Rets.Type = r[0].Type
|
||||
default:
|
||||
return nil, errors.New("Too many return values in \"" + f.src + "\"")
|
||||
}
|
||||
}
|
||||
// fail condition
|
||||
_, body, s, found = extractSection(s, '[', ']')
|
||||
if found {
|
||||
f.Rets.FailCond = body
|
||||
}
|
||||
// dll and dll function names
|
||||
s = trim(s)
|
||||
if s == "" {
|
||||
return f, nil
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
||||
}
|
||||
s = trim(s[1:])
|
||||
a := strings.Split(s, ".")
|
||||
switch len(a) {
|
||||
case 1:
|
||||
f.dllfuncname = a[0]
|
||||
case 2:
|
||||
f.dllname = a[0]
|
||||
f.dllfuncname = a[1]
|
||||
default:
|
||||
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
||||
}
|
||||
if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
|
||||
f.confirmproc = true
|
||||
f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// DLLName returns DLL name for function f.
|
||||
func (f *Fn) DLLName() string {
|
||||
if f.dllname == "" {
|
||||
return "kernel32"
|
||||
}
|
||||
return f.dllname
|
||||
}
|
||||
|
||||
// DLLName returns DLL function name for function f.
|
||||
func (f *Fn) DLLFuncName() string {
|
||||
if f.dllfuncname == "" {
|
||||
return f.Name
|
||||
}
|
||||
return f.dllfuncname
|
||||
}
|
||||
|
||||
func (f *Fn) ConfirmProc() bool {
|
||||
return f.confirmproc
|
||||
}
|
||||
|
||||
// ParamList returns source code for function f parameters.
|
||||
func (f *Fn) ParamList() string {
|
||||
return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
|
||||
}
|
||||
|
||||
// HelperParamList returns source code for helper function f parameters.
|
||||
func (f *Fn) HelperParamList() string {
|
||||
return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
|
||||
}
|
||||
|
||||
// ParamPrintList returns source code of trace printing part correspondent
|
||||
// to syscall input parameters.
|
||||
func (f *Fn) ParamPrintList() string {
|
||||
return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
|
||||
}
|
||||
|
||||
// ParamCount return number of syscall parameters for function f.
|
||||
func (f *Fn) ParamCount() int {
|
||||
n := 0
|
||||
for _, p := range f.Params {
|
||||
n += len(p.SyscallArgList())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
|
||||
// to use. It returns parameter count for correspondent SyscallX function.
|
||||
func (f *Fn) SyscallParamCount() int {
|
||||
n := f.ParamCount()
|
||||
switch {
|
||||
case n <= 3:
|
||||
return 3
|
||||
case n <= 6:
|
||||
return 6
|
||||
case n <= 9:
|
||||
return 9
|
||||
case n <= 12:
|
||||
return 12
|
||||
case n <= 15:
|
||||
return 15
|
||||
default:
|
||||
panic("too many arguments to system call")
|
||||
}
|
||||
}
|
||||
|
||||
// Syscall determines which SyscallX function to use for function f.
|
||||
func (f *Fn) Syscall() string {
|
||||
c := f.SyscallParamCount()
|
||||
if c == 3 {
|
||||
return syscalldot() + "Syscall"
|
||||
}
|
||||
return syscalldot() + "Syscall" + strconv.Itoa(c)
|
||||
}
|
||||
|
||||
// SyscallParamList returns source code for SyscallX parameters for function f.
|
||||
func (f *Fn) SyscallParamList() string {
|
||||
a := make([]string, 0)
|
||||
for _, p := range f.Params {
|
||||
a = append(a, p.SyscallArgList()...)
|
||||
}
|
||||
for len(a) < f.SyscallParamCount() {
|
||||
a = append(a, "0")
|
||||
}
|
||||
return strings.Join(a, ", ")
|
||||
}
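To make the zero-padding above concrete, here is a sketch (not part of this diff) of what the generator emits for a hypothetical four-argument declaration; Foo and procFoo are made-up names and the error handling for e1 is elided:

//sys	Foo(a uintptr, b uintptr, c uintptr, d uintptr) (err error)

// Four syscall arguments round up to SyscallParamCount() == 6, so the call
// uses Syscall6 with the real argument count (4) and two trailing zeros:
r1, _, e1 := syscall.Syscall6(procFoo.Addr(), 4, a, b, c, d, 0, 0)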
|
||||
|
||||
// HelperCallParamList returns source code of call into function f helper.
|
||||
func (f *Fn) HelperCallParamList() string {
|
||||
a := make([]string, 0, len(f.Params))
|
||||
for _, p := range f.Params {
|
||||
s := p.Name
|
||||
if p.Type == "string" {
|
||||
s = p.tmpVar()
|
||||
}
|
||||
a = append(a, s)
|
||||
}
|
||||
return strings.Join(a, ", ")
|
||||
}
|
||||
|
||||
// IsUTF16 is true, if f is W (utf16) function. It is false
|
||||
// for all A (ascii) functions.
|
||||
func (_ *Fn) IsUTF16() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// StrconvFunc returns name of Go string to OS string function for f.
|
||||
func (f *Fn) StrconvFunc() string {
|
||||
if f.IsUTF16() {
|
||||
return syscalldot() + "UTF16PtrFromString"
|
||||
}
|
||||
return syscalldot() + "BytePtrFromString"
|
||||
}
|
||||
|
||||
// StrconvType returns Go type name used for OS string for f.
|
||||
func (f *Fn) StrconvType() string {
|
||||
if f.IsUTF16() {
|
||||
return "*uint16"
|
||||
}
|
||||
return "*byte"
|
||||
}
|
||||
|
||||
// HasStringParam is true, if f has at least one string parameter.
|
||||
// Otherwise it is false.
|
||||
func (f *Fn) HasStringParam() bool {
|
||||
for _, p := range f.Params {
|
||||
if p.Type == "string" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var uniqDllFuncName = make(map[string]bool)
|
||||
|
||||
// IsNotDuplicate is true if f is not a duplicated function
|
||||
func (f *Fn) IsNotDuplicate() bool {
|
||||
funcName := f.DLLFuncName()
|
||||
if uniqDllFuncName[funcName] == false {
|
||||
uniqDllFuncName[funcName] = true
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// HelperName returns name of function f helper.
|
||||
func (f *Fn) HelperName() string {
|
||||
if !f.HasStringParam() {
|
||||
return f.Name
|
||||
}
|
||||
return "_" + f.Name
|
||||
}
|
||||
|
||||
// Source files and functions.
|
||||
type Source struct {
|
||||
Funcs []*Fn
|
||||
Files []string
|
||||
StdLibImports []string
|
||||
ExternalImports []string
|
||||
}
|
||||
|
||||
func (src *Source) Import(pkg string) {
|
||||
src.StdLibImports = append(src.StdLibImports, pkg)
|
||||
sort.Strings(src.StdLibImports)
|
||||
}
|
||||
|
||||
func (src *Source) ExternalImport(pkg string) {
|
||||
src.ExternalImports = append(src.ExternalImports, pkg)
|
||||
sort.Strings(src.ExternalImports)
|
||||
}
|
||||
|
||||
// ParseFiles parses files listed in fs and extracts all syscall
|
||||
// functions listed in sys comments. It returns source files
|
||||
// and functions collection *Source if successful.
|
||||
func ParseFiles(fs []string) (*Source, error) {
|
||||
src := &Source{
|
||||
Funcs: make([]*Fn, 0),
|
||||
Files: make([]string, 0),
|
||||
StdLibImports: []string{
|
||||
"unsafe",
|
||||
},
|
||||
ExternalImports: make([]string, 0),
|
||||
}
|
||||
for _, file := range fs {
|
||||
if err := src.ParseFile(file); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return src, nil
|
||||
}
|
||||
|
||||
// DLLs returns dll names for a source set src.
|
||||
func (src *Source) DLLs() []string {
|
||||
uniq := make(map[string]bool)
|
||||
r := make([]string, 0)
|
||||
for _, f := range src.Funcs {
|
||||
name := f.DLLName()
|
||||
if _, found := uniq[name]; !found {
|
||||
uniq[name] = true
|
||||
r = append(r, name)
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// ParseFile adds additional file path to a source set src.
|
||||
func (src *Source) ParseFile(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
s := bufio.NewScanner(file)
|
||||
for s.Scan() {
|
||||
t := trim(s.Text())
|
||||
if len(t) < 7 {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(t, "//sys") {
|
||||
continue
|
||||
}
|
||||
t = t[5:]
|
||||
if !(t[0] == ' ' || t[0] == '\t') {
|
||||
continue
|
||||
}
|
||||
f, err := newFn(t[1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
src.Funcs = append(src.Funcs, f)
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
src.Files = append(src.Files, path)
|
||||
|
||||
// get package name
|
||||
fset := token.NewFileSet()
|
||||
_, err = file.Seek(0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
packageName = pkg.Name.Name
|
||||
|
||||
return nil
|
||||
}
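For reference, the //sys declarations that ParseFile scans for and newFn parses follow the conventional mksyscall grammar: name(params) (results) [failcond] = dll.function. Representative examples (illustrative, in the style used by golang.org/x/sys/windows; the bracketed part is the optional fail condition extracted above):

//sys	GetVersion() (ver uint32, err error) = kernel32.GetVersion
//sys	RegCloseKey(key syscall.Handle) (regerrno error) = advapi32.RegCloseKey
//sys	waitForSingleObject(handle syscall.Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = kernel32.WaitForSingleObject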
|
||||
|
||||
// IsStdRepo returns true if src is part of standard library.
|
||||
func (src *Source) IsStdRepo() (bool, error) {
|
||||
if len(src.Files) == 0 {
|
||||
return false, errors.New("no input files provided")
|
||||
}
|
||||
abspath, err := filepath.Abs(src.Files[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
goroot := runtime.GOROOT()
|
||||
if runtime.GOOS == "windows" {
|
||||
abspath = strings.ToLower(abspath)
|
||||
goroot = strings.ToLower(goroot)
|
||||
}
|
||||
sep := string(os.PathSeparator)
|
||||
if !strings.HasSuffix(goroot, sep) {
|
||||
goroot += sep
|
||||
}
|
||||
return strings.HasPrefix(abspath, goroot), nil
|
||||
}
|
||||
|
||||
// Generate output source file from a source set src.
|
||||
func (src *Source) Generate(w io.Writer) error {
|
||||
const (
|
||||
pkgStd = iota // any package in std library
|
||||
pkgXSysWindows // x/sys/windows package
|
||||
pkgOther
|
||||
)
|
||||
isStdRepo, err := src.IsStdRepo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var pkgtype int
|
||||
switch {
|
||||
case isStdRepo:
|
||||
pkgtype = pkgStd
|
||||
case packageName == "windows":
|
||||
// TODO: this needs better logic than just using package name
|
||||
pkgtype = pkgXSysWindows
|
||||
default:
|
||||
pkgtype = pkgOther
|
||||
}
|
||||
if *systemDLL {
|
||||
switch pkgtype {
|
||||
case pkgStd:
|
||||
src.Import("internal/syscall/windows/sysdll")
|
||||
case pkgXSysWindows:
|
||||
default:
|
||||
src.ExternalImport("golang.org/x/sys/windows")
|
||||
}
|
||||
}
|
||||
if *winio {
|
||||
src.ExternalImport("github.com/Microsoft/go-winio")
|
||||
}
|
||||
if packageName != "syscall" {
|
||||
src.Import("syscall")
|
||||
}
|
||||
funcMap := template.FuncMap{
|
||||
"packagename": packagename,
|
||||
"syscalldot": syscalldot,
|
||||
"newlazydll": func(dll string) string {
|
||||
arg := "\"" + dll + ".dll\""
|
||||
if !*systemDLL {
|
||||
return syscalldot() + "NewLazyDLL(" + arg + ")"
|
||||
}
|
||||
if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") {
|
||||
arg = strings.Replace(arg, "_", "-", -1)
|
||||
}
|
||||
switch pkgtype {
|
||||
case pkgStd:
|
||||
return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
|
||||
case pkgXSysWindows:
|
||||
return "NewLazySystemDLL(" + arg + ")"
|
||||
default:
|
||||
return "windows.NewLazySystemDLL(" + arg + ")"
|
||||
}
|
||||
},
|
||||
}
|
||||
t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
|
||||
err = t.Execute(w, src)
|
||||
if err != nil {
|
||||
return errors.New("Failed to execute template: " + err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
if len(flag.Args()) <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
||||
usage()
|
||||
}
|
||||
|
||||
src, err := ParseFiles(flag.Args())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := src.Generate(&buf); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
data, err := format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if *filename == "" {
|
||||
_, err = os.Stdout.Write(data)
|
||||
} else {
|
||||
err = ioutil.WriteFile(*filename, data, 0644)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: use println instead to print in the following template
|
||||
const srcTemplate = `
|
||||
|
||||
{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT
|
||||
|
||||
package {{packagename}}
|
||||
|
||||
import (
|
||||
{{range .StdLibImports}}"{{.}}"
|
||||
{{end}}
|
||||
|
||||
{{range .ExternalImports}}"{{.}}"
|
||||
{{end}}
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e {{syscalldot}}Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values seen on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
{{template "dlls" .}}
|
||||
{{template "funcnames" .}})
|
||||
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
|
||||
{{end}}
|
||||
|
||||
{{/* help functions */}}
|
||||
|
||||
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "helperbody"}}
|
||||
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
|
||||
{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{define "funcbody"}}
|
||||
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
|
||||
{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
|
||||
{{template "seterror" .}}{{template "printtrace" .}} return
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
|
||||
|
||||
{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
|
||||
|
||||
{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
|
||||
return
|
||||
}
|
||||
{{end}}{{end}}
|
||||
|
||||
|
||||
{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
|
||||
{{end}}{{end}}
|
||||
|
||||
`
|
57 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go (generated, vendored, new file)
@ -0,0 +1,57 @@
|
||||
package osversion
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// OSVersion is a wrapper for Windows version information
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
|
||||
type OSVersion struct {
|
||||
Version uint32
|
||||
MajorVersion uint8
|
||||
MinorVersion uint8
|
||||
Build uint16
|
||||
}
|
||||
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
|
||||
type osVersionInfoEx struct {
|
||||
OSVersionInfoSize uint32
|
||||
MajorVersion uint32
|
||||
MinorVersion uint32
|
||||
BuildNumber uint32
|
||||
PlatformID uint32
|
||||
CSDVersion [128]uint16
|
||||
ServicePackMajor uint16
|
||||
ServicePackMinor uint16
|
||||
SuiteMask uint16
|
||||
ProductType byte
|
||||
Reserve byte
|
||||
}
|
||||
|
||||
// Get gets the operating system version on Windows.
|
||||
// The calling application must be manifested to get the correct version information.
|
||||
func Get() OSVersion {
|
||||
var err error
|
||||
osv := OSVersion{}
|
||||
osv.Version, err = windows.GetVersion()
|
||||
if err != nil {
|
||||
// GetVersion never fails.
|
||||
panic(err)
|
||||
}
|
||||
osv.MajorVersion = uint8(osv.Version & 0xFF)
|
||||
osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
|
||||
osv.Build = uint16(osv.Version >> 16)
|
||||
return osv
|
||||
}
|
||||
|
||||
// Build gets the build-number on Windows
|
||||
// The calling application must be manifested to get the correct version information.
|
||||
func Build() uint16 {
|
||||
return Get().Build
|
||||
}
|
||||
|
||||
func (osv OSVersion) ToString() string {
|
||||
return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
|
||||
}
|
23 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go (generated, vendored, new file)
@ -0,0 +1,23 @@
|
||||
package osversion
|
||||
|
||||
const (
|
||||
// RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server
|
||||
// 2016 (ltsc2016) and Windows 10 (Anniversary Update).
|
||||
RS1 = 14393
|
||||
|
||||
// RS2 (version 1703, codename "Redstone 2") was a client-only update, and
|
||||
// corresponds to Windows 10 (Creators Update).
|
||||
RS2 = 15063
|
||||
|
||||
// RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server
|
||||
// 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update).
|
||||
RS3 = 16299
|
||||
|
||||
// RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server
|
||||
// 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update).
|
||||
RS4 = 17134
|
||||
|
||||
// RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server
|
||||
// 2019 (ltsc2019), and Windows 10 (October 2018 Update).
|
||||
RS5 = 17763
|
||||
)
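A minimal usage sketch for the osversion package above (Windows-only, since it wraps windows.GetVersion; the program is illustrative, not part of this diff):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

func main() {
	osv := osversion.Get()
	fmt.Println("Windows version:", osv.ToString()) // e.g. "10.0.17763"

	if osv.Build >= osversion.RS5 {
		fmt.Println("running on Windows Server 2019 / Windows 10 1809 or newer")
	}
}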
|
22 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE (generated, vendored)
@ -1,22 +0,0 @@
|
||||
go-runhcs is a fork of go-runc
|
||||
|
||||
The following is runc's legal notice.
|
||||
|
||||
---
|
||||
|
||||
runc
|
||||
|
||||
Copyright 2012-2015 Docker, Inc.
|
||||
|
||||
This product includes software developed at Docker, Inc. (http://www.docker.com).
|
||||
|
||||
The following is courtesy of our legal counsel:
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the
|
||||
United States and other governments.
|
||||
It is your responsibility to ensure that your use and/or transfer does not
|
||||
violate applicable laws.
|
||||
|
||||
For more information, please see http://www.bis.doc.gov
|
||||
|
||||
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
12 vendor/github.com/alexellis/go-execute/pkg/v1/exec.go (generated, vendored)
@ -71,10 +71,20 @@ func (et ExecTask) Execute() (ExecResult, error) {
|
||||
cmd.Dir = et.Cwd
|
||||
|
||||
if len(et.Env) > 0 {
|
||||
cmd.Env = os.Environ()
|
||||
overrides := map[string]bool{}
|
||||
for _, env := range et.Env {
|
||||
key := strings.Split(env, "=")[0]
|
||||
overrides[key] = true
|
||||
cmd.Env = append(cmd.Env, env)
|
||||
}
|
||||
|
||||
for _, env := range os.Environ() {
|
||||
key := strings.Split(env, "=")[0]
|
||||
|
||||
if _, ok := overrides[key]; !ok {
|
||||
cmd.Env = append(cmd.Env, env)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stdoutBuff := bytes.Buffer{}
|
||||
|
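The hunk above makes explicitly supplied Env entries take precedence over variables inherited from os.Environ(). A hedged usage sketch, assuming the ExecTask/ExecResult API of go-execute v1 as vendored here (the "env" command and PATH value are illustrative):

package main

import (
	"fmt"
	"log"

	execute "github.com/alexellis/go-execute/pkg/v1"
)

func main() {
	task := execute.ExecTask{
		Command: "env",
		// This PATH overrides the inherited one; every other variable from
		// os.Environ() is still passed through by the merge logic above.
		Env: []string{"PATH=/custom/bin"},
	}

	res, err := task.Execute()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Stdout)
}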
26 vendor/github.com/alexellis/k3sup/pkg/env/env.go (generated, vendored)
@ -2,6 +2,8 @@ package env
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
execute "github.com/alexellis/go-execute/pkg/v1"
|
||||
@ -9,7 +11,12 @@ import (
|
||||
|
||||
// GetClientArch returns a pair of arch and os
|
||||
func GetClientArch() (string, string) {
|
||||
task := execute.ExecTask{Command: "uname", Args: []string{"-m"}}
|
||||
task := execute.ExecTask{
|
||||
Command: "uname",
|
||||
Args: []string{"-m"},
|
||||
StreamStdio: false,
|
||||
}
|
||||
|
||||
res, err := task.Execute()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
@ -17,7 +24,12 @@ func GetClientArch() (string, string) {
|
||||
|
||||
arch := strings.TrimSpace(res.Stdout)
|
||||
|
||||
taskOS := execute.ExecTask{Command: "uname", Args: []string{"-s"}}
|
||||
taskOS := execute.ExecTask{
|
||||
Command: "uname",
|
||||
Args: []string{"-s"},
|
||||
StreamStdio: false,
|
||||
}
|
||||
|
||||
resOS, errOS := taskOS.Execute()
|
||||
if errOS != nil {
|
||||
log.Println(errOS)
|
||||
@ -27,3 +39,13 @@ func GetClientArch() (string, string) {
|
||||
|
||||
return arch, os
|
||||
}
|
||||
|
||||
func LocalBinary(name, subdir string) string {
|
||||
home := os.Getenv("HOME")
|
||||
val := path.Join(home, ".k3sup/bin/")
|
||||
if len(subdir) > 0 {
|
||||
val = path.Join(val, subdir)
|
||||
}
|
||||
|
||||
return path.Join(val, name)
|
||||
}
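A brief sketch of how the helpers above are typically called (the binary name "faas-cli" is illustrative):

package main

import (
	"fmt"

	"github.com/alexellis/k3sup/pkg/env"
)

func main() {
	arch, osName := env.GetClientArch()
	fmt.Printf("client platform: %s/%s\n", osName, arch)

	// LocalBinary joins $HOME/.k3sup/bin[/subdir]/name.
	fmt.Println(env.LocalBinary("faas-cli", ""))
}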
|
||||
|
191 vendor/github.com/compose-spec/compose-go/LICENSE (generated, vendored, new file)
@ -0,0 +1,191 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2013-2017 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
104 vendor/github.com/compose-spec/compose-go/envfile/envfile.go (generated, vendored, new file)
@ -0,0 +1,104 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package envfile
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
)
|
||||
|
||||
const whiteSpaces = " \t"
|
||||
|
||||
// ErrBadKey typed error for bad environment variable
|
||||
type ErrBadKey struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e ErrBadKey) Error() string {
|
||||
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
||||
}
|
||||
|
||||
// Parse reads a file with environment variables enumerated by lines
|
||||
//
|
||||
// ``Environment variable names used by the utilities in the Shell and
|
||||
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
|
||||
// letters, digits, and the '_' (underscore) from the characters defined in
|
||||
// Portable Character Set and do not begin with a digit. *But*, other
|
||||
// characters may be permitted by an implementation; applications shall
|
||||
// tolerate the presence of such names.''
|
||||
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
|
||||
//
|
||||
// As of #16585, it's up to application inside docker to validate or not
|
||||
// environment variables, that's why we just strip leading whitespace and
|
||||
// nothing more.
|
||||
// Converts ["key=value"] to {"key":"value"} but sets unset keys - the ones with no "=" in them - to nil
|
||||
// We use this in cases where we need to distinguish between FOO= and FOO
|
||||
// where the latter case just means FOO was mentioned but not given a value
|
||||
func Parse(filename string) (types.MappingWithEquals, error) {
|
||||
vars := types.MappingWithEquals{}
|
||||
fh, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return vars, err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
scanner := bufio.NewScanner(fh)
|
||||
currentLine := 0
|
||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
||||
for scanner.Scan() {
|
||||
scannedBytes := scanner.Bytes()
|
||||
if !utf8.Valid(scannedBytes) {
|
||||
return vars, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes)
|
||||
}
|
||||
// We trim UTF8 BOM
|
||||
if currentLine == 0 {
|
||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
||||
}
|
||||
// trim the line from all leading whitespace first
|
||||
line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
|
||||
currentLine++
|
||||
// line is not empty, and not starting with '#'
|
||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||
data := strings.SplitN(line, "=", 2)
|
||||
|
||||
// trim the front of a variable, but nothing else
|
||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||
if strings.ContainsAny(variable, whiteSpaces) {
|
||||
return vars, ErrBadKey{fmt.Sprintf("variable '%s' contains whitespaces", variable)}
|
||||
}
|
||||
if len(variable) == 0 {
|
||||
return vars, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)}
|
||||
}
|
||||
|
||||
if len(data) > 1 {
|
||||
// pass the value through, no trimming
|
||||
vars[variable] = &data[1]
|
||||
} else {
|
||||
// variable was not given a value but declared
|
||||
vars[strings.TrimSpace(line)] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return vars, scanner.Err()
|
||||
}
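A short usage sketch for Parse, assuming an env file in the format described above (the file name is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/compose-spec/compose-go/envfile"
)

func main() {
	vars, err := envfile.Parse("example1.env")
	if err != nil {
		log.Fatal(err)
	}
	for key, value := range vars {
		if value == nil {
			// key was mentioned without "=", so it stays nil
			fmt.Printf("%s (declared, no value)\n", key)
			continue
		}
		fmt.Printf("%s=%s\n", key, *value)
	}
}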
|
177 vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go (generated, vendored, new file)
@ -0,0 +1,177 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package interpolation
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/compose-spec/compose-go/template"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Options supported by Interpolate
|
||||
type Options struct {
|
||||
// LookupValue from a key
|
||||
LookupValue LookupValue
|
||||
// TypeCastMapping maps key paths to functions to cast to a type
|
||||
TypeCastMapping map[Path]Cast
|
||||
// Substitution function to use
|
||||
Substitute func(string, template.Mapping) (string, error)
|
||||
}
|
||||
|
||||
// LookupValue is a function which maps from variable names to values.
|
||||
// Returns the value as a string and a bool indicating whether
|
||||
// the value is present, to distinguish between an empty string
|
||||
// and the absence of a value.
|
||||
type LookupValue func(key string) (string, bool)
|
||||
|
||||
// Cast a value to a new type, or return an error if the value can't be cast
|
||||
type Cast func(value string) (interface{}, error)
|
||||
|
||||
// Interpolate replaces variables in a string with the values from a mapping
|
||||
func Interpolate(config map[string]interface{}, opts Options) (map[string]interface{}, error) {
|
||||
if opts.LookupValue == nil {
|
||||
opts.LookupValue = os.LookupEnv
|
||||
}
|
||||
if opts.TypeCastMapping == nil {
|
||||
opts.TypeCastMapping = make(map[Path]Cast)
|
||||
}
|
||||
if opts.Substitute == nil {
|
||||
opts.Substitute = template.Substitute
|
||||
}
|
||||
|
||||
out := map[string]interface{}{}
|
||||
|
||||
for key, value := range config {
|
||||
interpolatedValue, err := recursiveInterpolate(value, NewPath(key), opts)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
out[key] = interpolatedValue
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func recursiveInterpolate(value interface{}, path Path, opts Options) (interface{}, error) {
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue))
|
||||
if err != nil || newValue == value {
|
||||
return value, newPathError(path, err)
|
||||
}
|
||||
caster, ok := opts.getCasterForPath(path)
|
||||
if !ok {
|
||||
return newValue, nil
|
||||
}
|
||||
casted, err := caster(newValue)
|
||||
return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type"))
|
||||
|
||||
case map[string]interface{}:
|
||||
out := map[string]interface{}{}
|
||||
for key, elem := range value {
|
||||
interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out[key] = interpolatedElem
|
||||
}
|
||||
return out, nil
|
||||
|
||||
case []interface{}:
|
||||
out := make([]interface{}, len(value))
|
||||
for i, elem := range value {
|
||||
interpolatedElem, err := recursiveInterpolate(elem, path.Next(PathMatchList), opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out[i] = interpolatedElem
|
||||
}
|
||||
return out, nil
|
||||
|
||||
default:
|
||||
return value, nil
|
||||
}
|
||||
}
|
||||
|
||||
func newPathError(path Path, err error) error {
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case *template.InvalidTemplateError:
|
||||
return errors.Errorf(
|
||||
"invalid interpolation format for %s: %#v. You may need to escape any $ with another $.",
|
||||
path, err.Template)
|
||||
default:
|
||||
return errors.Wrapf(err, "error while interpolating %s", path)
|
||||
}
|
||||
}
|
||||
|
||||
const pathSeparator = "."
|
||||
|
||||
// PathMatchAll is a token used as part of a Path to match any key at that level
|
||||
// in the nested structure
|
||||
const PathMatchAll = "*"
|
||||
|
||||
// PathMatchList is a token used as part of a Path to match items in a list
|
||||
const PathMatchList = "[]"
|
||||
|
||||
// Path is a dotted path of keys to a value in a nested mapping structure. A *
|
||||
// section in a path will match any key in the mapping structure.
|
||||
type Path string
|
||||
|
||||
// NewPath returns a new Path
|
||||
func NewPath(items ...string) Path {
|
||||
return Path(strings.Join(items, pathSeparator))
|
||||
}
|
||||
|
||||
// Next returns a new path by appending part to the current path
|
||||
func (p Path) Next(part string) Path {
|
||||
return Path(string(p) + pathSeparator + part)
|
||||
}
|
||||
|
||||
func (p Path) parts() []string {
|
||||
return strings.Split(string(p), pathSeparator)
|
||||
}
|
||||
|
||||
func (p Path) matches(pattern Path) bool {
|
||||
patternParts := pattern.parts()
|
||||
parts := p.parts()
|
||||
|
||||
if len(patternParts) != len(parts) {
|
||||
return false
|
||||
}
|
||||
for index, part := range parts {
|
||||
switch patternParts[index] {
|
||||
case PathMatchAll, part:
|
||||
continue
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (o Options) getCasterForPath(path Path) (Cast, bool) {
|
||||
for pattern, caster := range o.TypeCastMapping {
|
||||
if path.matches(pattern) {
|
||||
return caster, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
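A hedged sketch of calling Interpolate with a custom LookupValue; the config map and the TAG variable are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/compose-spec/compose-go/interpolation"
)

func main() {
	config := map[string]interface{}{
		"services": map[string]interface{}{
			"web": map[string]interface{}{
				"image": "nginx:${TAG}",
			},
		},
	}

	out, err := interpolation.Interpolate(config, interpolation.Options{
		// Resolve variables from a fixed mapping instead of os.LookupEnv.
		LookupValue: func(key string) (string, bool) {
			if key == "TAG" {
				return "1.19", true
			}
			return "", false
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // the image value becomes "nginx:1.19"
}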
|
8 vendor/github.com/compose-spec/compose-go/loader/example1.env (generated, vendored, new file)
@ -0,0 +1,8 @@
|
||||
# passed through
|
||||
FOO=foo_from_env_file
|
||||
|
||||
# overridden in example2.env
|
||||
BAR=bar_from_env_file
|
||||
|
||||
# overridden in full-example.yml
|
||||
BAZ=baz_from_env_file
|
4 vendor/github.com/compose-spec/compose-go/loader/example2.env (generated, vendored, new file)
@ -0,0 +1,4 @@
|
||||
BAR=bar_from_env_file_2
|
||||
|
||||
# overridden in configDetails.Environment
|
||||
QUX=quz_from_env_file_2
|
409 vendor/github.com/compose-spec/compose-go/loader/full-example.yml (generated, vendored, new file)
@ -0,0 +1,409 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
foo:
|
||||
|
||||
build:
|
||||
context: ./dir
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
foo: bar
|
||||
target: foo
|
||||
network: foo
|
||||
cache_from:
|
||||
- foo
|
||||
- bar
|
||||
labels: [FOO=BAR]
|
||||
|
||||
|
||||
cap_add:
|
||||
- ALL
|
||||
|
||||
cap_drop:
|
||||
- NET_ADMIN
|
||||
- SYS_ADMIN
|
||||
|
||||
cgroup_parent: m-executor-abcd
|
||||
|
||||
# String or list
|
||||
command: bundle exec thin -p 3000
|
||||
# command: ["bundle", "exec", "thin", "-p", "3000"]
|
||||
|
||||
configs:
|
||||
- config1
|
||||
- source: config2
|
||||
target: /my_config
|
||||
uid: '103'
|
||||
gid: '103'
|
||||
mode: 0440
|
||||
|
||||
container_name: my-web-container
|
||||
|
||||
depends_on:
|
||||
- db
|
||||
- redis
|
||||
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 6
|
||||
labels: [FOO=BAR]
|
||||
rollback_config:
|
||||
parallelism: 3
|
||||
delay: 10s
|
||||
failure_action: continue
|
||||
monitor: 60s
|
||||
max_failure_ratio: 0.3
|
||||
order: start-first
|
||||
update_config:
|
||||
parallelism: 3
|
||||
delay: 10s
|
||||
failure_action: continue
|
||||
monitor: 60s
|
||||
max_failure_ratio: 0.3
|
||||
order: start-first
|
||||
resources:
|
||||
limits:
|
||||
cpus: '0.001'
|
||||
memory: 50M
|
||||
reservations:
|
||||
cpus: '0.0001'
|
||||
memory: 20M
|
||||
generic_resources:
|
||||
- discrete_resource_spec:
|
||||
kind: 'gpu'
|
||||
value: 2
|
||||
- discrete_resource_spec:
|
||||
kind: 'ssd'
|
||||
value: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
max_attempts: 3
|
||||
window: 120s
|
||||
placement:
|
||||
constraints: [node=foo]
|
||||
max_replicas_per_node: 5
|
||||
preferences:
|
||||
- spread: node.labels.az
|
||||
endpoint_mode: dnsrr
|
||||
|
||||
devices:
|
||||
- "/dev/ttyUSB0:/dev/ttyUSB0"
|
||||
|
||||
# String or list
|
||||
# dns: 8.8.8.8
|
||||
dns:
|
||||
- 8.8.8.8
|
||||
- 9.9.9.9
|
||||
|
||||
# String or list
|
||||
# dns_search: example.com
|
||||
dns_search:
|
||||
- dc1.example.com
|
||||
- dc2.example.com
|
||||
|
||||
domainname: foo.com
|
||||
|
||||
# String or list
|
||||
# entrypoint: /code/entrypoint.sh -p 3000
|
||||
entrypoint: ["/code/entrypoint.sh", "-p", "3000"]
|
||||
|
||||
# String or list
|
||||
# env_file: .env
|
||||
env_file:
|
||||
- ./example1.env
|
||||
- ./example2.env
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values can be strings, numbers or null
|
||||
# Booleans are not allowed - must be quoted
|
||||
environment:
|
||||
BAZ: baz_from_service_def
|
||||
QUX:
|
||||
# environment:
|
||||
# - RACK_ENV=development
|
||||
# - SHOW=true
|
||||
# - SESSION_SECRET
|
||||
|
||||
# Items can be strings or numbers
|
||||
expose:
|
||||
- "3000"
|
||||
- 8000
|
||||
|
||||
external_links:
|
||||
- redis_1
|
||||
- project_db_1:mysql
|
||||
- project_db_1:postgresql
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values must be strings
|
||||
# extra_hosts:
|
||||
# somehost: "162.242.195.82"
|
||||
# otherhost: "50.31.209.229"
|
||||
extra_hosts:
|
||||
- "somehost:162.242.195.82"
|
||||
- "otherhost:50.31.209.229"
|
||||
|
||||
hostname: foo
|
||||
|
||||
healthcheck:
|
||||
test: echo "hello world"
|
||||
interval: 10s
|
||||
timeout: 1s
|
||||
retries: 5
|
||||
start_period: 15s
|
||||
|
||||
# Any valid image reference - repo, tag, id, sha
|
||||
image: redis
|
||||
# image: ubuntu:14.04
|
||||
# image: tutum/influxdb
|
||||
# image: example-registry.com:4000/postgresql
|
||||
# image: a4bc65fd
|
||||
# image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
|
||||
|
||||
ipc: host
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values can be strings, numbers or null
|
||||
labels:
|
||||
com.example.description: "Accounting webapp"
|
||||
com.example.number: 42
|
||||
com.example.empty-label:
|
||||
# labels:
|
||||
# - "com.example.description=Accounting webapp"
|
||||
# - "com.example.number=42"
|
||||
# - "com.example.empty-label"
|
||||
|
||||
links:
|
||||
- db
|
||||
- db:database
|
||||
- redis
|
||||
|
||||
logging:
|
||||
driver: syslog
|
||||
options:
|
||||
syslog-address: "tcp://192.168.0.42:123"
|
||||
|
||||
mac_address: 02:42:ac:11:65:43
|
||||
|
||||
# network_mode: "bridge"
|
||||
# network_mode: "host"
|
||||
# network_mode: "none"
|
||||
# Use the network mode of an arbitrary container from another service
|
||||
# network_mode: "service:db"
|
||||
# Use the network mode of another container, specified by name or id
|
||||
# network_mode: "container:some-container"
|
||||
network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b"
|
||||
|
||||
networks:
|
||||
some-network:
|
||||
aliases:
|
||||
- alias1
|
||||
- alias3
|
||||
other-network:
|
||||
ipv4_address: 172.16.238.10
|
||||
ipv6_address: 2001:3984:3989::10
|
||||
other-other-network:
|
||||
|
||||
pid: "host"
|
||||
|
||||
ports:
|
||||
- 3000
|
||||
- "3001-3005"
|
||||
- "8000:8000"
|
||||
- "9090-9091:8080-8081"
|
||||
- "49100:22"
|
||||
- "127.0.0.1:8001:8001"
|
||||
- "127.0.0.1:5000-5010:5000-5010"
|
||||
|
||||
privileged: true
|
||||
|
||||
read_only: true
|
||||
|
||||
restart: always
|
||||
|
||||
secrets:
|
||||
- secret1
|
||||
- source: secret2
|
||||
target: my_secret
|
||||
uid: '103'
|
||||
gid: '103'
|
||||
mode: 0440
|
||||
|
||||
security_opt:
|
||||
- label=level:s0:c100,c200
|
||||
- label=type:svirt_apache_t
|
||||
|
||||
stdin_open: true
|
||||
|
||||
stop_grace_period: 20s
|
||||
|
||||
stop_signal: SIGUSR1
|
||||
|
||||
sysctls:
|
||||
net.core.somaxconn: 1024
|
||||
net.ipv4.tcp_syncookies: 0
|
||||
|
||||
# String or list
|
||||
# tmpfs: /run
|
||||
tmpfs:
|
||||
- /run
|
||||
- /tmp
|
||||
|
||||
tty: true
|
||||
|
||||
ulimits:
|
||||
# Single number or mapping with soft + hard limits
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 20000
|
||||
hard: 40000
|
||||
|
||||
user: someone
|
||||
|
||||
volumes:
|
||||
# Just specify a path and let the Engine create a volume
|
||||
- /var/lib/mysql
|
||||
# Specify an absolute path mapping
|
||||
- /opt/data:/var/lib/mysql
|
||||
# Path on the host, relative to the Compose file
|
||||
- .:/code
|
||||
- ./static:/var/www/html
|
||||
# User-relative path
|
||||
- ~/configs:/etc/configs/:ro
|
||||
# Named volume
|
||||
- datavolume:/var/lib/mysql
|
||||
- type: bind
|
||||
source: ./opt
|
||||
target: /opt
|
||||
consistency: cached
|
||||
- type: tmpfs
|
||||
target: /opt
|
||||
tmpfs:
|
||||
size: 10000
|
||||
|
||||
working_dir: /code
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
|
||||
networks:
|
||||
# Entries can be null, which specifies simply that a network
|
||||
# called "{project name}_some-network" should be created and
|
||||
# use the default driver
|
||||
some-network:
|
||||
|
||||
other-network:
|
||||
driver: overlay
|
||||
|
||||
driver_opts:
|
||||
# Values can be strings or numbers
|
||||
foo: "bar"
|
||||
baz: 1
|
||||
|
||||
ipam:
|
||||
driver: overlay
|
||||
# driver_opts:
|
||||
# # Values can be strings or numbers
|
||||
# com.docker.network.enable_ipv6: "true"
|
||||
# com.docker.network.numeric_value: 1
|
||||
config:
|
||||
- subnet: 172.16.238.0/24
|
||||
# gateway: 172.16.238.1
|
||||
- subnet: 2001:3984:3989::/64
|
||||
# gateway: 2001:3984:3989::1
|
||||
|
||||
labels:
|
||||
foo: bar
|
||||
|
||||
external-network:
|
||||
# Specifies that a pre-existing network called "external-network"
|
||||
# can be referred to within this file as "external-network"
|
||||
external: true
|
||||
|
||||
other-external-network:
|
||||
# Specifies that a pre-existing network called "my-cool-network"
|
||||
# can be referred to within this file as "other-external-network"
|
||||
external:
|
||||
name: my-cool-network
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
|
||||
volumes:
|
||||
# Entries can be null, which specifies simply that a volume
|
||||
# called "{project name}_some-volume" should be created and
|
||||
# use the default driver
|
||||
some-volume:
|
||||
|
||||
other-volume:
|
||||
driver: flocker
|
||||
|
||||
driver_opts:
|
||||
# Values can be strings or numbers
|
||||
foo: "bar"
|
||||
baz: 1
|
||||
labels:
|
||||
foo: bar
|
||||
|
||||
another-volume:
|
||||
name: "user_specified_name"
|
||||
driver: vsphere
|
||||
|
||||
driver_opts:
|
||||
# Values can be strings or numbers
|
||||
foo: "bar"
|
||||
baz: 1
|
||||
|
||||
external-volume:
|
||||
# Specifies that a pre-existing volume called "external-volume"
|
||||
# can be referred to within this file as "external-volume"
|
||||
external: true
|
||||
|
||||
other-external-volume:
|
||||
# Specifies that a pre-existing volume called "my-cool-volume"
|
||||
# can be referred to within this file as "other-external-volume"
|
||||
# This example uses the deprecated "volume.external.name" (replaced by "volume.name")
|
||||
external:
|
||||
name: my-cool-volume
|
||||
|
||||
external-volume3:
|
||||
# Specifies that a pre-existing volume called "this-is-volume3"
|
||||
# can be referred to within this file as "external-volume3"
|
||||
name: this-is-volume3
|
||||
external: true
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
|
||||
configs:
|
||||
config1:
|
||||
file: ./config_data
|
||||
labels:
|
||||
foo: bar
|
||||
config2:
|
||||
external:
|
||||
name: my_config
|
||||
config3:
|
||||
external: true
|
||||
config4:
|
||||
name: foo
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
|
||||
secrets:
|
||||
secret1:
|
||||
file: ./secret_data
|
||||
labels:
|
||||
foo: bar
|
||||
secret2:
|
||||
external:
|
||||
name: my_secret
|
||||
secret3:
|
||||
external: true
|
||||
secret4:
|
||||
name: bar
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
x-nested:
|
||||
bar: baz
|
||||
foo: bar
|
88 vendor/github.com/compose-spec/compose-go/loader/interpolate.go (generated, vendored, new file)
@ -0,0 +1,88 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loader
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
interp "github.com/compose-spec/compose-go/interpolation"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
|
||||
servicePath("configs", interp.PathMatchList, "mode"): toInt,
|
||||
servicePath("secrets", interp.PathMatchList, "mode"): toInt,
|
||||
servicePath("healthcheck", "retries"): toInt,
|
||||
servicePath("healthcheck", "disable"): toBoolean,
|
||||
servicePath("deploy", "replicas"): toInt,
|
||||
servicePath("deploy", "update_config", "parallelism"): toInt,
|
||||
servicePath("deploy", "update_config", "max_failure_ratio"): toFloat,
|
||||
servicePath("deploy", "rollback_config", "parallelism"): toInt,
|
||||
servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat,
|
||||
servicePath("deploy", "restart_policy", "max_attempts"): toInt,
|
||||
servicePath("deploy", "placement", "max_replicas_per_node"): toInt,
|
||||
servicePath("ports", interp.PathMatchList, "target"): toInt,
|
||||
servicePath("ports", interp.PathMatchList, "published"): toInt,
|
||||
servicePath("ulimits", interp.PathMatchAll): toInt,
|
||||
servicePath("ulimits", interp.PathMatchAll, "hard"): toInt,
|
||||
servicePath("ulimits", interp.PathMatchAll, "soft"): toInt,
|
||||
servicePath("privileged"): toBoolean,
|
||||
servicePath("read_only"): toBoolean,
|
||||
servicePath("stdin_open"): toBoolean,
|
||||
servicePath("tty"): toBoolean,
|
||||
servicePath("volumes", interp.PathMatchList, "read_only"): toBoolean,
|
||||
servicePath("volumes", interp.PathMatchList, "volume", "nocopy"): toBoolean,
|
||||
iPath("networks", interp.PathMatchAll, "external"): toBoolean,
|
||||
iPath("networks", interp.PathMatchAll, "internal"): toBoolean,
|
||||
iPath("networks", interp.PathMatchAll, "attachable"): toBoolean,
|
||||
iPath("volumes", interp.PathMatchAll, "external"): toBoolean,
|
||||
iPath("secrets", interp.PathMatchAll, "external"): toBoolean,
|
||||
iPath("configs", interp.PathMatchAll, "external"): toBoolean,
|
||||
}
|
||||
|
||||
func iPath(parts ...string) interp.Path {
|
||||
return interp.NewPath(parts...)
|
||||
}
|
||||
|
||||
func servicePath(parts ...string) interp.Path {
|
||||
return iPath(append([]string{"services", interp.PathMatchAll}, parts...)...)
|
||||
}
|
||||
|
||||
func toInt(value string) (interface{}, error) {
|
||||
return strconv.Atoi(value)
|
||||
}
|
||||
|
||||
func toFloat(value string) (interface{}, error) {
|
||||
return strconv.ParseFloat(value, 64)
|
||||
}
|
||||
|
||||
// should match http://yaml.org/type/bool.html
|
||||
func toBoolean(value string) (interface{}, error) {
|
||||
switch strings.ToLower(value) {
|
||||
case "y", "yes", "true", "on":
|
||||
return true, nil
|
||||
case "n", "no", "false", "off":
|
||||
return false, nil
|
||||
default:
|
||||
return nil, errors.Errorf("invalid boolean: %s", value)
|
||||
}
|
||||
}
|
||||
|
||||
func interpolateConfig(configDict map[string]interface{}, opts interp.Options) (map[string]interface{}, error) {
|
||||
return interp.Interpolate(configDict, opts)
|
||||
}
|
876 vendor/github.com/compose-spec/compose-go/loader/loader.go (generated, vendored, new file)
@ -0,0 +1,876 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loader
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/compose-spec/compose-go/envfile"
|
||||
interp "github.com/compose-spec/compose-go/interpolation"
|
||||
"github.com/compose-spec/compose-go/schema"
|
||||
"github.com/compose-spec/compose-go/template"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
units "github.com/docker/go-units"
|
||||
shellwords "github.com/mattn/go-shellwords"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Options supported by Load
|
||||
type Options struct {
|
||||
// Skip schema validation
|
||||
SkipValidation bool
|
||||
// Skip interpolation
|
||||
SkipInterpolation bool
|
||||
// Interpolation options
|
||||
Interpolate *interp.Options
|
||||
// Discard 'env_file' entries after resolving to 'environment' section
|
||||
discardEnvFiles bool
|
||||
}
|
||||
|
||||
// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to
|
||||
// the `environment` section
|
||||
func WithDiscardEnvFiles(opts *Options) {
|
||||
opts.discardEnvFiles = true
|
||||
}
|
||||
|
||||
// ParseYAML reads the bytes from a file, parses the bytes into a mapping
|
||||
// structure, and returns it.
|
||||
func ParseYAML(source []byte) (map[string]interface{}, error) {
|
||||
var cfg interface{}
|
||||
if err := yaml.Unmarshal(source, &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfgMap, ok := cfg.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Top-level object must be a mapping")
|
||||
}
|
||||
converted, err := convertToStringKeysRecursive(cfgMap, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return converted.(map[string]interface{}), nil
|
||||
}
|
||||
|
||||
// Load reads a ConfigDetails and returns a fully loaded configuration
|
||||
func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Config, error) {
|
||||
if len(configDetails.ConfigFiles) < 1 {
|
||||
return nil, errors.Errorf("No files specified")
|
||||
}
|
||||
|
||||
opts := &Options{
|
||||
Interpolate: &interp.Options{
|
||||
Substitute: template.Substitute,
|
||||
LookupValue: configDetails.LookupEnv,
|
||||
TypeCastMapping: interpolateTypeCastMapping,
|
||||
},
|
||||
}
|
||||
|
||||
for _, op := range options {
|
||||
op(opts)
|
||||
}
|
||||
|
||||
configs := []*types.Config{}
|
||||
var err error
|
||||
|
||||
for _, file := range configDetails.ConfigFiles {
|
||||
configDict := file.Config
|
||||
version := schema.Version(configDict)
|
||||
if configDetails.Version == "" {
|
||||
configDetails.Version = version
|
||||
}
|
||||
if configDetails.Version != version {
|
||||
return nil, errors.Errorf("version mismatched between two composefiles : %v and %v", configDetails.Version, version)
|
||||
}
|
||||
|
||||
if !opts.SkipInterpolation {
|
||||
configDict, err = interpolateConfig(configDict, *opts.Interpolate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.SkipValidation {
|
||||
if err := schema.Validate(configDict, configDetails.Version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
configDict = groupXFieldsIntoExtensions(configDict)
|
||||
|
||||
cfg, err := loadSections(configDict, configDetails)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.Filename = file.Filename
|
||||
if opts.discardEnvFiles {
|
||||
for i := range cfg.Services {
|
||||
cfg.Services[i].EnvFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
configs = append(configs, cfg)
|
||||
}
|
||||
|
||||
return merge(configs)
|
||||
}
|
||||
|
||||
func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} {
|
||||
extras := map[string]interface{}{}
|
||||
for key, value := range dict {
|
||||
if strings.HasPrefix(key, "x-") {
|
||||
extras[key] = value
|
||||
delete(dict, key)
|
||||
}
|
||||
if d, ok := value.(map[string]interface{}); ok {
|
||||
dict[key] = groupXFieldsIntoExtensions(d)
|
||||
}
|
||||
}
|
||||
if len(extras) > 0 {
|
||||
dict["extensions"] = extras
|
||||
}
|
||||
return dict
|
||||
}
|
||||
|
||||
func loadSections(config map[string]interface{}, configDetails types.ConfigDetails) (*types.Config, error) {
|
||||
var err error
|
||||
cfg := types.Config{
|
||||
Version: schema.Version(config),
|
||||
}
|
||||
|
||||
var loaders = []struct {
|
||||
key string
|
||||
fnc func(config map[string]interface{}) error
|
||||
}{
|
||||
{
|
||||
key: "services",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Services, err = LoadServices(config, configDetails.WorkingDir, configDetails.LookupEnv)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "networks",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Networks, err = LoadNetworks(config, configDetails.Version)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "volumes",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Volumes, err = LoadVolumes(config)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "secrets",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Secrets, err = LoadSecrets(config, configDetails)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "configs",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Configs, err = LoadConfigObjs(config, configDetails)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "extensions",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
if len(config) > 0 {
|
||||
cfg.Extensions = config
|
||||
}
|
||||
return err
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, loader := range loaders {
|
||||
if err := loader.fnc(getSection(config, loader.key)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func getSection(config map[string]interface{}, key string) map[string]interface{} {
|
||||
section, ok := config[key]
|
||||
if !ok {
|
||||
return make(map[string]interface{})
|
||||
}
|
||||
return section.(map[string]interface{})
|
||||
}
|
||||
|
||||
func sortedKeys(set map[string]bool) []string {
|
||||
var keys []string
|
||||
for key := range set {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func getProperties(services map[string]interface{}, propertyMap map[string]string) map[string]string {
|
||||
output := map[string]string{}
|
||||
|
||||
for _, service := range services {
|
||||
if serviceDict, ok := service.(map[string]interface{}); ok {
|
||||
for property, description := range propertyMap {
|
||||
if _, isSet := serviceDict[property]; isSet {
|
||||
output[property] = description
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// ForbiddenPropertiesError is returned when there are properties in the Compose
|
||||
// file that are forbidden.
|
||||
type ForbiddenPropertiesError struct {
|
||||
Properties map[string]string
|
||||
}
|
||||
|
||||
func (e *ForbiddenPropertiesError) Error() string {
|
||||
return "Configuration contains forbidden properties"
|
||||
}
|
||||
|
||||
func getServices(configDict map[string]interface{}) map[string]interface{} {
|
||||
if services, ok := configDict["services"]; ok {
|
||||
if servicesDict, ok := services.(map[string]interface{}); ok {
|
||||
return servicesDict
|
||||
}
|
||||
}
|
||||
|
||||
return map[string]interface{}{}
|
||||
}
|
||||
|
||||
// Transform converts the source into the target struct with compose types transformer
|
||||
// and the specified transformers if any.
|
||||
func Transform(source interface{}, target interface{}, additionalTransformers ...Transformer) error {
|
||||
data := mapstructure.Metadata{}
|
||||
config := &mapstructure.DecoderConfig{
|
||||
DecodeHook: mapstructure.ComposeDecodeHookFunc(
|
||||
createTransformHook(additionalTransformers...),
|
||||
mapstructure.StringToTimeDurationHookFunc()),
|
||||
Result: target,
|
||||
Metadata: &data,
|
||||
}
|
||||
decoder, err := mapstructure.NewDecoder(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return decoder.Decode(source)
|
||||
}
|
||||
|
||||
// TransformerFunc defines a function to perform the actual transformation
|
||||
type TransformerFunc func(interface{}) (interface{}, error)
|
||||
|
||||
// Transformer defines a map to type transformer
|
||||
type Transformer struct {
|
||||
TypeOf reflect.Type
|
||||
Func TransformerFunc
|
||||
}
|
||||
|
||||
func createTransformHook(additionalTransformers ...Transformer) mapstructure.DecodeHookFuncType {
|
||||
transforms := map[reflect.Type]func(interface{}) (interface{}, error){
|
||||
reflect.TypeOf(types.External{}): transformExternal,
|
||||
reflect.TypeOf(types.HealthCheckTest{}): transformHealthCheckTest,
|
||||
reflect.TypeOf(types.ShellCommand{}): transformShellCommand,
|
||||
reflect.TypeOf(types.StringList{}): transformStringList,
|
||||
reflect.TypeOf(map[string]string{}): transformMapStringString,
|
||||
reflect.TypeOf(types.UlimitsConfig{}): transformUlimits,
|
||||
reflect.TypeOf(types.UnitBytes(0)): transformSize,
|
||||
reflect.TypeOf([]types.ServicePortConfig{}): transformServicePort,
|
||||
reflect.TypeOf(types.ServiceSecretConfig{}): transformStringSourceMap,
|
||||
reflect.TypeOf(types.ServiceConfigObjConfig{}): transformStringSourceMap,
|
||||
reflect.TypeOf(types.StringOrNumberList{}): transformStringOrNumberList,
|
||||
reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap,
|
||||
reflect.TypeOf(types.Mapping{}): transformMappingOrListFunc("=", false),
|
||||
reflect.TypeOf(types.MappingWithEquals{}): transformMappingOrListFunc("=", true),
|
||||
reflect.TypeOf(types.Labels{}): transformMappingOrListFunc("=", false),
|
||||
reflect.TypeOf(types.MappingWithColon{}): transformMappingOrListFunc(":", false),
|
||||
reflect.TypeOf(types.HostsList{}): transformListOrMappingFunc(":", false),
|
||||
reflect.TypeOf(types.ServiceVolumeConfig{}): transformServiceVolumeConfig,
|
||||
reflect.TypeOf(types.BuildConfig{}): transformBuildConfig,
|
||||
reflect.TypeOf(types.Duration(0)): transformStringToDuration,
|
||||
}
|
||||
|
||||
for _, transformer := range additionalTransformers {
|
||||
transforms[transformer.TypeOf] = transformer.Func
|
||||
}
|
||||
|
||||
return func(_ reflect.Type, target reflect.Type, data interface{}) (interface{}, error) {
|
||||
transform, ok := transforms[target]
|
||||
if !ok {
|
||||
return data, nil
|
||||
}
|
||||
return transform(data)
|
||||
}
|
||||
}
|
||||
|
||||
// keys needs to be converted to strings for jsonschema
|
||||
func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
|
||||
if mapping, ok := value.(map[interface{}]interface{}); ok {
|
||||
dict := make(map[string]interface{})
|
||||
for key, entry := range mapping {
|
||||
str, ok := key.(string)
|
||||
if !ok {
|
||||
return nil, formatInvalidKeyError(keyPrefix, key)
|
||||
}
|
||||
var newKeyPrefix string
|
||||
if keyPrefix == "" {
|
||||
newKeyPrefix = str
|
||||
} else {
|
||||
newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
|
||||
}
|
||||
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dict[str] = convertedEntry
|
||||
}
|
||||
return dict, nil
|
||||
}
|
||||
if list, ok := value.([]interface{}); ok {
|
||||
var convertedList []interface{}
|
||||
for index, entry := range list {
|
||||
newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
|
||||
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
convertedList = append(convertedList, convertedEntry)
|
||||
}
|
||||
return convertedList, nil
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func formatInvalidKeyError(keyPrefix string, key interface{}) error {
|
||||
var location string
|
||||
if keyPrefix == "" {
|
||||
location = "at top level"
|
||||
} else {
|
||||
location = fmt.Sprintf("in %s", keyPrefix)
|
||||
}
|
||||
return errors.Errorf("Non-string key %s: %#v", location, key)
|
||||
}
|
||||
|
||||
// LoadServices produces a ServiceConfig map from a compose file Dict
|
||||
// the servicesDict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadServices(servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) ([]types.ServiceConfig, error) {
|
||||
var services []types.ServiceConfig
|
||||
|
||||
for name, serviceDef := range servicesDict {
|
||||
serviceConfig, err := LoadService(name, serviceDef.(map[string]interface{}), workingDir, lookupEnv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
services = append(services, *serviceConfig)
|
||||
}
|
||||
|
||||
return services, nil
|
||||
}
|
||||
|
||||
// LoadService produces a single ServiceConfig from a compose file Dict
|
||||
// the serviceDict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) (*types.ServiceConfig, error) {
|
||||
serviceConfig := &types.ServiceConfig{}
|
||||
if err := Transform(serviceDict, serviceConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serviceConfig.Name = name
|
||||
|
||||
if err := resolveEnvironment(serviceConfig, workingDir, lookupEnv); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := resolveVolumePaths(serviceConfig.Volumes, workingDir, lookupEnv); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serviceConfig, nil
|
||||
}
|
||||
|
||||
func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error {
|
||||
environment := types.MappingWithEquals{}
|
||||
|
||||
if len(serviceConfig.EnvFile) > 0 {
|
||||
for _, file := range serviceConfig.EnvFile {
|
||||
filePath := absPath(workingDir, file)
|
||||
fileVars, err := envfile.Parse(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
environment.OverrideBy(fileVars.Resolve(lookupEnv).RemoveEmpty())
|
||||
}
|
||||
}
|
||||
|
||||
environment.OverrideBy(serviceConfig.Environment.Resolve(lookupEnv))
|
||||
serviceConfig.Environment = environment
|
||||
return nil
|
||||
}
|
||||
|
||||
func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) error {
|
||||
for i, volume := range volumes {
|
||||
if volume.Type != "bind" {
|
||||
continue
|
||||
}
|
||||
|
||||
if volume.Source == "" {
|
||||
return errors.New(`invalid mount config for type "bind": field Source must not be empty`)
|
||||
}
|
||||
|
||||
filePath := expandUser(volume.Source, lookupEnv)
|
||||
// Check if source is an absolute path (either Unix or Windows), to
|
||||
// handle a Windows client with a Unix daemon or vice-versa.
|
||||
//
|
||||
// Note that this is not required for Docker for Windows when specifying
|
||||
// a local Windows path, because Docker for Windows translates the Windows
|
||||
// path into a valid path within the VM.
|
||||
if !path.IsAbs(filePath) && !isAbs(filePath) {
|
||||
filePath = absPath(workingDir, filePath)
|
||||
}
|
||||
volume.Source = filePath
|
||||
volumes[i] = volume
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: make this more robust
|
||||
func expandUser(path string, lookupEnv template.Mapping) string {
|
||||
if strings.HasPrefix(path, "~") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
logrus.Warn("cannot expand '~', because the environment lacks HOME")
|
||||
return path
|
||||
}
|
||||
return filepath.Join(home, path[1:])
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func transformUlimits(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case int:
|
||||
return types.UlimitsConfig{Single: value}, nil
|
||||
case map[string]interface{}:
|
||||
ulimit := types.UlimitsConfig{}
|
||||
if v, ok := value["soft"]; ok {
|
||||
ulimit.Soft = v.(int)
|
||||
}
|
||||
if v, ok := value["hard"]; ok {
|
||||
ulimit.Hard = v.(int)
|
||||
}
|
||||
return ulimit, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for ulimits", value)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadNetworks produces a NetworkConfig map from a compose file Dict
|
||||
// the source Dict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadNetworks(source map[string]interface{}, version string) (map[string]types.NetworkConfig, error) {
|
||||
networks := make(map[string]types.NetworkConfig)
|
||||
err := Transform(source, &networks)
|
||||
if err != nil {
|
||||
return networks, err
|
||||
}
|
||||
for name, network := range networks {
|
||||
if !network.External.External {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case network.External.Name != "":
|
||||
if network.Name != "" {
|
||||
return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name)
|
||||
}
|
||||
logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name)
|
||||
network.Name = network.External.Name
|
||||
network.External.Name = ""
|
||||
case network.Name == "":
|
||||
network.Name = name
|
||||
}
|
||||
networks[name] = network
|
||||
}
|
||||
return networks, nil
|
||||
}
|
||||
|
||||
func externalVolumeError(volume, key string) error {
|
||||
return errors.Errorf(
|
||||
"conflicting parameters \"external\" and %q specified for volume %q",
|
||||
key, volume)
|
||||
}
|
||||
|
||||
// LoadVolumes produces a VolumeConfig map from a compose file Dict
|
||||
// the source Dict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig, error) {
|
||||
volumes := make(map[string]types.VolumeConfig)
|
||||
if err := Transform(source, &volumes); err != nil {
|
||||
return volumes, err
|
||||
}
|
||||
|
||||
for name, volume := range volumes {
|
||||
if !volume.External.External {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case volume.Driver != "":
|
||||
return nil, externalVolumeError(name, "driver")
|
||||
case len(volume.DriverOpts) > 0:
|
||||
return nil, externalVolumeError(name, "driver_opts")
|
||||
case len(volume.Labels) > 0:
|
||||
return nil, externalVolumeError(name, "labels")
|
||||
case volume.External.Name != "":
|
||||
if volume.Name != "" {
|
||||
return nil, errors.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name)
|
||||
}
|
||||
logrus.Warnf("volume %s: volume.external.name is deprecated in favor of volume.name", name)
|
||||
volume.Name = volume.External.Name
|
||||
volume.External.Name = ""
|
||||
case volume.Name == "":
|
||||
volume.Name = name
|
||||
}
|
||||
volumes[name] = volume
|
||||
}
|
||||
return volumes, nil
|
||||
}
|
||||
|
||||
// LoadSecrets produces a SecretConfig map from a compose file Dict
|
||||
// the source Dict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadSecrets(source map[string]interface{}, details types.ConfigDetails) (map[string]types.SecretConfig, error) {
|
||||
secrets := make(map[string]types.SecretConfig)
|
||||
if err := Transform(source, &secrets); err != nil {
|
||||
return secrets, err
|
||||
}
|
||||
for name, secret := range secrets {
|
||||
obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
secretConfig := types.SecretConfig(obj)
|
||||
secrets[name] = secretConfig
|
||||
}
|
||||
return secrets, nil
|
||||
}
|
||||
|
||||
// LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict
|
||||
// the source Dict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails) (map[string]types.ConfigObjConfig, error) {
|
||||
configs := make(map[string]types.ConfigObjConfig)
|
||||
if err := Transform(source, &configs); err != nil {
|
||||
return configs, err
|
||||
}
|
||||
for name, config := range configs {
|
||||
obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configConfig := types.ConfigObjConfig(obj)
|
||||
configs[name] = configConfig
|
||||
}
|
||||
return configs, nil
|
||||
}
|
||||
|
||||
func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails) (types.FileObjectConfig, error) {
|
||||
// if "external: true"
|
||||
switch {
|
||||
case obj.External.External:
|
||||
// handle deprecated external.name
|
||||
if obj.External.Name != "" {
|
||||
if obj.Name != "" {
|
||||
return obj, errors.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name)
|
||||
}
|
||||
logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name)
|
||||
obj.Name = obj.External.Name
|
||||
obj.External.Name = ""
|
||||
} else {
|
||||
if obj.Name == "" {
|
||||
obj.Name = name
|
||||
}
|
||||
}
|
||||
// if not "external: true"
|
||||
case obj.Driver != "":
|
||||
if obj.File != "" {
|
||||
return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name)
|
||||
}
|
||||
default:
|
||||
obj.File = absPath(details.WorkingDir, obj.File)
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func absPath(workingDir string, filePath string) string {
|
||||
if filepath.IsAbs(filePath) {
|
||||
return filePath
|
||||
}
|
||||
return filepath.Join(workingDir, filePath)
|
||||
}
|
||||
|
||||
var transformMapStringString TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case map[string]interface{}:
|
||||
return toMapStringString(value, false), nil
|
||||
case map[string]string:
|
||||
return value, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for map[string]string", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformExternal TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case bool:
|
||||
return map[string]interface{}{"external": value}, nil
|
||||
case map[string]interface{}:
|
||||
return map[string]interface{}{"external": true, "name": value["name"]}, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for external", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformServicePort TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch entries := data.(type) {
|
||||
case []interface{}:
|
||||
// We process the list instead of individual items here.
|
||||
// The reason is that one entry might be mapped to multiple ServicePortConfig.
|
||||
// Therefore we take an input of a list and return an output of a list.
|
||||
ports := []interface{}{}
|
||||
for _, entry := range entries {
|
||||
switch value := entry.(type) {
|
||||
case int:
|
||||
parsed, err := types.ParsePortConfig(fmt.Sprint(value))
|
||||
if err != nil {
|
||||
return data, err
|
||||
}
|
||||
for _, v := range parsed {
|
||||
ports = append(ports, v)
|
||||
}
|
||||
case string:
|
||||
parsed, err := types.ParsePortConfig(value)
|
||||
if err != nil {
|
||||
return data, err
|
||||
}
|
||||
for _, v := range parsed {
|
||||
ports = append(ports, v)
|
||||
}
|
||||
case map[string]interface{}:
|
||||
ports = append(ports, value)
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for port", value)
|
||||
}
|
||||
}
|
||||
return ports, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for port", entries)
|
||||
}
|
||||
}
|
||||
|
||||
var transformStringSourceMap TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return map[string]interface{}{"source": value}, nil
|
||||
case map[string]interface{}:
|
||||
return data, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for secret", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return map[string]interface{}{"context": value}, nil
|
||||
case map[string]interface{}:
|
||||
return data, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for service build", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return ParseVolume(value)
|
||||
case map[string]interface{}:
|
||||
return data, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for service volume", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformServiceNetworkMap TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
if list, ok := value.([]interface{}); ok {
|
||||
mapValue := map[interface{}]interface{}{}
|
||||
for _, name := range list {
|
||||
mapValue[name] = nil
|
||||
}
|
||||
return mapValue, nil
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
var transformStringOrNumberList TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
list := value.([]interface{})
|
||||
result := make([]string, len(list))
|
||||
for i, item := range list {
|
||||
result[i] = fmt.Sprint(item)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
var transformStringList TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return []string{value}, nil
|
||||
case []interface{}:
|
||||
return value, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for string list", value)
|
||||
}
|
||||
}
|
||||
|
||||
func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc {
|
||||
return func(data interface{}) (interface{}, error) {
|
||||
return transformMappingOrList(data, sep, allowNil), nil
|
||||
}
|
||||
}
|
||||
|
||||
func transformListOrMappingFunc(sep string, allowNil bool) TransformerFunc {
|
||||
return func(data interface{}) (interface{}, error) {
|
||||
return transformListOrMapping(data, sep, allowNil), nil
|
||||
}
|
||||
}
|
||||
|
||||
func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) interface{} {
|
||||
switch value := listOrMapping.(type) {
|
||||
case map[string]interface{}:
|
||||
return toStringList(value, sep, allowNil)
|
||||
case []interface{}:
|
||||
return listOrMapping
|
||||
}
|
||||
panic(errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping))
|
||||
}
|
||||
|
||||
func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) interface{} {
|
||||
switch value := mappingOrList.(type) {
|
||||
case map[string]interface{}:
|
||||
return toMapStringString(value, allowNil)
|
||||
case ([]interface{}):
|
||||
result := make(map[string]interface{})
|
||||
for _, value := range value {
|
||||
parts := strings.SplitN(value.(string), sep, 2)
|
||||
key := parts[0]
|
||||
switch {
|
||||
case len(parts) == 1 && allowNil:
|
||||
result[key] = nil
|
||||
case len(parts) == 1 && !allowNil:
|
||||
result[key] = ""
|
||||
default:
|
||||
result[key] = parts[1]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList))
|
||||
}
|
||||
|
||||
var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
if str, ok := value.(string); ok {
|
||||
return shellwords.Parse(str)
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
var transformHealthCheckTest TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return append([]string{"CMD-SHELL"}, value), nil
|
||||
case []interface{}:
|
||||
return value, nil
|
||||
default:
|
||||
return value, errors.Errorf("invalid type %T for healthcheck.test", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformSize TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
switch value := value.(type) {
|
||||
case int:
|
||||
return int64(value), nil
|
||||
case string:
|
||||
return units.RAMInBytes(value)
|
||||
}
|
||||
panic(errors.Errorf("invalid type for size %T", value))
|
||||
}
|
||||
|
||||
var transformStringToDuration TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
d, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
return types.Duration(d), nil
|
||||
default:
|
||||
return value, errors.Errorf("invalid type %T for duration", value)
|
||||
}
|
||||
}
|
||||
|
||||
func toMapStringString(value map[string]interface{}, allowNil bool) map[string]interface{} {
|
||||
output := make(map[string]interface{})
|
||||
for key, value := range value {
|
||||
output[key] = toString(value, allowNil)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func toString(value interface{}, allowNil bool) interface{} {
|
||||
switch {
|
||||
case value != nil:
|
||||
return fmt.Sprint(value)
|
||||
case allowNil:
|
||||
return nil
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func toStringList(value map[string]interface{}, separator string, allowNil bool) []string {
|
||||
output := []string{}
|
||||
for key, value := range value {
|
||||
if value == nil && !allowNil {
|
||||
continue
|
||||
}
|
||||
output = append(output, fmt.Sprintf("%s%s%s", key, separator, value))
|
||||
}
|
||||
sort.Strings(output)
|
||||
return output
|
||||
}
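For orientation, a hedged sketch (not part of the vendored file) of how ParseYAML and Load above fit together; the ConfigDetails and ConfigFile field names are assumed from the accompanying compose-go types package, and the compose content and image name are illustrative.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/loader"
	"github.com/compose-spec/compose-go/types"
)

func main() {
	source := []byte(`
version: "3.9"
services:
  gateway:
    image: ghcr.io/example/gateway:latest
    ports:
      - "8080:8080"
`)
	// ParseYAML turns the raw bytes into a map[string]interface{} with string keys.
	dict, err := loader.ParseYAML(source)
	if err != nil {
		panic(err)
	}

	// Load interpolates, validates against the compose schema and converts the
	// mapping into typed config.
	cfg, err := loader.Load(types.ConfigDetails{
		WorkingDir:  ".",
		ConfigFiles: []types.ConfigFile{{Filename: "docker-compose.yaml", Config: dict}},
		Environment: map[string]string{}, // values used for ${VAR} interpolation
	})
	if err != nil {
		panic(err)
	}
	for _, svc := range cfg.Services {
		fmt.Println(svc.Name, svc.Image)
	}
}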
vendor/github.com/compose-spec/compose-go/loader/merge.go (generated, vendored, new file, 275 lines)
@@ -0,0 +1,275 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loader
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type specials struct {
|
||||
m map[reflect.Type]func(dst, src reflect.Value) error
|
||||
}
|
||||
|
||||
func (s *specials) Transformer(t reflect.Type) func(dst, src reflect.Value) error {
|
||||
if fn, ok := s.m[t]; ok {
|
||||
return fn
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func merge(configs []*types.Config) (*types.Config, error) {
|
||||
base := configs[0]
|
||||
for _, override := range configs[1:] {
|
||||
var err error
|
||||
base.Services, err = mergeServices(base.Services, override.Services)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename)
|
||||
}
|
||||
base.Volumes, err = mergeVolumes(base.Volumes, override.Volumes)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge volumes from %s", override.Filename)
|
||||
}
|
||||
base.Networks, err = mergeNetworks(base.Networks, override.Networks)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge networks from %s", override.Filename)
|
||||
}
|
||||
base.Secrets, err = mergeSecrets(base.Secrets, override.Secrets)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge secrets from %s", override.Filename)
|
||||
}
|
||||
base.Configs, err = mergeConfigs(base.Configs, override.Configs)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge configs from %s", override.Filename)
|
||||
}
|
||||
}
|
||||
return base, nil
|
||||
}
|
||||
|
||||
func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) {
|
||||
baseServices := mapByName(base)
|
||||
overrideServices := mapByName(override)
|
||||
specials := &specials{
|
||||
m: map[reflect.Type]func(dst, src reflect.Value) error{
|
||||
reflect.TypeOf(&types.LoggingConfig{}): safelyMerge(mergeLoggingConfig),
|
||||
reflect.TypeOf(&types.UlimitsConfig{}): safelyMerge(mergeUlimitsConfig),
|
||||
reflect.TypeOf([]types.ServicePortConfig{}): mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice),
|
||||
reflect.TypeOf([]types.ServiceSecretConfig{}): mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice),
|
||||
reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice),
|
||||
reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig,
|
||||
reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig,
|
||||
},
|
||||
}
|
||||
for name, overrideService := range overrideServices {
|
||||
overrideService := overrideService
|
||||
if baseService, ok := baseServices[name]; ok {
|
||||
if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge service %s", name)
|
||||
}
|
||||
baseServices[name] = baseService
|
||||
continue
|
||||
}
|
||||
baseServices[name] = overrideService
|
||||
}
|
||||
services := []types.ServiceConfig{}
|
||||
for _, baseService := range baseServices {
|
||||
services = append(services, baseService)
|
||||
}
|
||||
sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name })
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func toServiceSecretConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||
secrets, ok := s.([]types.ServiceSecretConfig)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("not a serviceSecretConfig: %v", s)
|
||||
}
|
||||
m := map[interface{}]interface{}{}
|
||||
for _, secret := range secrets {
|
||||
m[secret.Source] = secret
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func toServiceConfigObjConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||
secrets, ok := s.([]types.ServiceConfigObjConfig)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("not a serviceSecretConfig: %v", s)
|
||||
}
|
||||
m := map[interface{}]interface{}{}
|
||||
for _, secret := range secrets {
|
||||
m[secret.Source] = secret
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func toServicePortConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||
ports, ok := s.([]types.ServicePortConfig)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("not a servicePortConfig slice: %v", s)
|
||||
}
|
||||
m := map[interface{}]interface{}{}
|
||||
for _, p := range ports {
|
||||
m[p.Published] = p
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func toServiceSecretConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||
s := []types.ServiceSecretConfig{}
|
||||
for _, v := range m {
|
||||
s = append(s, v.(types.ServiceSecretConfig))
|
||||
}
|
||||
sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source })
|
||||
dst.Set(reflect.ValueOf(s))
|
||||
return nil
|
||||
}
|
||||
|
||||
func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||
s := []types.ServiceConfigObjConfig{}
|
||||
for _, v := range m {
|
||||
s = append(s, v.(types.ServiceConfigObjConfig))
|
||||
}
|
||||
sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source })
|
||||
dst.Set(reflect.ValueOf(s))
|
||||
return nil
|
||||
}
|
||||
|
||||
func toServicePortConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||
s := []types.ServicePortConfig{}
|
||||
for _, v := range m {
|
||||
s = append(s, v.(types.ServicePortConfig))
|
||||
}
|
||||
sort.Slice(s, func(i, j int) bool { return s[i].Published < s[j].Published })
|
||||
dst.Set(reflect.ValueOf(s))
|
||||
return nil
|
||||
}
|
||||
|
||||
type tomapFn func(s interface{}) (map[interface{}]interface{}, error)
|
||||
type writeValueFromMapFn func(reflect.Value, map[interface{}]interface{}) error
|
||||
|
||||
func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src reflect.Value) error {
|
||||
return func(dst, src reflect.Value) error {
|
||||
if src.IsNil() {
|
||||
return nil
|
||||
}
|
||||
if dst.IsNil() {
|
||||
dst.Set(src)
|
||||
return nil
|
||||
}
|
||||
return mergeFn(dst, src)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error {
|
||||
return func(dst, src reflect.Value) error {
|
||||
dstMap, err := sliceToMap(tomap, dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcMap, err := sliceToMap(tomap, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mergo.Map(&dstMap, srcMap, mergo.WithOverride); err != nil {
|
||||
return err
|
||||
}
|
||||
return writeValue(dst, dstMap)
|
||||
}
|
||||
}
|
||||
|
||||
func sliceToMap(tomap tomapFn, v reflect.Value) (map[interface{}]interface{}, error) {
|
||||
// check if valid
|
||||
if !v.IsValid() {
|
||||
return nil, errors.Errorf("invalid value : %+v", v)
|
||||
}
|
||||
return tomap(v.Interface())
|
||||
}
|
||||
|
||||
func mergeLoggingConfig(dst, src reflect.Value) error {
|
||||
// Same driver, merging options
|
||||
if getLoggingDriver(dst.Elem()) == getLoggingDriver(src.Elem()) ||
|
||||
getLoggingDriver(dst.Elem()) == "" || getLoggingDriver(src.Elem()) == "" {
|
||||
if getLoggingDriver(dst.Elem()) == "" {
|
||||
dst.Elem().FieldByName("Driver").SetString(getLoggingDriver(src.Elem()))
|
||||
}
|
||||
dstOptions := dst.Elem().FieldByName("Options").Interface().(map[string]string)
|
||||
srcOptions := src.Elem().FieldByName("Options").Interface().(map[string]string)
|
||||
return mergo.Merge(&dstOptions, srcOptions, mergo.WithOverride)
|
||||
}
|
||||
// Different driver, override with src
|
||||
dst.Set(src)
|
||||
return nil
|
||||
}
|
||||
|
||||
//nolint: unparam
|
||||
func mergeUlimitsConfig(dst, src reflect.Value) error {
|
||||
if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
|
||||
dst.Elem().Set(src.Elem())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//nolint: unparam
|
||||
func mergeServiceNetworkConfig(dst, src reflect.Value) error {
|
||||
if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
|
||||
dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases"))
|
||||
if ipv4 := src.Elem().FieldByName("Ipv4Address").Interface().(string); ipv4 != "" {
|
||||
dst.Elem().FieldByName("Ipv4Address").SetString(ipv4)
|
||||
}
|
||||
if ipv6 := src.Elem().FieldByName("Ipv6Address").Interface().(string); ipv6 != "" {
|
||||
dst.Elem().FieldByName("Ipv6Address").SetString(ipv6)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getLoggingDriver(v reflect.Value) string {
|
||||
return v.FieldByName("Driver").String()
|
||||
}
|
||||
|
||||
func mapByName(services []types.ServiceConfig) map[string]types.ServiceConfig {
|
||||
m := map[string]types.ServiceConfig{}
|
||||
for _, service := range services {
|
||||
m[service.Name] = service
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func mergeVolumes(base, override map[string]types.VolumeConfig) (map[string]types.VolumeConfig, error) {
|
||||
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||
return base, err
|
||||
}
|
||||
|
||||
func mergeNetworks(base, override map[string]types.NetworkConfig) (map[string]types.NetworkConfig, error) {
|
||||
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||
return base, err
|
||||
}
|
||||
|
||||
func mergeSecrets(base, override map[string]types.SecretConfig) (map[string]types.SecretConfig, error) {
|
||||
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||
return base, err
|
||||
}
|
||||
|
||||
func mergeConfigs(base, override map[string]types.ConfigObjConfig) (map[string]types.ConfigObjConfig, error) {
|
||||
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||
return base, err
|
||||
}
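As a follow-up, a sketch under the same assumptions showing the multi-file behaviour implemented by merge(): when Load receives more than one config file, later files are folded into earlier ones and scalar fields such as the image are overridden.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/loader"
	"github.com/compose-spec/compose-go/types"
)

func main() {
	base, err := loader.ParseYAML([]byte(`
version: "3.9"
services:
  app:
    image: app:1.0
`))
	if err != nil {
		panic(err)
	}
	override, err := loader.ParseYAML([]byte(`
version: "3.9"
services:
  app:
    image: app:2.0
`))
	if err != nil {
		panic(err)
	}

	cfg, err := loader.Load(types.ConfigDetails{
		WorkingDir: ".",
		ConfigFiles: []types.ConfigFile{
			{Filename: "docker-compose.yaml", Config: base},
			{Filename: "docker-compose.override.yaml", Config: override},
		},
		Environment: map[string]string{},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Services[0].Image) // app:2.0 — the override replaces the base image
}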
vendor/github.com/compose-spec/compose-go/loader/volume.go (generated, vendored, new file, 146 lines)
@@ -0,0 +1,146 @@
/*
   Copyright 2020 The Compose Specification Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package loader

import (
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/compose-spec/compose-go/types"
	"github.com/pkg/errors"
)

const endOfSpec = rune(0)

// ParseVolume parses a volume spec without any knowledge of the target platform
func ParseVolume(spec string) (types.ServiceVolumeConfig, error) {
	volume := types.ServiceVolumeConfig{}

	switch len(spec) {
	case 0:
		return volume, errors.New("invalid empty volume spec")
	case 1, 2:
		volume.Target = spec
		volume.Type = string(types.VolumeTypeVolume)
		return volume, nil
	}

	buffer := []rune{}
	for _, char := range spec + string(endOfSpec) {
		switch {
		case isWindowsDrive(buffer, char):
			buffer = append(buffer, char)
		case char == ':' || char == endOfSpec:
			if err := populateFieldFromBuffer(char, buffer, &volume); err != nil {
				populateType(&volume)
				return volume, errors.Wrapf(err, "invalid spec: %s", spec)
			}
			buffer = []rune{}
		default:
			buffer = append(buffer, char)
		}
	}

	populateType(&volume)
	return volume, nil
}

func isWindowsDrive(buffer []rune, char rune) bool {
	return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0])
}

func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error {
	strBuffer := string(buffer)
	switch {
	case len(buffer) == 0:
		return errors.New("empty section between colons")
	// Anonymous volume
	case volume.Source == "" && char == endOfSpec:
		volume.Target = strBuffer
		return nil
	case volume.Source == "":
		volume.Source = strBuffer
		return nil
	case volume.Target == "":
		volume.Target = strBuffer
		return nil
	case char == ':':
		return errors.New("too many colons")
	}
	for _, option := range strings.Split(strBuffer, ",") {
		switch option {
		case "ro":
			volume.ReadOnly = true
		case "rw":
			volume.ReadOnly = false
		case "nocopy":
			volume.Volume = &types.ServiceVolumeVolume{NoCopy: true}
		default:
			if isBindOption(option) {
				volume.Bind = &types.ServiceVolumeBind{Propagation: option}
			}
			// ignore unknown options
		}
	}
	return nil
}

var Propagations = []string{
	types.PropagationRPrivate,
	types.PropagationPrivate,
	types.PropagationRShared,
	types.PropagationShared,
	types.PropagationRSlave,
	types.PropagationSlave,
}

func isBindOption(option string) bool {
	for _, propagation := range Propagations {
		if option == propagation {
			return true
		}
	}
	return false
}

func populateType(volume *types.ServiceVolumeConfig) {
	switch {
	// Anonymous volume
	case volume.Source == "":
		volume.Type = string(types.VolumeTypeVolume)
	case isFilePath(volume.Source):
		volume.Type = string(types.VolumeTypeBind)
	default:
		volume.Type = string(types.VolumeTypeVolume)
	}
}

func isFilePath(source string) bool {
	switch source[0] {
	case '.', '/', '~':
		return true
	}

	// windows named pipes
	if strings.HasPrefix(source, `\\`) {
		return true
	}

	first, nextIndex := utf8.DecodeRuneInString(source)
	return isWindowsDrive([]rune{first}, rune(source[nextIndex]))
}
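For reference, a short usage sketch (not part of the vendored file) of ParseVolume with the compose short volume syntax; the paths are illustrative.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/loader"
)

func main() {
	// A bind mount in short syntax: host path, container path, read-only flag.
	v, err := loader.ParseVolume("./data:/app/data:ro")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Type, v.Source, v.Target, v.ReadOnly) // bind ./data /app/data true

	// No source section: an anonymous volume targeting the given path.
	anon, _ := loader.ParseVolume("/var/lib/data")
	fmt.Println(anon.Type, anon.Target) // volume /var/lib/data
}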
vendor/github.com/compose-spec/compose-go/loader/windows_path.go (generated, vendored, new file, 82 lines)
@@ -0,0 +1,82 @@
/*
   Copyright 2020 The Compose Specification Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package loader

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// https://github.com/golang/go/blob/master/LICENSE

// This file contains utilities to check for Windows absolute paths on Linux.
// The code in this file was largely copied from the Golang filepath package
// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_windows.go#L12-L65

func isSlash(c uint8) bool {
	return c == '\\' || c == '/'
}

// isAbs reports whether the path is a Windows absolute path.
func isAbs(path string) (b bool) {
	l := volumeNameLen(path)
	if l == 0 {
		return false
	}
	path = path[l:]
	if path == "" {
		return false
	}
	return isSlash(path[0])
}

// volumeNameLen returns length of the leading volume name on Windows.
// It returns 0 elsewhere.
// nolint: gocyclo
func volumeNameLen(path string) int {
	if len(path) < 2 {
		return 0
	}
	// with drive letter
	c := path[0]
	if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
		return 2
	}
	// is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
	if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
		!isSlash(path[2]) && path[2] != '.' {
		// first, leading `\\` and next shouldn't be `\`. its server name.
		for n := 3; n < l-1; n++ {
			// second, next '\' shouldn't be repeated.
			if isSlash(path[n]) {
				n++
				// third, following something characters. its share name.
				if !isSlash(path[n]) {
					if path[n] == '.' {
						break
					}
					for ; n < l; n++ {
						if isSlash(path[n]) {
							break
						}
					}
					return n
				}
				break
			}
		}
	}
	return 0
}
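A hedged illustration of the drive-letter and UNC rules above, written as an in-package test since isAbs and volumeNameLen are unexported; this is a sketch, not part of the vendored source.

package loader

import "testing"

// TestIsAbsSketch lists the expected results of the Windows-path check for a
// few representative inputs.
func TestIsAbsSketch(t *testing.T) {
	cases := map[string]bool{
		`C:\ProgramData\docker`: true,  // drive letter followed by a separator
		`C:relative`:            false, // drive letter but no separator
		`\\host\share\path`:     true,  // UNC path with server and share name
		`/unix/path`:            false, // no Windows volume name
	}
	for in, want := range cases {
		if got := isAbs(in); got != want {
			t.Errorf("isAbs(%q) = %v, want %v", in, got, want)
		}
	}
}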
vendor/github.com/compose-spec/compose-go/schema/bindata.go (generated, vendored, new file, 269 lines)
@@ -0,0 +1,269 @@
|
||||
// Code generated by "esc -o bindata.go -pkg schema -ignore .*.go -private data"; DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type _escLocalFS struct{}
|
||||
|
||||
var _escLocal _escLocalFS
|
||||
|
||||
type _escStaticFS struct{}
|
||||
|
||||
var _escStatic _escStaticFS
|
||||
|
||||
type _escDirectory struct {
|
||||
fs http.FileSystem
|
||||
name string
|
||||
}
|
||||
|
||||
type _escFile struct {
|
||||
compressed string
|
||||
size int64
|
||||
modtime int64
|
||||
local string
|
||||
isDir bool
|
||||
|
||||
once sync.Once
|
||||
data []byte
|
||||
name string
|
||||
}
|
||||
|
||||
func (_escLocalFS) Open(name string) (http.File, error) {
|
||||
f, present := _escData[path.Clean(name)]
|
||||
if !present {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return os.Open(f.local)
|
||||
}
|
||||
|
||||
func (_escStaticFS) prepare(name string) (*_escFile, error) {
|
||||
f, present := _escData[path.Clean(name)]
|
||||
if !present {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
var err error
|
||||
f.once.Do(func() {
|
||||
f.name = path.Base(name)
|
||||
if f.size == 0 {
|
||||
return
|
||||
}
|
||||
var gr *gzip.Reader
|
||||
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
|
||||
gr, err = gzip.NewReader(b64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
f.data, err = ioutil.ReadAll(gr)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (fs _escStaticFS) Open(name string) (http.File, error) {
|
||||
f, err := fs.prepare(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.File()
|
||||
}
|
||||
|
||||
func (dir _escDirectory) Open(name string) (http.File, error) {
|
||||
return dir.fs.Open(dir.name + name)
|
||||
}
|
||||
|
||||
func (f *_escFile) File() (http.File, error) {
|
||||
type httpFile struct {
|
||||
*bytes.Reader
|
||||
*_escFile
|
||||
}
|
||||
return &httpFile{
|
||||
Reader: bytes.NewReader(f.data),
|
||||
_escFile: f,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if !f.isDir {
|
||||
return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name)
|
||||
}
|
||||
|
||||
fis, ok := _escDirs[f.local]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local)
|
||||
}
|
||||
limit := count
|
||||
if count <= 0 || limit > len(fis) {
|
||||
limit = len(fis)
|
||||
}
|
||||
|
||||
if len(fis) == 0 && count > 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return fis[0:limit], nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Stat() (os.FileInfo, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *_escFile) Size() int64 {
|
||||
return f.size
|
||||
}
|
||||
|
||||
func (f *_escFile) Mode() os.FileMode {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *_escFile) ModTime() time.Time {
|
||||
return time.Unix(f.modtime, 0)
|
||||
}
|
||||
|
||||
func (f *_escFile) IsDir() bool {
|
||||
return f.isDir
|
||||
}
|
||||
|
||||
func (f *_escFile) Sys() interface{} {
|
||||
return f
|
||||
}
|
||||
|
||||
// _escFS returns a http.Filesystem for the embedded assets. If useLocal is true,
|
||||
// the filesystem's contents are instead used.
|
||||
func _escFS(useLocal bool) http.FileSystem {
|
||||
if useLocal {
|
||||
return _escLocal
|
||||
}
|
||||
return _escStatic
|
||||
}
|
||||
|
||||
// _escDir returns a http.Filesystem for the embedded assets on a given prefix dir.
|
||||
// If useLocal is true, the filesystem's contents are instead used.
|
||||
func _escDir(useLocal bool, name string) http.FileSystem {
|
||||
if useLocal {
|
||||
return _escDirectory{fs: _escLocal, name: name}
|
||||
}
|
||||
return _escDirectory{fs: _escStatic, name: name}
|
||||
}
|
||||
|
||||
// _escFSByte returns the named file from the embedded assets. If useLocal is
|
||||
// true, the filesystem's contents are instead used.
|
||||
func _escFSByte(useLocal bool, name string) ([]byte, error) {
|
||||
if useLocal {
|
||||
f, err := _escLocal.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := ioutil.ReadAll(f)
|
||||
_ = f.Close()
|
||||
return b, err
|
||||
}
|
||||
f, err := _escStatic.prepare(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.data, nil
|
||||
}
|
||||
|
||||
// _escFSMustByte is the same as _escFSByte, but panics if name is not present.
|
||||
func _escFSMustByte(useLocal bool, name string) []byte {
|
||||
b, err := _escFSByte(useLocal, name)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// _escFSString is the string version of _escFSByte.
|
||||
func _escFSString(useLocal bool, name string) (string, error) {
|
||||
b, err := _escFSByte(useLocal, name)
|
||||
return string(b), err
|
||||
}
|
||||
|
||||
// _escFSMustString is the string version of _escFSMustByte.
|
||||
func _escFSMustString(useLocal bool, name string) string {
|
||||
return string(_escFSMustByte(useLocal, name))
|
||||
}
|
||||
|
||||
var _escData = map[string]*_escFile{
|
||||
|
||||
"/data/config_schema_v3.9.json": {
|
||||
name: "config_schema_v3.9.json",
|
||||
local: "data/config_schema_v3.9.json",
|
||||
size: 18246,
|
||||
modtime: 1576078020,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xcS4/juBG++1cI2r1tPwbIIsDOLcecknMaHoGmyja3KZJbpDztHfi/B3q2RJEibcvd
|
||||
vUkHCHZaKj7qya+KJf9YJUn6s6Z7KEj6NUn3xqivj4+/aynum6cPEnePOZKtuf/y62Pz7Kf0rhrH8moI
|
||||
lWLLdlnzJjv87eG3h2p4Q2KOCioiufkdqGmeIfxRMoRq8FN6ANRMinR9t6reKZQK0DDQ6dek2lyS9CTd
|
||||
g8G02iATu7R+fKpnSJJUAx4YHczQb/Wnx9f5H3uyO3vWwWbr54oYAyj+Pd1b/frbE7n/8x/3//ly/9tD
|
||||
dr/+5efR60q+CNtm+Ry2TDDDpOjXT3vKU/uvU78wyfOamPDR2lvCNYx5FmC+S3wO8dyTvRPP7foOnsfs
|
||||
HCQvi6AGO6p3YqZZfhn9aaAIJmyyDdW7WWy1/DIMN1EjxHBH9U4MN8tfx/CqY9q9x/Tby33131M95+x8
|
||||
zSyD/dVMjGKeS5yumOOXZy9QjyRzUFwe6527ZdYQFCBM2ospSdJNyXhuS10K+Fc1xdPgYZL8sMP7YJ76
|
||||
/egvv1H07z289O+pFAZeTM3U/NKNCCR9BtwyDrEjCDaW7hEZZ9pkErOcUeMcz8kG+FUzUEL3kG1RFsFZ
|
||||
tlnDiXZO1EXwSM4NwR1ES1bvi0yzP0dyfUqZMLADTO/6seuTNXYyWdgxbZ+u/rdeOSZMKVEZyfMREwSR
|
||||
HKsdMQOFdvOXpKVgf5Twz5bEYAn2vDlKtfzEO5SlyhTBygvnZZ9SWRRELOWa5/ARIfnJITHy93aN4at+
|
||||
tdG2PNwkEVbpCBeBcBMOOJWlyxJpbPw414+SJC1ZHk+8O4e4kPl436IsNoDpaUI8cdLR3+uV642lfUOY
|
||||
AMwEKSBoxwg5CMMIz7QC6rMZh9Lm1NWaYIR40sgDIUXYMW3w6KRdeWJaXDwbyiMHBSLXWZM4nR/x0xz6
|
||||
LGrR6JSLuZOsmaY6y6q9pdbATANBur9wvCwIEzG2BMLgUUnWRM8PFxZBHLLe2s4WA4gDQymK7myIQxSD
|
||||
8S9Karg+Jvfne8v4XR9K1rZnSSxItdluba+XTC1vKMAhDxUSJzzjTDwvb+LwYpBke6nNJaAt3QPhZk/3
|
||||
QJ9nhg+pRqOlNjFGzgqyCxMJNj51NlJyIGJMpGhwHi05MW0VZ47wYqibLqrKwbRyt6tIffY7SZ0ik44c
|
||||
2QEwFhlL9ZrxueBBCJIEU+QR6beHJkOe8dH6X5xPobjr5Lef2Edi7OH2qpWC0AqTI2gdsqg2Y8kmwOWV
|
||||
dkKsY+P+RYnU+QlslOqCVY4gHPZB3ngri4O/ndo5Ixr0dRnpIAodfo20CdfYv8+O9Qz1zhmffwamGuJs
|
||||
zp0bWYeR9y3TYzXOHsaxoo4QQwdTEs2bJHSvceoVPjSLT3M8W91Rg26TGM5Eqbi0sKuWuAeocsOZ3kN+
|
||||
zhiURlLJ4xzDWf+Kd4aZJPEipKeQHRiHncWxC8YgkDyTgh8jKLUhGCytaKAlMnPMpDKLY0x3rezV6vtS
|
||||
2XhD1i3DZz3l/6eeoo+amsuwtTY5E5lUIIK+oY1U2Q4JhUwBMukUxSjA5iU2qcFkGs12gvCQm5lCbS8s
|
||||
KRgTdvaSs4L5ncZZUAritQaruSHaDDyLCtkzGcJ8ghCRGewJnnF01I659ZxPq0gMNO4XqOe7azeydtKf
|
||||
Bb3sbay96MftVKUOJnE1jdBZxNHuuPj+a0TokY5q8vVFcbxdKTJ23jrqRyOCccFYM21A0GP8Qhs2uYE5
|
||||
N++Ky7pqKrLzl2LcuUm0r7Y9EW/CipBUKo9qrmSjP1Juz0WH4fzJqR05Z/LYgglWlEX6Nfniy1jjJXNj
|
||||
aG/VgGYAvS/2fpf4XJ3sOcM5Wz7Nd4mMOzDObGOxSrVzvRdD0mA/y3wfSKhHg2mysS6jnHVbYQAPboAV
|
||||
RmgIBpl1P9Rh1yHEAv0xb1EMK0CW5lJ4StCcD3DtbrdBS013HzNnQgNK24KeehPqyi5BM4nBIyDy+h4s
|
||||
CrwgKM4o0SGAeEWRHyXnG0Kfs9d72SVueRVBwjlwposYdJvmwMnxIstpLrQI4yVCRmjElUirK8GMxMuX
|
||||
LMhL1i1bkwT8tvFTzMG3Joj6nLHxZeMZ91uG2jRlCKnav8bhf8Gr7lLlxMCnSXyaxLBCV+cGeilzcBYB
|
||||
luk+VGXsfUVaQCHDnSPXlvwnDSu6ggm+C8iPIgAH9Q4EIKPZyBo8R86U9ka3KNdbdoM9JGdNirlQm1Oz
|
||||
j5jIc2Woq+JOBcQLZXRUaP3ORC6/nw+zFpC24oSCBc2uFbQ2SJgwZ/cq2GJRCFtAEBRm3XJaM5qpGy1X
|
||||
kFcIJH+HKyOXtXXAtALsmbCRrKsieYnZXPE1hDNQzWUC0wGTlHKsd4e+/Xr267fKLSmCgX5lV7dlyIbm
|
||||
7Sd9bqthwRCfHggvI25PLuo38VUdIgafnB9nhXTakS2Q2sX0f0U1ILVUmVTL34CEm4zW4fo7U6RYKjZH
|
||||
t2SlzlTjI0TdciM8Be4bR93ljtyuN9Oj1ae+lHXXy2odrWKvYyy3/7qqZl9buspvxBhC91GVujMLJm9Q
|
||||
+JwU+p0hraX6jGhnRLS/uv1/PFttv1sNfhtZU4U/Nb3CQiO+EfkA+l9Crf9zblnlq5wYyGbYeQNbniAP
|
||||
py23VJ+2vLQtfxArsFqaBtYwvVqbU1B03/VqeJPWb8Mmc/xChy8L9W7KdxFsLdrqZp7zBYPIwy8zaH/u
|
||||
+4gbweQFmkndOrUKVKu+ddT+gQF/6OnGT35uoOJTHCdXvz/G7UPNTwWsR/KxSJpvlwZRex1VvHD9CIHd
|
||||
vNT9GICnn3Kc4a+q/59W/w0AAP//CCwovkZHAAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/data": {
|
||||
name: "data",
|
||||
local: `data`,
|
||||
isDir: true,
|
||||
},
|
||||
}
|
||||
|
||||
var _escDirs = map[string][]os.FileInfo{
|
||||
|
||||
"data": {
|
||||
_escData["/data/config_schema_v3.9.json"],
|
||||
},
|
||||
}
vendor/github.com/compose-spec/compose-go/schema/schema.go (generated, vendored, new file, 191 lines)
@@ -0,0 +1,191 @@
/*
   Copyright 2020 The Compose Specification Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package schema

//go:generate esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data

import (
	"fmt"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/xeipuuv/gojsonschema"
)

const (
	defaultVersion = "1.0"
	versionField   = "version"
)

type portsFormatChecker struct{}

func (checker portsFormatChecker) IsFormat(input interface{}) bool {
	// TODO: implement this
	return true
}

type durationFormatChecker struct{}

func (checker durationFormatChecker) IsFormat(input interface{}) bool {
	value, ok := input.(string)
	if !ok {
		return false
	}
	_, err := time.ParseDuration(value)
	return err == nil
}

func init() {
	gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
	gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
	gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
}

// Version returns the version of the config, defaulting to version 1.0
func Version(config map[string]interface{}) string {
	version, ok := config[versionField]
	if !ok {
		return defaultVersion
	}
	return normalizeVersion(fmt.Sprintf("%v", version))
}

func normalizeVersion(version string) string {
	switch version {
	case "3":
		return "3.9" // latest
	case "3.0", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8":
		return "3.9" // pre-existing specification but backward compatible
	default:
		return version
	}
}

// Validate uses the jsonschema to validate the configuration
func Validate(config map[string]interface{}, version string) error {
	version = normalizeVersion(version)
	schemaData, err := _escFSByte(false, fmt.Sprintf("/data/config_schema_v%s.json", version))
	if err != nil {
		return errors.Errorf("unsupported Compose file version: %s", version)
	}

	schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
	dataLoader := gojsonschema.NewGoLoader(config)

	result, err := gojsonschema.Validate(schemaLoader, dataLoader)
	if err != nil {
		return err
	}

	if !result.Valid() {
		return toError(result)
	}

	return nil
}

func toError(result *gojsonschema.Result) error {
	err := getMostSpecificError(result.Errors())
	return err
}

const (
	jsonschemaOneOf = "number_one_of"
	jsonschemaAnyOf = "number_any_of"
)

func getDescription(err validationError) string {
	switch err.parent.Type() {
	case "invalid_type":
		if expectedType, ok := err.parent.Details()["expected"].(string); ok {
			return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
		}
	case jsonschemaOneOf, jsonschemaAnyOf:
		if err.child == nil {
			return err.parent.Description()
		}
		return err.child.Description()
	}
	return err.parent.Description()
}

func humanReadableType(definition string) string {
	if definition[0:1] == "[" {
		allTypes := strings.Split(definition[1:len(definition)-1], ",")
		for i, t := range allTypes {
			allTypes[i] = humanReadableType(t)
		}
		return fmt.Sprintf(
			"%s or %s",
			strings.Join(allTypes[0:len(allTypes)-1], ", "),
			allTypes[len(allTypes)-1],
		)
	}
	if definition == "object" {
		return "mapping"
	}
	if definition == "array" {
		return "list"
	}
	return definition
}

type validationError struct {
	parent gojsonschema.ResultError
	child  gojsonschema.ResultError
}

func (err validationError) Error() string {
	description := getDescription(err)
	return fmt.Sprintf("%s %s", err.parent.Field(), description)
}

func getMostSpecificError(errors []gojsonschema.ResultError) validationError {
	mostSpecificError := 0
	for i, err := range errors {
		if specificity(err) > specificity(errors[mostSpecificError]) {
			mostSpecificError = i
			continue
		}

		if specificity(err) == specificity(errors[mostSpecificError]) {
			// Invalid type errors win in a tie-breaker for most specific field name
			if err.Type() == "invalid_type" && errors[mostSpecificError].Type() != "invalid_type" {
				mostSpecificError = i
			}
		}
	}

	if mostSpecificError+1 == len(errors) {
		return validationError{parent: errors[mostSpecificError]}
	}

	switch errors[mostSpecificError].Type() {
	case "number_one_of", "number_any_of":
		return validationError{
			parent: errors[mostSpecificError],
			child:  errors[mostSpecificError+1],
		}
	default:
		return validationError{parent: errors[mostSpecificError]}
	}
}

func specificity(err gojsonschema.ResultError) int {
	return len(strings.Split(err.Field(), "."))
}
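For reference, a minimal sketch of how a caller might drive the two exported entry points above. The map literal stands in for a parsed Compose file; the service and image names are illustrative only.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/schema"
)

func main() {
	// A parsed Compose file as a generic map, e.g. the output of a YAML loader.
	cfg := map[string]interface{}{
		"version": "3.7",
		"services": map[string]interface{}{
			"gateway": map[string]interface{}{
				"image": "ghcr.io/example/gateway:latest",
			},
		},
	}

	// Version normalizes 3.x to the latest embedded schema, so this prints "3.9".
	v := schema.Version(cfg)
	fmt.Println("schema version:", v)

	// Validate loads /data/config_schema_v<version>.json from the embedded data
	// and checks the map against it.
	if err := schema.Validate(cfg, v); err != nil {
		fmt.Println("invalid compose file:", err)
		return
	}
	fmt.Println("compose file is valid")
}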
269 vendor/github.com/compose-spec/compose-go/template/template.go generated vendored Normal file
@ -0,0 +1,269 @@
/*
   Copyright 2020 The Compose Specification Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package template

import (
	"fmt"
	"regexp"
	"strings"
)

var delimiter = "\\$"
var substitution = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?"

var patternString = fmt.Sprintf(
	"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
	delimiter, delimiter, substitution, substitution,
)

var defaultPattern = regexp.MustCompile(patternString)

// DefaultSubstituteFuncs contains the default SubstituteFuncs used by the docker cli
var DefaultSubstituteFuncs = []SubstituteFunc{
	softDefault,
	hardDefault,
	requiredNonEmpty,
	required,
}

// InvalidTemplateError is returned when a variable template is not in a valid
// format
type InvalidTemplateError struct {
	Template string
}

func (e InvalidTemplateError) Error() string {
	return fmt.Sprintf("Invalid template: %#v", e.Template)
}

// Mapping is a user-supplied function which maps from variable names to values.
// It returns the value as a string and a bool indicating whether the value is
// present, to distinguish between an empty string and the absence of a value.
type Mapping func(string) (string, bool)

// SubstituteFunc is a user-supplied function that applies a substitution.
// It returns the value as a string, a bool indicating whether the function
// could apply the substitution, and an error.
type SubstituteFunc func(string, Mapping) (string, bool, error)

// SubstituteWith substitutes variables in the string with their values.
// It accepts additional substitute functions.
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
	var err error
	result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
		matches := pattern.FindStringSubmatch(substring)
		groups := matchGroups(matches, pattern)
		if escaped := groups["escaped"]; escaped != "" {
			return escaped
		}

		substitution := groups["named"]
		if substitution == "" {
			substitution = groups["braced"]
		}

		if substitution == "" {
			err = &InvalidTemplateError{Template: template}
			return ""
		}

		for _, f := range subsFuncs {
			var (
				value   string
				applied bool
			)
			value, applied, err = f(substitution, mapping)
			if err != nil {
				return ""
			}
			if !applied {
				continue
			}
			return value
		}

		value, _ := mapping(substitution)
		return value
	})

	return result, err
}

// Substitute replaces variables in the string with their values
func Substitute(template string, mapping Mapping) (string, error) {
	return SubstituteWith(template, mapping, defaultPattern, DefaultSubstituteFuncs...)
}

// ExtractVariables returns a map of all the variables defined in the specified
// Compose file (dict representation) and their default value, if any.
func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]Variable {
	if pattern == nil {
		pattern = defaultPattern
	}
	return recurseExtract(configDict, pattern)
}

func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variable {
	m := map[string]Variable{}

	switch value := value.(type) {
	case string:
		if values, is := extractVariable(value, pattern); is {
			for _, v := range values {
				m[v.Name] = v
			}
		}
	case map[string]interface{}:
		for _, elem := range value {
			submap := recurseExtract(elem, pattern)
			for key, value := range submap {
				m[key] = value
			}
		}

	case []interface{}:
		for _, elem := range value {
			if values, is := extractVariable(elem, pattern); is {
				for _, v := range values {
					m[v.Name] = v
				}
			}
		}
	}

	return m
}

// Variable describes a substitution variable found in a Compose file, along
// with its default value (if any) and whether it is required.
type Variable struct {
	Name         string
	DefaultValue string
	Required     bool
}

func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) {
	sValue, ok := value.(string)
	if !ok {
		return []Variable{}, false
	}
	matches := pattern.FindAllStringSubmatch(sValue, -1)
	if len(matches) == 0 {
		return []Variable{}, false
	}
	values := []Variable{}
	for _, match := range matches {
		groups := matchGroups(match, pattern)
		if escaped := groups["escaped"]; escaped != "" {
			continue
		}
		val := groups["named"]
		if val == "" {
			val = groups["braced"]
		}
		name := val
		var defaultValue string
		var required bool
		switch {
		case strings.Contains(val, ":?"):
			name, _ = partition(val, ":?")
			required = true
		case strings.Contains(val, "?"):
			name, _ = partition(val, "?")
			required = true
		case strings.Contains(val, ":-"):
			name, defaultValue = partition(val, ":-")
		case strings.Contains(val, "-"):
			name, defaultValue = partition(val, "-")
		}
		values = append(values, Variable{
			Name:         name,
			DefaultValue: defaultValue,
			Required:     required,
		})
	}
	return values, len(values) > 0
}

// Soft default (fall back if unset or empty)
func softDefault(substitution string, mapping Mapping) (string, bool, error) {
	sep := ":-"
	if !strings.Contains(substitution, sep) {
		return "", false, nil
	}
	name, defaultValue := partition(substitution, sep)
	value, ok := mapping(name)
	if !ok || value == "" {
		return defaultValue, true, nil
	}
	return value, true, nil
}

// Hard default (fall back only if unset)
func hardDefault(substitution string, mapping Mapping) (string, bool, error) {
	sep := "-"
	if !strings.Contains(substitution, sep) {
		return "", false, nil
	}
	name, defaultValue := partition(substitution, sep)
	value, ok := mapping(name)
	if !ok {
		return defaultValue, true, nil
	}
	return value, true, nil
}

func requiredNonEmpty(substitution string, mapping Mapping) (string, bool, error) {
	return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" })
}

func required(substitution string, mapping Mapping) (string, bool, error) {
	return withRequired(substitution, mapping, "?", func(_ string) bool { return true })
}

func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) {
	if !strings.Contains(substitution, sep) {
		return "", false, nil
	}
	name, errorMessage := partition(substitution, sep)
	value, ok := mapping(name)
	if !ok || !valid(value) {
		return "", true, &InvalidTemplateError{
			Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage),
		}
	}
	return value, true, nil
}

func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string {
	groups := make(map[string]string)
	for i, name := range pattern.SubexpNames()[1:] {
		groups[name] = matches[i+1]
	}
	return groups
}

// Split the string at the first occurrence of sep, and return the part before the separator,
// and the part after the separator.
//
// If the separator is not found, return the string itself, followed by an empty string.
func partition(s, sep string) (string, string) {
	if strings.Contains(s, sep) {
		parts := strings.SplitN(s, sep, 2)
		return parts[0], parts[1]
	}
	return s, ""
}
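A minimal sketch of the exported API above: a Mapping backed by a plain map, plus templates using the `:-` soft-default form handled by softDefault. The variable names, image reference, and values are illustrative only.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/template"
)

func main() {
	env := map[string]string{"TAG": "0.14.4"}

	// Mapping reports both the value and whether the variable is set at all,
	// which is what lets "-" and ":-" behave differently.
	mapping := func(name string) (string, bool) {
		v, ok := env[name]
		return v, ok
	}

	// TAG is set and non-empty, so the soft default is ignored.
	out, err := template.Substitute("registry.example.com/app:${TAG:-latest}", mapping)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // registry.example.com/app:0.14.4

	// PORT is unset, so the default applies.
	out, err = template.Substitute("${PORT:-8080}", mapping)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 8080
}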
187 vendor/github.com/compose-spec/compose-go/types/config.go generated vendored Normal file
@ -0,0 +1,187 @@
/*
   Copyright 2020 The Compose Specification Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package types

import (
	"encoding/json"
	"fmt"
	"sort"
)

// ConfigDetails are the details about a group of ConfigFiles
type ConfigDetails struct {
	Version     string
	WorkingDir  string
	ConfigFiles []ConfigFile
	Environment map[string]string
}

// LookupEnv provides a lookup function for environment variables
func (cd ConfigDetails) LookupEnv(key string) (string, bool) {
	v, ok := cd.Environment[key]
	return v, ok
}

// ConfigFile is a filename and the contents of the file as a Dict
type ConfigFile struct {
	Filename string
	Config   map[string]interface{}
}

// Config is a full compose file configuration
type Config struct {
	Filename   string                     `yaml:"-" json:"-"`
	Version    string                     `json:"version"`
	Services   Services                   `json:"services"`
	Networks   map[string]NetworkConfig   `yaml:",omitempty" json:"networks,omitempty"`
	Volumes    map[string]VolumeConfig    `yaml:",omitempty" json:"volumes,omitempty"`
	Secrets    map[string]SecretConfig    `yaml:",omitempty" json:"secrets,omitempty"`
	Configs    map[string]ConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
	Extensions map[string]interface{}     `yaml:",inline" json:"-"`
}

// ServiceNames returns the names of all services in this Compose config
func (c Config) ServiceNames() []string {
	names := []string{}
	for _, s := range c.Services {
		names = append(names, s.Name)
	}
	sort.Strings(names)
	return names
}

// VolumeNames returns the names of all volumes in this Compose config
func (c Config) VolumeNames() []string {
	names := []string{}
	for k := range c.Volumes {
		names = append(names, k)
	}
	sort.Strings(names)
	return names
}

// NetworkNames returns the names of all networks in this Compose config
func (c Config) NetworkNames() []string {
	names := []string{}
	for k := range c.Networks {
		names = append(names, k)
	}
	sort.Strings(names)
	return names
}

// SecretNames returns the names of all secrets in this Compose config
func (c Config) SecretNames() []string {
	names := []string{}
	for k := range c.Secrets {
		names = append(names, k)
	}
	sort.Strings(names)
	return names
}

// ConfigNames returns the names of all configs in this Compose config
func (c Config) ConfigNames() []string {
	names := []string{}
	for k := range c.Configs {
		names = append(names, k)
	}
	sort.Strings(names)
	return names
}

// GetServices retrieves services by name, or returns all services if no names are specified
func (c Config) GetServices(names []string) (Services, error) {
	if len(names) == 0 {
		return c.Services, nil
	}
	services := Services{}
	for _, name := range names {
		service, err := c.GetService(name)
		if err != nil {
			return nil, err
		}
		services = append(services, service)
	}
	return services, nil
}

// GetService retrieves a specific service by name
func (c Config) GetService(name string) (ServiceConfig, error) {
	for _, s := range c.Services {
		if s.Name == name {
			return s, nil
		}
	}
	return ServiceConfig{}, fmt.Errorf("no such service: %s", name)
}

// ServiceFunc is a callback applied to a single service configuration
type ServiceFunc func(service ServiceConfig) error

// WithServices runs ServiceFunc on each requested service and its dependencies, in dependency order
func (c Config) WithServices(names []string, fn ServiceFunc) error {
	return c.withServices(names, fn, map[string]bool{})
}

func (c Config) withServices(names []string, fn ServiceFunc, done map[string]bool) error {
	services, err := c.GetServices(names)
	if err != nil {
		return err
	}
	for _, service := range services {
		if done[service.Name] {
			continue
		}
		dependencies := service.GetDependencies()
		if len(dependencies) > 0 {
			err := c.withServices(dependencies, fn, done)
			if err != nil {
				return err
			}
		}
		if err := fn(service); err != nil {
			return err
		}
		done[service.Name] = true
	}
	return nil
}

// MarshalJSON makes Config implement json.Marshaler
func (c Config) MarshalJSON() ([]byte, error) {
	m := map[string]interface{}{
		"version":  c.Version,
		"services": c.Services,
	}

	if len(c.Networks) > 0 {
		m["networks"] = c.Networks
	}
	if len(c.Volumes) > 0 {
		m["volumes"] = c.Volumes
	}
	if len(c.Secrets) > 0 {
		m["secrets"] = c.Secrets
	}
	if len(c.Configs) > 0 {
		m["configs"] = c.Configs
	}
	for k, v := range c.Extensions {
		m[k] = v
	}
	return json.Marshal(m)
}
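To close, a small sketch of the Config helpers above. It assumes Services is a slice of ServiceConfig with a Name field, which is how the methods in this file iterate over it; the service names are illustrative only.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/types"
)

func main() {
	cfg := types.Config{
		Version: "3.9",
		Services: types.Services{
			{Name: "gateway"},
			{Name: "queue-worker"},
		},
	}

	// Sorted names of every service in the file.
	fmt.Println(cfg.ServiceNames()) // [gateway queue-worker]

	// Look a single service up by name; unknown names return an error.
	svc, err := cfg.GetService("gateway")
	if err != nil {
		panic(err)
	}
	fmt.Println(svc.Name)

	// MarshalJSON only emits the sections that are actually populated.
	out, err := cfg.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}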
Some files were not shown because too many files have changed in this diff.