Mirror of https://github.com/openfaas/faasd.git (synced 2025-06-08 16:06:47 +00:00)

Compare commits: 203 commits
.github/ISSUE_TEMPLATE.md (vendored): 34 changed lines
@ -1,19 +1,37 @@
|
||||
<!--- Provide a general summary of the issue in the Title above -->
|
||||
## Due diligence
|
||||
|
||||
<!-- Due diligence -->
|
||||
## My actions before raising this issue
|
||||
Before you ask for help or support, make sure that you've [consulted the manual for faasd](https://openfaas.gumroad.com/l/serverless-for-everyone-else). We can't answer questions that are already covered by the manual.
|
||||
|
||||
|
||||
<!-- How is this affecting you? What task are you trying to accomplish? -->
|
||||
## Why do you need this?
|
||||
|
||||
<!-- Attempts to mask or hide this may result in the issue being closed -->
|
||||
## Who is this for?
|
||||
|
||||
What company is this for? Are you listed in the [ADOPTERS.md](https://github.com/openfaas/faas/blob/master/ADOPTERS.md) file?
|
||||
|
||||
<!--- Provide a general summary of the issue in the Title above -->
|
||||
## Expected Behaviour
|
||||
<!--- If you're describing a bug, tell us what should happen -->
|
||||
<!--- If you're suggesting a change/improvement, tell us how it should work -->
|
||||
|
||||
|
||||
## Current Behaviour
|
||||
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
|
||||
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
|
||||
|
||||
## List all Possible Solutions
|
||||
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
|
||||
<!--- or ideas how to implement the addition or change -->
|
||||
|
||||
## List the one solution that you would recommend
|
||||
<!--- If you were to be on the hook for this change. -->
|
||||
## List All Possible Solutions and Workarounds
|
||||
<!--- Suggest a fix/reason for the bug, or ideas how to implement -->
|
||||
<!--- the addition or change -->
|
||||
<!--- Is there a workaround which could avoid making changes? -->
|
||||
|
||||
## Which Solution Do You Recommend?
|
||||
<!--- Pick your preferred solution, if you were to implement and maintain this change -->
|
||||
|
||||
|
||||
## Steps to Reproduce (for bugs)
|
||||
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
|
||||
@ -23,10 +41,6 @@
|
||||
3.
|
||||
4.
|
||||
|
||||
## Context
|
||||
<!--- How has this issue affected you? What are you trying to accomplish? -->
|
||||
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
|
||||
|
||||
## Your Environment
|
||||
|
||||
* OS and architecture:
|
||||
|
.github/ISSUE_TEMPLATE/config.yml (vendored, new file): 1 changed line
@ -0,0 +1 @@
|
||||
blank_issues_enabled: false
|
.github/ISSUE_TEMPLATE/issue.md (vendored, new file): 69 changed lines
@ -0,0 +1,69 @@
|
||||
---
|
||||
name: Report an issue
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
## Due diligence
|
||||
|
||||
<!-- Due diligence -->
|
||||
## My actions before raising this issue
|
||||
Before you ask for help or support, make sure that you've [consulted the manual for faasd](https://openfaas.gumroad.com/l/serverless-for-everyone-else). We can't answer questions that are already covered by the manual.
|
||||
|
||||
|
||||
<!-- How is this affecting you? What task are you trying to accomplish? -->
|
||||
## Why do you need this?
|
||||
|
||||
<!-- Attempts to mask or hide this may result in the issue being closed -->
|
||||
## Who is this for?
|
||||
|
||||
What company is this for? Are you listed in the [ADOPTERS.md](https://github.com/openfaas/faas/blob/master/ADOPTERS.md) file?
|
||||
|
||||
<!--- Provide a general summary of the issue in the Title above -->
|
||||
## Expected Behaviour
|
||||
<!--- If you're describing a bug, tell us what should happen -->
|
||||
<!--- If you're suggesting a change/improvement, tell us how it should work -->
|
||||
|
||||
|
||||
## Current Behaviour
|
||||
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
|
||||
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
|
||||
|
||||
|
||||
## List All Possible Solutions and Workarounds
|
||||
<!--- Suggest a fix/reason for the bug, or ideas how to implement -->
|
||||
<!--- the addition or change -->
|
||||
<!--- Is there a workaround which could avoid making changes? -->
|
||||
|
||||
## Which Solution Do You Recommend?
|
||||
<!--- Pick your preferred solution, if you were to implement and maintain this change -->
|
||||
|
||||
|
||||
## Steps to Reproduce (for bugs)
|
||||
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
|
||||
<!--- reproduce this bug. Include code to reproduce, if relevant -->
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
4.
|
||||
|
||||
## Your Environment
|
||||
|
||||
* OS and architecture:
|
||||
|
||||
* Versions:
|
||||
|
||||
```sh
|
||||
go version
|
||||
|
||||
containerd -version
|
||||
|
||||
uname -a
|
||||
|
||||
cat /etc/os-release
|
||||
|
||||
faasd version
|
||||
```
|
.github/workflows/build.yaml (vendored): 10 changed lines
@ -10,19 +10,15 @@ jobs:
|
||||
build:
|
||||
env:
|
||||
GO111MODULE: off
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.15.x]
|
||||
os: [ubuntu-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
go-version: 1.22.x
|
||||
|
||||
- name: test
|
||||
run: make test
|
||||
|
.github/workflows/publish.yaml (vendored): 12 changed lines
@ -7,23 +7,19 @@ on:
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [ 1.15.x ]
|
||||
os: [ ubuntu-latest ]
|
||||
runs-on: ${{ matrix.os }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
go-version: 1.22.x
|
||||
- name: Make publish
|
||||
run: make publish
|
||||
- name: Upload release binaries
|
||||
uses: alexellis/upload-assets@0.2.2
|
||||
uses: alexellis/upload-assets@0.4.1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
|
.github/workflows/verify-images.yaml (vendored, new file): 18 changed lines
@ -0,0 +1,18 @@
|
||||
name: Verify Docker Compose Images
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- '**.yaml'
|
||||
|
||||
jobs:
|
||||
verifyImages:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- uses: alexellis/setup-arkade@v3
|
||||
- name: Verify chart images
|
||||
id: verify_images
|
||||
run: |
|
||||
VERBOSE=true make verify-compose
|
EULA.md (new file): 22 changed lines
@ -0,0 +1,22 @@
|
||||
1.1 EULA Addendum for faasd Community Edition (CE). This EULA Addendum for faasd is part of the [OpenFaaS Community Edition (CE) EULA](https://github.com/openfaas/faas/blob/master/EULA.md).
|
||||
|
||||
1.2 Agreement Parties. This Agreement is between OpenFaaS Ltd (the "Licensor") and you (the "Licensee").
|
||||
|
||||
1.3 Governing Law. This Agreement shall be governed by, and construed in accordance with, the laws of England and Wales.
|
||||
|
||||
1.4 OpenFaaS Edge (faasd-pro). OpenFaaS Edge (faasd-pro) is a separate commercial product that is fully licensed under the OpenFaaS Pro EULA.
|
||||
|
||||
2.1 Grant of License for faasd CE. faasd CE may be installed once per year for a single 60-day trial period in a commercial setting for the sole purpose of evaluation and testing. This trial does not include any support or warranty, and it terminates automatically at the end of the 60-day period unless a separate commercial license is obtained.
|
||||
|
||||
2.2 Personal (Non-Commercial) Use. faasd CE may be used by an individual for personal, non-commercial projects, provided that the user is not acting on behalf of any company, corporation, or other legal entity.
|
||||
|
||||
2.3 Restrictions.
|
||||
(a) No redistribution, resale, or sublicensing of faasd CE is permitted.
|
||||
(b) The License granted under this Addendum is non-transferable.
|
||||
(c) Any continued commercial usage beyond the 60-day trial requires a separate commercial license from the Licensor.
|
||||
|
||||
2.4 Warranty Disclaimer. faasd CE is provided "as is," without warranty of any kind. No support or guarantee is included during the 60-day trial or for personal use.
|
||||
|
||||
2.5 Termination. If the terms of this Addendum are violated, the License granted hereunder terminates immediately. The Licensee must discontinue all use of faasd CE and destroy any copies in their possession.
|
||||
|
||||
2.6 Contact Information. For additional rights, commercial licenses, or support inquiries, please contact the Licensor at contact@openfaas.com.
|
LICENSE: 6 changed lines
@ -1,8 +1,10 @@
|
||||
License for faasd contributions from OpenFaaS Ltd - 2017, 2019-2024, see: EULA.md
|
||||
|
||||
Only third-party contributions to source code are licensed MIT:
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2020 Alex Ellis
|
||||
Copyright (c) 2020 OpenFaaS Ltd
|
||||
Copyright (c) 2020 OpenFaas Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
Makefile: 27 changed lines
@ -1,8 +1,8 @@
|
||||
Version := $(shell git describe --tags --dirty)
|
||||
GitCommit := $(shell git rev-parse HEAD)
|
||||
LDFLAGS := "-s -w -X main.Version=$(Version) -X main.GitCommit=$(GitCommit)"
|
||||
CONTAINERD_VER := 1.3.4
|
||||
CNI_VERSION := v0.8.6
|
||||
LDFLAGS := "-s -w -X github.com/openfaas/faasd/pkg.Version=$(Version) -X github.com/openfaas/faasd/pkg.GitCommit=$(GitCommit)"
|
||||
CONTAINERD_VER := 1.7.27
|
||||
CNI_VERSION := v0.9.1
|
||||
ARCH := amd64
|
||||
|
||||
export GO111MODULE=on
|
||||
@ -20,11 +20,14 @@ local:
|
||||
test:
|
||||
CGO_ENABLED=0 GOOS=linux go test -mod=vendor -ldflags $(LDFLAGS) ./...
|
||||
|
||||
.PHONY: dist-local
|
||||
dist-local:
|
||||
CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags $(LDFLAGS) -o bin/faasd
|
||||
|
||||
.PHONY: dist
|
||||
dist:
|
||||
CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-armhf
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-arm64
|
||||
CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags $(LDFLAGS) -o bin/faasd
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -mod=vendor -ldflags $(LDFLAGS) -o bin/faasd-arm64
|
||||
|
||||
.PHONY: hashgen
|
||||
hashgen:
|
||||
@ -32,8 +35,8 @@ hashgen:
|
||||
|
||||
.PHONY: prepare-test
|
||||
prepare-test:
|
||||
curl -sLSf https://github.com/containerd/containerd/releases/download/v$(CONTAINERD_VER)/containerd-$(CONTAINERD_VER).linux-amd64.tar.gz > /tmp/containerd.tar.gz && sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.2/containerd.service | sudo tee /etc/systemd/system/containerd.service
|
||||
curl -sLSf https://github.com/containerd/containerd/releases/download/v$(CONTAINERD_VER)/containerd-$(CONTAINERD_VER)-linux-amd64.tar.gz > /tmp/containerd.tar.gz && sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.7.0/containerd.service | sudo tee /etc/systemd/system/containerd.service
|
||||
sudo systemctl daemon-reload && sudo systemctl start containerd
|
||||
sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
@ -66,3 +69,11 @@ test-e2e:
|
||||
|
||||
# Removed due to timing issue in CI on GitHub Actions
|
||||
# /usr/local/bin/faas-cli logs figlet --since 15m --follow=false | grep Forking
|
||||
|
||||
verify-compose:
|
||||
@echo Verifying docker-compose.yaml images in remote registries && \
|
||||
arkade chart verify --verbose=$(VERBOSE) -f ./docker-compose.yaml
|
||||
|
||||
upgrade-compose:
|
||||
@echo Checking for newer images in remote registries && \
|
||||
arkade chart upgrade -f ./docker-compose.yaml --write
|
||||
|
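As an illustration, a minimal sketch of running these two targets locally, assuming `arkade` is already on your PATH (it is installed in CI via the `alexellis/setup-arkade` action used by the verify-images workflow):

```bash
# Check that every image referenced in docker-compose.yaml still exists in its remote registry
VERBOSE=true make verify-compose

# Rewrite docker-compose.yaml in place with any newer image tags found upstream
make upgrade-compose
```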
README.md: 151 changed lines
@ -1,18 +1,38 @@
|
||||
# faasd - a lightweight & portable faas engine
|
||||
|
||||
[](https://github.com/openfaas/faasd/actions)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://www.openfaas.com)
|
||||

|
||||
# faasd - a lightweight and portable version of OpenFaaS
|
||||
|
||||
faasd is [OpenFaaS](https://github.com/openfaas/) reimagined, but without the cost and complexity of Kubernetes. It runs on a single host with very modest requirements, making it fast and easy to manage. Under the hood it uses [containerd](https://containerd.io/) and [Container Networking Interface (CNI)](https://github.com/containernetworking/cni) along with the same core OpenFaaS components from the main project.
|
||||
|
||||

|
||||
|
||||
## Use-cases and tutorials
|
||||
## Features & Benefits
|
||||
|
||||
faasd is just another way to run OpenFaaS, so many things you read in the docs or in blog posts will work the same way.
|
||||
- **Lightweight** - faasd is a single Go binary, which runs as a systemd service making it easy to manage
|
||||
- **Portable** - it runs on any Linux host with containerd and CNI, on as little as 2x vCPU and 2GB RAM - x86_64 and Arm64 supported
|
||||
- **Easy to manage** - unlike Kubernetes, its API is stable and requires little maintenance
|
||||
- **Low cost** - it's licensed per installation, so you can invoke your functions as much as you need, without additional cost
|
||||
- **Stateful containers** - faasd supports stateful containers with persistent volumes such as PostgreSQL, Grafana, Prometheus, etc
|
||||
- **Built on OpenFaaS** - uses the same containers that power OpenFaaS on Kubernetes for the Gateway, Queue-Worker, Event Connectors, Dashboards, Scale To Zero, etc
|
||||
- **Ideal for internal business use** - use it to build internal tools, automate tasks, and integrate with existing systems
|
||||
- **Deploy it for a customer** - package your functions along with OpenFaaS Edge into a VM image, and deploy it to your customers to run in their own datacenters
|
||||
|
||||
faasd does not create the same maintenance burden you'll find with installing, upgrading, and securing a Kubernetes cluster. You can deploy it and walk away; in the worst case, just deploy a new VM and redeploy your functions.
|
||||
|
||||
You can learn more about supported OpenFaaS features in the [ROADMAP.md](/docs/ROADMAP.md)
|
||||
|
||||
## Getting Started
|
||||
|
||||
There are two versions of faasd:
|
||||
|
||||
* faasd CE - for non-commercial, personal use only licensed under [the faasd CE EULA](/EULA.md)
|
||||
* OpenFaaS Edge (faasd-pro) - fully licensed for commercial use
|
||||
|
||||
You can install either edition using the instructions in the [OpenFaaS docs](https://docs.openfaas.com/deployment/edge/).
|
||||
|
||||
You can request a license for [OpenFaaS Edge using this form](https://forms.gle/g6oKLTG29mDTSk5k9)
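For a quick start on an existing Linux host, the repository's bundled installation script can be used. This is a sketch only; the OpenFaaS docs linked above remain the authoritative instructions:

```bash
# Install faasd CE and its dependencies onto the current host
git clone https://github.com/openfaas/faasd --depth=1
cd faasd

./hack/install.sh
```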
|
||||
|
||||
## Further resources
|
||||
|
||||
There are many blog posts and documentation pages about OpenFaaS on Kubernetes, which also apply to faasd.
|
||||
|
||||
Videos and overviews:
|
||||
|
||||
@ -30,64 +50,33 @@ Use-cases and tutorials:
|
||||
|
||||
Additional resources:
|
||||
|
||||
* The official handbook - [Serverless For Everyone Else](https://gumroad.com/l/serverless-for-everyone-else)
|
||||
* The official handbook - [Serverless For Everyone Else](https://openfaas.gumroad.com/l/serverless-for-everyone-else)
|
||||
* For reference: [OpenFaaS docs](https://docs.openfaas.com)
|
||||
* For use-cases and tutorials: [OpenFaaS blog](https://openfaas.com/blog/)
|
||||
* For self-paced learning: [OpenFaaS workshop](https://github.com/openfaas/workshop/)
|
||||
|
||||
### About faasd
|
||||
### Deployment tutorials
|
||||
|
||||
* faasd is a static Golang binary
|
||||
* uses the same core components and ecosystem of OpenFaaS
|
||||
* uses containerd for its runtime and CNI for networking
|
||||
* is multi-arch, so works on Intel `x86_64` and ARM out the box
|
||||
* can run almost any other stateful container through its `docker-compose.yaml` file
|
||||
* [Use multipass on Windows, MacOS or Linux](/docs/MULTIPASS.md)
|
||||
* [Deploy to DigitalOcean with Terraform and TLS](https://www.openfaas.com/blog/faasd-tls-terraform/)
|
||||
* [Deploy to any IaaS with cloud-init](https://blog.alexellis.io/deploy-serverless-faasd-with-cloud-init/)
|
||||
* [Deploy faasd to your Raspberry Pi](https://blog.alexellis.io/faasd-for-lightweight-serverless/)
|
||||
|
||||
Most importantly, it's easy to manage so you can set it up and leave it alone to run your functions.
|
||||
Terraform scripts:
|
||||
|
||||

|
||||
* [Provision faasd on DigitalOcean with Terraform](docs/bootstrap/README.md)
|
||||
* [Provision faasd with TLS on DigitalOcean with Terraform](docs/bootstrap/digitalocean-terraform/README.md)
|
||||
|
||||
> Demo of faasd running asynchronous functions
|
||||
|
||||
Watch the video: [faasd walk-through with cloud-init and Multipass](https://www.youtube.com/watch?v=WX1tZoSXy8E)
|
||||
### Training / Handbook
|
||||
|
||||
### What does faasd deploy?
|
||||
You can find various resources to learn about faasd for free; however, the official handbook is the most comprehensive guide to getting started with faasd and OpenFaaS.
|
||||
|
||||
* faasd - itself, and its [faas-provider](https://github.com/openfaas/faas-provider) for containerd - CRUD for functions and services, implements the OpenFaaS REST API
|
||||
* [Prometheus](https://github.com/prometheus/prometheus) - for monitoring of services, metrics, scaling and dashboards
|
||||
* [OpenFaaS Gateway](https://github.com/openfaas/faas/tree/master/gateway) - the UI portal, CLI, and other OpenFaaS tooling can talk to this.
|
||||
* [OpenFaaS queue-worker for NATS](https://github.com/openfaas/nats-queue-worker) - run your invocations in the background without adding any code. See also: [asynchronous invocations](https://docs.openfaas.com/reference/triggers/#async-nats-streaming)
|
||||
* [NATS](https://nats.io) for asynchronous processing and queues
|
||||
["Serverless For Everyone Else"](https://openfaas.gumroad.com/l/serverless-for-everyone-else) is the official handbook and was written to contribute funds towards the upkeep and maintenance of the project.
|
||||
|
||||
faasd relies on industry-standard tools for running containers:
|
||||
|
||||
* [CNI](https://github.com/containernetworking/plugins)
|
||||
* [containerd](https://github.com/containerd/containerd)
|
||||
* [runc](https://github.com/opencontainers/runc)
|
||||
|
||||
You can use the standard [faas-cli](https://github.com/openfaas/faas-cli) along with pre-packaged functions from *the Function Store*, or build your own using any OpenFaaS template.
|
||||
|
||||
### When should you use faasd over OpenFaaS on Kubernetes?
|
||||
|
||||
* To deploy microservices and functions that you can update and monitor remotely
|
||||
* When you don't have the bandwidth to learn or manage Kubernetes
|
||||
* To deploy embedded apps in IoT and edge use-cases
|
||||
* To distribute applications to a customer or client
|
||||
* You have a cost sensitive project - run faasd on a 1GB VM for 5-10 USD / mo or on your Raspberry Pi
|
||||
* When you just need a few functions or microservices, without the cost of a cluster
|
||||
|
||||
faasd does not create the same maintenance burden you'll find with maintaining, upgrading, and securing a Kubernetes cluster. You can deploy it and walk away, in the worst case, just deploy a new VM and deploy your functions again.
|
||||
|
||||
## Learning faasd
|
||||
|
||||
The faasd project is MIT licensed and open source, and you will find some documentation, blog posts and videos for free.
|
||||
|
||||
However, "Serverless For Everyone Else" is the official handbook and was written to contribute funds towards the upkeep and maintenance of the project.
|
||||
|
||||
### The official handbook and docs for faasd
|
||||
|
||||
<a href="https://gumroad.com/l/serverless-for-everyone-else">
|
||||
<img src="https://static-2.gumroad.com/res/gumroad/2028406193591/asset_previews/741f2ad46ff0a08e16aaf48d21810ba7/retina/social4.png" width="40%"></a>
|
||||
<a href="https://openfaas.gumroad.com/l/serverless-for-everyone-else">
|
||||
<img src="https://www.alexellis.io/serverless.png" width="40%"></a>
|
||||
|
||||
You'll learn how to deploy code in any language, lift and shift Dockerfiles, run requests in queues, write background jobs and to integrate with databases. faasd packages the same code as OpenFaaS, so you get built-in metrics for your HTTP endpoints, a user-friendly CLI, pre-packaged functions and templates from the store and a UI.
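For instance, queued invocations are a one-line change at the caller. A minimal sketch, assuming the gateway is published on port 8080 (as in docker-compose.yaml) and a store function such as `figlet` has been deployed:

```bash
# Synchronous invocation through the gateway
curl -i http://127.0.0.1:8080/function/figlet -d "faasd"

# Asynchronous invocation: the gateway returns HTTP 202 and the
# queue-worker executes the request in the background via NATS
curl -i http://127.0.0.1:8080/async-function/figlet -d "faasd"
```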
|
||||
|
||||
@ -113,58 +102,4 @@ Topics include:
|
||||
|
||||
View sample pages, reviews and testimonials on Gumroad:
|
||||
|
||||
["Serverless For Everyone Else"](https://gumroad.com/l/serverless-for-everyone-else)
|
||||
|
||||
### Deploy faasd
|
||||
|
||||
The easiest way to deploy faasd is with cloud-init. We give several examples below, and most IaaS platforms will accept "user-data" pasted into their UI, or supplied via their API.
|
||||
|
||||
For trying it out on MacOS or Windows, we recommend using [multipass](https://multipass.run) to run faasd in a VM.
|
||||
|
||||
If you don't use cloud-init, or have already created your Linux server, you can use the installation script as shown below:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/openfaas/faasd --depth=1
|
||||
cd faasd
|
||||
|
||||
./hack/install.sh
|
||||
```
|
||||
|
||||
> This approach also works for Raspberry Pi
|
||||
|
||||
It's recommended that you do not install Docker on the same host as faasd, since 1) they may both use different versions of containerd and 2) Docker's networking rules can disrupt faasd's networking. When using faasd, make your faasd server a faasd server, and build container images on your laptop or in a CI pipeline.
|
||||
|
||||
#### Deployment tutorials
|
||||
|
||||
* [Use multipass on Windows, MacOS or Linux](/docs/MULTIPASS.md)
|
||||
* [Deploy to DigitalOcean with Terraform and TLS](https://www.openfaas.com/blog/faasd-tls-terraform/)
|
||||
* [Deploy to any IaaS with cloud-init](https://blog.alexellis.io/deploy-serverless-faasd-with-cloud-init/)
|
||||
* [Deploy faasd to your Raspberry Pi](https://blog.alexellis.io/faasd-for-lightweight-serverless/)
|
||||
|
||||
Terraform scripts:
|
||||
|
||||
* [Provision faasd on DigitalOcean with Terraform](docs/bootstrap/README.md)
|
||||
* [Provision faasd with TLS on DigitalOcean with Terraform](docs/bootstrap/digitalocean-terraform/README.md)
|
||||
|
||||
### Function and template store
|
||||
|
||||
For community functions see `faas-cli store --help`
|
||||
|
||||
For templates built by the community, see `faas-cli template store list`. You can also use the `dockerfile` template if you just want to migrate an existing service without the benefits of using a template.
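A minimal sketch of both stores in use (the function and template names here are examples from the store, not requirements):

```bash
# Deploy a pre-packaged function from the function store
faas-cli store list
faas-cli store deploy figlet

# Scaffold a new function from a community template
faas-cli template store pull golang-middleware
faas-cli new my-fn --lang golang-middleware
```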
|
||||
|
||||
### Community support
|
||||
|
||||
Commercial users and solo business owners should become OpenFaaS GitHub Sponsors to receive regular email updates on changes, tutorials and new features.
|
||||
|
||||
If you are learning faasd, or want to share your use-case, you can join the OpenFaaS Slack community.
|
||||
|
||||
* [Become an OpenFaaS GitHub Sponsor](https://github.com/sponsors/openfaas/)
|
||||
* [Join Slack](https://slack.openfaas.io/)
|
||||
|
||||
### Backlog, features and known issues
|
||||
|
||||
For completed features, WIP and upcoming roadmap see:
|
||||
|
||||
See [ROADMAP.md](docs/ROADMAP.md)
|
||||
|
||||
Are you looking to hack on faasd? Follow the [developer instructions](docs/DEV.md) for a manual installation, or use the `hack/install.sh` script and pick up from there.
|
||||
["Serverless For Everyone Else"](https://openfaas.gumroad.com/l/serverless-for-everyone-else)
|
cloud-config.txt
@ -10,17 +10,7 @@ packages:
|
||||
- git
|
||||
|
||||
runcmd:
|
||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||
- systemctl daemon-reload && systemctl start containerd
|
||||
- systemctl enable containerd
|
||||
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
- mkdir -p /opt/cni/bin
|
||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||
- mkdir -p /go/src/github.com/openfaas/
|
||||
- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.10.2 https://github.com/openfaas/faasd
|
||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.10.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||
- curl -sfL https://raw.githubusercontent.com/openfaas/faasd/master/hack/install.sh | bash -s -
|
||||
- systemctl status -l containerd --no-pager
|
||||
- journalctl -u faasd-provider --no-pager
|
||||
- systemctl status -l faasd-provider --no-pager
|
||||
|
cmd/install.go
@ -50,54 +50,52 @@ func runInstall(_ *cobra.Command, _ []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err := binExists("/usr/local/bin/", "faasd")
|
||||
if err != nil {
|
||||
if err := binExists("/usr/local/bin/", "faasd"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.InstallUnit("faasd-provider", map[string]string{
|
||||
if err := systemd.InstallUnit("faasd-provider", map[string]string{
|
||||
"Cwd": faasdProviderWd,
|
||||
"SecretMountPath": path.Join(faasdwd, "secrets")})
|
||||
|
||||
if err != nil {
|
||||
"SecretMountPath": path.Join(faasdwd, "secrets")}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.InstallUnit("faasd", map[string]string{"Cwd": faasdwd})
|
||||
if err != nil {
|
||||
if err := systemd.InstallUnit("faasd", map[string]string{"Cwd": faasdwd}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.DaemonReload()
|
||||
if err != nil {
|
||||
if err := systemd.DaemonReload(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.Enable("faasd-provider")
|
||||
if err != nil {
|
||||
if err := systemd.Enable("faasd-provider"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.Enable("faasd")
|
||||
if err != nil {
|
||||
if err := systemd.Enable("faasd"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.Start("faasd-provider")
|
||||
if err != nil {
|
||||
if err := systemd.Start("faasd-provider"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = systemd.Start("faasd")
|
||||
if err != nil {
|
||||
if err := systemd.Start("faasd"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(`Check status with:
|
||||
fmt.Println(`
|
||||
The initial setup downloads various container images, which may take a
|
||||
minute or two depending on your connection.
|
||||
|
||||
Check the status of the faasd service with:
|
||||
|
||||
sudo journalctl -u faasd --lines 100 -f
|
||||
|
||||
Login with:
|
||||
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s`)
|
||||
sudo -E cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s`)
|
||||
|
||||
fmt.Println("")
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -109,7 +107,16 @@ func binExists(folder, name string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func ensureSecretsDir(folder string) error {
|
||||
if _, err := os.Stat(folder); err != nil {
|
||||
err = os.MkdirAll(folder, secretDirPermission)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func ensureWorkingDir(folder string) error {
|
||||
if _, err := os.Stat(folder); err != nil {
|
||||
err = os.MkdirAll(folder, workingDirectoryPermission)
|
||||
|
cmd/provider.go: 220 changed lines
@ -1,9 +1,8 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
@ -14,6 +13,7 @@ import (
|
||||
"github.com/openfaas/faas-provider/logs"
|
||||
"github.com/openfaas/faas-provider/proxy"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
faasdlogs "github.com/openfaas/faasd/pkg/logs"
|
||||
"github.com/openfaas/faasd/pkg/provider/config"
|
||||
@ -21,96 +21,152 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const secretDirPermission = 0755
|
||||
|
||||
func makeProviderCmd() *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "provider",
|
||||
Short: "Run the faasd-provider",
|
||||
}
|
||||
|
||||
command.Flags().String("pull-policy", "Always", `Set to "Always" to force a pull of images upon deployment, or "IfNotPresent" to try to use a cached image.`)
|
||||
|
||||
command.RunE = func(_ *cobra.Command, _ []string) error {
|
||||
|
||||
pullPolicy, flagErr := command.Flags().GetString("pull-policy")
|
||||
if flagErr != nil {
|
||||
return flagErr
|
||||
}
|
||||
|
||||
alwaysPull := false
|
||||
if pullPolicy == "Always" {
|
||||
alwaysPull = true
|
||||
}
|
||||
|
||||
config, providerConfig, err := config.ReadFromEnv(types.OsEnv{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("faasd-provider starting..\tService Timeout: %s\n", config.WriteTimeout.String())
|
||||
printVersion()
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writeHostsErr := ioutil.WriteFile(path.Join(wd, "hosts"),
|
||||
[]byte(`127.0.0.1 localhost`), workingDirectoryPermission)
|
||||
|
||||
if writeHostsErr != nil {
|
||||
return fmt.Errorf("cannot write hosts file: %s", writeHostsErr)
|
||||
}
|
||||
|
||||
writeResolvErr := ioutil.WriteFile(path.Join(wd, "resolv.conf"),
|
||||
[]byte(`nameserver 8.8.8.8`), workingDirectoryPermission)
|
||||
|
||||
if writeResolvErr != nil {
|
||||
return fmt.Errorf("cannot write resolv.conf file: %s", writeResolvErr)
|
||||
}
|
||||
|
||||
cni, err := cninetwork.InitNetwork()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := containerd.New(providerConfig.Sock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer client.Close()
|
||||
|
||||
invokeResolver := handlers.NewInvokeResolver(client)
|
||||
|
||||
userSecretPath := path.Join(wd, "secrets")
|
||||
|
||||
bootstrapHandlers := types.FaaSHandlers{
|
||||
FunctionProxy: proxy.NewHandlerFunc(*config, invokeResolver),
|
||||
DeleteHandler: handlers.MakeDeleteHandler(client, cni),
|
||||
DeployHandler: handlers.MakeDeployHandler(client, cni, userSecretPath, alwaysPull),
|
||||
FunctionReader: handlers.MakeReadHandler(client),
|
||||
ReplicaReader: handlers.MakeReplicaReaderHandler(client),
|
||||
ReplicaUpdater: handlers.MakeReplicaUpdateHandler(client, cni),
|
||||
UpdateHandler: handlers.MakeUpdateHandler(client, cni, userSecretPath, alwaysPull),
|
||||
HealthHandler: func(w http.ResponseWriter, r *http.Request) {},
|
||||
InfoHandler: handlers.MakeInfoHandler(Version, GitCommit),
|
||||
ListNamespaceHandler: listNamespaces(),
|
||||
SecretHandler: handlers.MakeSecretHandler(client, userSecretPath),
|
||||
LogHandler: logs.NewLogHandlerFunc(faasdlogs.New(), config.ReadTimeout),
|
||||
}
|
||||
|
||||
log.Printf("Listening on TCP port: %d\n", *config.TCPPort)
|
||||
bootstrap.Serve(&bootstrapHandlers, config)
|
||||
return nil
|
||||
}
|
||||
command.RunE = runProviderE
|
||||
command.PreRunE = preRunE
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func listNamespaces() func(w http.ResponseWriter, r *http.Request) {
|
||||
func runProviderE(cmd *cobra.Command, _ []string) error {
|
||||
|
||||
config, providerConfig, err := config.ReadFromEnv(types.OsEnv{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("faasd-provider starting..\tService Timeout: %s\n", config.WriteTimeout.String())
|
||||
printVersion()
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.WriteFile(path.Join(wd, "hosts"),
|
||||
[]byte(`127.0.0.1 localhost`), workingDirectoryPermission); err != nil {
|
||||
return fmt.Errorf("cannot write hosts file: %s", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(path.Join(wd, "resolv.conf"),
|
||||
[]byte(`nameserver 8.8.8.8
|
||||
nameserver 8.8.4.4`), workingDirectoryPermission); err != nil {
|
||||
return fmt.Errorf("cannot write resolv.conf file: %s", err)
|
||||
}
|
||||
|
||||
cni, err := cninetwork.InitNetwork()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := containerd.New(providerConfig.Sock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer client.Close()
|
||||
|
||||
invokeResolver := handlers.NewInvokeResolver(client)
|
||||
|
||||
baseUserSecretsPath := path.Join(wd, "secrets")
|
||||
if err := moveSecretsToDefaultNamespaceSecrets(
|
||||
baseUserSecretsPath,
|
||||
faasd.DefaultFunctionNamespace); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
alwaysPull := true
|
||||
bootstrapHandlers := types.FaaSHandlers{
|
||||
FunctionProxy: httpHeaderMiddleware(proxy.NewHandlerFunc(*config, invokeResolver, false)),
|
||||
DeleteFunction: httpHeaderMiddleware(handlers.MakeDeleteHandler(client, cni)),
|
||||
DeployFunction: httpHeaderMiddleware(handlers.MakeDeployHandler(client, cni, baseUserSecretsPath, alwaysPull)),
|
||||
FunctionLister: httpHeaderMiddleware(handlers.MakeReadHandler(client)),
|
||||
FunctionStatus: httpHeaderMiddleware(handlers.MakeReplicaReaderHandler(client)),
|
||||
ScaleFunction: httpHeaderMiddleware(handlers.MakeReplicaUpdateHandler(client, cni)),
|
||||
UpdateFunction: httpHeaderMiddleware(handlers.MakeUpdateHandler(client, cni, baseUserSecretsPath, alwaysPull)),
|
||||
Health: httpHeaderMiddleware(func(w http.ResponseWriter, r *http.Request) {}),
|
||||
Info: httpHeaderMiddleware(handlers.MakeInfoHandler(faasd.Version, faasd.GitCommit)),
|
||||
ListNamespaces: httpHeaderMiddleware(handlers.MakeNamespacesLister(client)),
|
||||
Secrets: httpHeaderMiddleware(handlers.MakeSecretHandler(client.NamespaceService(), baseUserSecretsPath)),
|
||||
Logs: httpHeaderMiddleware(logs.NewLogHandlerFunc(faasdlogs.New(), config.ReadTimeout)),
|
||||
MutateNamespace: httpHeaderMiddleware(handlers.MakeMutateNamespace(client)),
|
||||
}
|
||||
|
||||
log.Printf("Listening on: 0.0.0.0:%d", *config.TCPPort)
|
||||
bootstrap.Serve(cmd.Context(), &bootstrapHandlers, config)
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
* Multiple namespace support was added after release 0.13.0
|
||||
* This function helps users migrate to faasd's multiple namespace support
|
||||
*/
|
||||
func moveSecretsToDefaultNamespaceSecrets(baseSecretPath string, defaultNamespace string) error {
|
||||
newSecretPath := path.Join(baseSecretPath, defaultNamespace)
|
||||
|
||||
err := ensureSecretsDir(newSecretPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
files, err := os.ReadDir(baseSecretPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
if !f.IsDir() {
|
||||
|
||||
newPath := path.Join(newSecretPath, f.Name())
|
||||
|
||||
// A non-nil error means the file wasn't found in the
|
||||
// destination path
|
||||
if _, err := os.Stat(newPath); err != nil {
|
||||
oldPath := path.Join(baseSecretPath, f.Name())
|
||||
|
||||
if err := copyFile(oldPath, newPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[Migration] Copied %s to %s", oldPath, newPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyFile(src, dst string) error {
|
||||
inputFile, err := os.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening %s failed %w", src, err)
|
||||
}
|
||||
defer inputFile.Close()
|
||||
|
||||
outputFile, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_APPEND, secretDirPermission)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening %s failed %w", dst, err)
|
||||
}
|
||||
defer outputFile.Close()
|
||||
|
||||
// Changed from os.Rename due to issue in #201
|
||||
if _, err := io.Copy(outputFile, inputFile); err != nil {
|
||||
return fmt.Errorf("writing into %s failed %w", outputFile.Name(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func httpHeaderMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
list := []string{""}
|
||||
out, _ := json.Marshal(list)
|
||||
w.Write(out)
|
||||
w.Header().Set("X-OpenFaaS-EULA", "openfaas-ce")
|
||||
next.ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
cmd/root.go: 36 changed lines
@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/morikuni/aec"
|
||||
"github.com/openfaas/faasd/pkg"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@ -16,25 +17,15 @@ func init() {
|
||||
rootCommand.AddCommand(installCmd)
|
||||
rootCommand.AddCommand(makeProviderCmd())
|
||||
rootCommand.AddCommand(collectCmd)
|
||||
rootCommand.AddCommand(makeServiceCmd())
|
||||
}
|
||||
|
||||
func RootCommand() *cobra.Command {
|
||||
return rootCommand
|
||||
}
|
||||
|
||||
var (
|
||||
// GitCommit Git Commit SHA
|
||||
GitCommit string
|
||||
// Version version of the CLI
|
||||
Version string
|
||||
)
|
||||
|
||||
// Execute faasd
|
||||
func Execute(version, gitCommit string) error {
|
||||
|
||||
// Get Version and GitCommit values from main.go.
|
||||
Version = version
|
||||
GitCommit = gitCommit
|
||||
func Execute() error {
|
||||
|
||||
if err := rootCommand.Execute(); err != nil {
|
||||
return err
|
||||
@ -46,7 +37,16 @@ var rootCommand = &cobra.Command{
|
||||
Use: "faasd",
|
||||
Short: "Start faasd",
|
||||
Long: `
|
||||
faasd - serverless without Kubernetes
|
||||
faasd Community Edition (CE):
|
||||
|
||||
Learn how to build, secure, and monitor functions with faasd with
|
||||
the eBook:
|
||||
|
||||
https://openfaas.gumroad.com/l/serverless-for-everyone-else
|
||||
|
||||
License: OpenFaaS CE EULA with faasd addendum:
|
||||
|
||||
https://github.com/openfaas/faasd/blob/master/EULA.md
|
||||
`,
|
||||
RunE: runRootCommand,
|
||||
SilenceUsage: true,
|
||||
@ -73,7 +73,7 @@ func parseBaseCommand(_ *cobra.Command, _ []string) {
|
||||
}
|
||||
|
||||
func printVersion() {
|
||||
fmt.Printf("faasd version: %s\tcommit: %s\n", GetVersion(), GitCommit)
|
||||
fmt.Printf("faasd Community Edition (CE) version: %s\tcommit: %s\n", pkg.GetVersion(), pkg.GitCommit)
|
||||
}
|
||||
|
||||
func printLogo() {
|
||||
@ -81,14 +81,6 @@ func printLogo() {
|
||||
fmt.Println(logoText)
|
||||
}
|
||||
|
||||
// GetVersion get latest version
|
||||
func GetVersion() string {
|
||||
if len(Version) == 0 {
|
||||
return "dev"
|
||||
}
|
||||
return Version
|
||||
}
|
||||
|
||||
// Logo for version and root command
|
||||
const Logo = ` __ _
|
||||
/ _| __ _ __ _ ___ __| |
|
||||
|
cmd/service.go (new file): 22 changed lines
@ -0,0 +1,22 @@
|
||||
package cmd
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
func makeServiceCmd() *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "service",
|
||||
Short: "Manage services",
|
||||
Long: `Manage services created by faasd from the docker-compose.yml file`,
|
||||
}
|
||||
|
||||
command.RunE = runServiceE
|
||||
|
||||
command.AddCommand(makeServiceLogsCmd())
|
||||
return command
|
||||
}
|
||||
|
||||
func runServiceE(cmd *cobra.Command, args []string) error {
|
||||
|
||||
return cmd.Help()
|
||||
|
||||
}
|
cmd/service_logs.go (new file): 89 changed lines
@ -0,0 +1,89 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
goexecute "github.com/alexellis/go-execute/v2"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func makeServiceLogsCmd() *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "logs",
|
||||
Short: "View logs for a service",
|
||||
Long: `View logs for a service created by faasd from the docker-compose.yml file.`,
|
||||
Example: ` ## View logs for the gateway for the last hour
|
||||
faasd service logs gateway --since 1h
|
||||
|
||||
## View logs for the cron-connector, and tail them
|
||||
faasd service logs cron-connector -f
|
||||
`,
|
||||
}
|
||||
|
||||
command.Flags().Duration("since", 10*time.Minute, "How far back in time to include logs")
|
||||
command.Flags().BoolP("follow", "f", false, "Follow the logs")
|
||||
|
||||
command.RunE = runServiceLogsE
|
||||
command.PreRunE = preRunServiceLogsE
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func runServiceLogsE(cmd *cobra.Command, args []string) error {
|
||||
name := args[0]
|
||||
|
||||
namespace, _ := cmd.Flags().GetString("namespace")
|
||||
follow, _ := cmd.Flags().GetBool("follow")
|
||||
since, _ := cmd.Flags().GetDuration("since")
|
||||
|
||||
journalTask := goexecute.ExecTask{
|
||||
Command: "journalctl",
|
||||
Args: []string{"-o", "cat", "-t", fmt.Sprintf("%s:%s", namespace, name)},
|
||||
StreamStdio: true,
|
||||
}
|
||||
|
||||
if follow {
|
||||
journalTask.Args = append(journalTask.Args, "-f")
|
||||
}
|
||||
|
||||
if since != 0 {
|
||||
// Calculate the timestamp that is 'age' duration ago
|
||||
sinceTime := time.Now().Add(-since)
|
||||
// Format according to journalctl's expected format: "2012-10-30 18:17:16"
|
||||
formattedTime := sinceTime.Format("2006-01-02 15:04:05")
|
||||
journalTask.Args = append(journalTask.Args, fmt.Sprintf("--since=%s", formattedTime))
|
||||
}
|
||||
|
||||
res, err := journalTask.Execute(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if res.ExitCode != 0 {
|
||||
return fmt.Errorf("failed to get logs for service %s: %s", name, res.Stderr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func preRunServiceLogsE(cmd *cobra.Command, args []string) error {
|
||||
|
||||
if os.Geteuid() != 0 {
|
||||
return errors.New("this command must be run as root")
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return errors.New("service name is required as an argument")
|
||||
}
|
||||
|
||||
namespace, _ := cmd.Flags().GetString("namespace")
|
||||
if namespace == "" {
|
||||
return errors.New("namespace is required")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
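For reference, `faasd service logs` is a thin wrapper over journalctl, using a `namespace:service` syslog identifier. A rough manual equivalent of `faasd service logs gateway --since 1h -f` (a sketch, assuming the core services run under the default `openfaas` namespace) would be:

```bash
sudo journalctl -o cat -t openfaas:gateway \
  --since "$(date -d '1 hour ago' '+%Y-%m-%d %H:%M:%S')" -f
```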
cmd/up.go: 23 changed lines
@ -2,7 +2,6 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
@ -16,6 +15,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
flag "github.com/spf13/pflag"
|
||||
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
@ -38,9 +38,10 @@ func init() {
|
||||
}
|
||||
|
||||
var upCmd = &cobra.Command{
|
||||
Use: "up",
|
||||
Short: "Start faasd",
|
||||
RunE: runUp,
|
||||
Use: "up",
|
||||
Short: "Start faasd",
|
||||
RunE: runUp,
|
||||
PreRunE: preRunE,
|
||||
}
|
||||
|
||||
func runUp(cmd *cobra.Command, _ []string) error {
|
||||
@ -68,7 +69,7 @@ func runUp(cmd *cobra.Command, _ []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Supervisor created in: %s\n", time.Since(start).String())
|
||||
log.Printf("Supervisor created in: %s\n", units.HumanDuration(time.Since(start)))
|
||||
|
||||
start = time.Now()
|
||||
if err := supervisor.Start(services); err != nil {
|
||||
@ -76,7 +77,7 @@ func runUp(cmd *cobra.Command, _ []string) error {
|
||||
}
|
||||
defer supervisor.Close()
|
||||
|
||||
log.Printf("Supervisor init done in: %s\n", time.Since(start).String())
|
||||
log.Printf("Supervisor init done in: %s\n", units.HumanDuration(time.Since(start)))
|
||||
|
||||
shutdownTimeout := time.Second * 1
|
||||
timeout := time.Second * 60
|
||||
@ -165,7 +166,7 @@ func makeFile(filePath, fileContents string) error {
|
||||
return nil
|
||||
} else if os.IsNotExist(err) {
|
||||
log.Printf("Writing to: %q\n", filePath)
|
||||
return ioutil.WriteFile(filePath, []byte(fileContents), workingDirectoryPermission)
|
||||
return os.WriteFile(filePath, []byte(fileContents), workingDirectoryPermission)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
@ -203,3 +204,11 @@ func parseUpFlags(cmd *cobra.Command) (upConfig, error) {
|
||||
parsed.workingDir = faasdwd
|
||||
return parsed, err
|
||||
}
|
||||
|
||||
func preRunE(cmd *cobra.Command, _ []string) error {
|
||||
if err := pkg.ConnectivityCheck(); err != nil {
|
||||
return fmt.Errorf("the OpenFaaS CE EULA requires Internet access, upgrade to faasd Pro to continue")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
docker-compose.yaml
@ -1,47 +1,45 @@
|
||||
version: "3.7"
|
||||
services:
|
||||
basic-auth-plugin:
|
||||
image: ghcr.io/openfaas/basic-auth:0.20.5
|
||||
environment:
|
||||
- port=8080
|
||||
- secret_mount_path=/run/secrets
|
||||
- user_filename=basic-auth-user
|
||||
- pass_filename=basic-auth-password
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
|
||||
nats:
|
||||
image: docker.io/library/nats-streaming:0.11.2
|
||||
image: docker.io/library/nats-streaming:0.25.6
|
||||
# nobody
|
||||
user: "65534"
|
||||
command:
|
||||
- "/nats-streaming-server"
|
||||
- "-m"
|
||||
- "8222"
|
||||
- "--store=memory"
|
||||
- "--store=file"
|
||||
- "--dir=/nats"
|
||||
- "--cluster_id=faas-cluster"
|
||||
volumes:
|
||||
# Data directory
|
||||
- type: bind
|
||||
source: ./nats
|
||||
target: /nats
|
||||
# ports:
|
||||
# - "127.0.0.1:8222:8222"
|
||||
|
||||
prometheus:
|
||||
image: docker.io/prom/prometheus:v2.14.0
|
||||
image: docker.io/prom/prometheus:v3.1.0
|
||||
# nobody
|
||||
user: "65534"
|
||||
volumes:
|
||||
# Config directory
|
||||
- type: bind
|
||||
source: ./prometheus.yml
|
||||
target: /etc/prometheus/prometheus.yml
|
||||
# Data directory
|
||||
- type: bind
|
||||
source: ./prometheus
|
||||
target: /prometheus
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
ports:
|
||||
- "127.0.0.1:9090:9090"
|
||||
|
||||
gateway:
|
||||
image: ghcr.io/openfaas/gateway:0.20.8
|
||||
image: ghcr.io/openfaas/gateway:0.27.12
|
||||
environment:
|
||||
- basic_auth=true
|
||||
- functions_provider_url=http://faasd-provider:8081/
|
||||
@ -51,8 +49,6 @@ services:
|
||||
- upstream_timeout=65s
|
||||
- faas_nats_address=nats
|
||||
- faas_nats_port=4222
|
||||
- auth_proxy_url=http://basic-auth-plugin:8080/validate
|
||||
- auth_proxy_pass_body=false
|
||||
- secret_mount_path=/run/secrets
|
||||
- scale_from_zero=true
|
||||
- function_namespace=openfaas-fn
|
||||
@ -67,14 +63,13 @@ services:
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
depends_on:
|
||||
- basic-auth-plugin
|
||||
- nats
|
||||
- prometheus
|
||||
ports:
|
||||
- "8080:8080"
|
||||
|
||||
queue-worker:
|
||||
image: docker.io/openfaas/queue-worker:0.11.2
|
||||
image: ghcr.io/openfaas/queue-worker:0.14.2
|
||||
environment:
|
||||
- faas_nats_address=nats
|
||||
- faas_nats_port=4222
|
||||
|
docs/DEV.md: 65 changed lines
@ -1,7 +1,11 @@
|
||||
## Instructions for hacking on faasd itself
|
||||
## Instructions for building and testing faasd locally
|
||||
|
||||
> Note: if you just want to try out faasd, then it's likely that you're on the wrong page. This is a detailed set of instructions for those wanting to contribute to or customise faasd. Feel free to go back to the homepage and pick a tutorial instead.
|
||||
|
||||
Do you want to help the community test a pull request?
|
||||
|
||||
See these instructions instead: [Testing patches](/docs/PATCHES.md)
|
||||
|
||||
### Pre-reqs
|
||||
|
||||
> It's recommended that you do not install Docker on the same host as faasd, since 1) they may both use different versions of containerd and 2) Docker's networking rules can disrupt faasd's networking. When using faasd, make your faasd server a faasd server, and build container images on your laptop or in a CI pipeline.
|
||||
@ -16,7 +20,7 @@
|
||||
|
||||
For Windows users, install [Git Bash](https://git-scm.com/downloads) along with multipass or vagrant. You can also use WSL1 or WSL2 which provides a Linux environment.
|
||||
|
||||
You will also need [containerd v1.3.5](https://github.com/containerd/containerd) and the [CNI plugins v0.8.5](https://github.com/containernetworking/plugins)
|
||||
You will also need [containerd](https://github.com/containerd/containerd) and the [CNI plugins](https://github.com/containernetworking/plugins)
|
||||
|
||||
[faas-cli](https://github.com/openfaas/faas-cli) is optional, but recommended.
|
||||
|
||||
@ -24,7 +28,7 @@ If you're using multipass, then allocate sufficient resources:
|
||||
|
||||
```bash
|
||||
multipass launch \
|
||||
--mem 4G \
|
||||
--memory 4G \
|
||||
-c 2 \
|
||||
-n faasd
|
||||
|
||||
@ -53,14 +57,13 @@ curl -sLS https://cli.openfaas.com | sudo sh
|
||||
#### Install the CNI plugins:
|
||||
|
||||
* For PC run `export ARCH=amd64`
|
||||
* For RPi/armhf run `export ARCH=arm`
|
||||
* For arm64 run `export ARCH=arm64`
|
||||
|
||||
Then run:
|
||||
|
||||
```bash
|
||||
export ARCH=amd64
|
||||
export CNI_VERSION=v0.8.5
|
||||
export CNI_VERSION=v0.9.1
|
||||
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
curl -sSL https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz | sudo tar -xz -C /opt/cni/bin
|
||||
@ -79,26 +82,17 @@ EOF'
|
||||
|
||||
### Get containerd
|
||||
|
||||
You have three options - binaries for PC, binaries for armhf, or build from source.
|
||||
|
||||
* Install containerd `x86_64` only
|
||||
|
||||
```bash
|
||||
export VER=1.3.5
|
||||
curl -sSL https://github.com/containerd/containerd/releases/download/v$VER/containerd-$VER-linux-amd64.tar.gz > /tmp/containerd.tar.gz \
|
||||
export VER=1.7.27
|
||||
curl -sSL https://github.com/containerd/containerd/releases/download/v$VER/containerd-$VER-linux-amd64.tar.gz -o /tmp/containerd.tar.gz \
|
||||
&& sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
|
||||
containerd -version
|
||||
```
|
||||
|
||||
* Or get my containerd binaries for Raspberry Pi (armhf)
|
||||
|
||||
Building `containerd` on armhf is extremely slow, so I've provided binaries for you.
|
||||
|
||||
```bash
|
||||
curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.3.5/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
|
||||
```
|
||||
|
||||
* Or clone / build / install [containerd](https://github.com/containerd/containerd) from source:
|
||||
|
||||
```bash
|
||||
@ -108,7 +102,7 @@ containerd -version
|
||||
git clone https://github.com/containerd/containerd
|
||||
cd containerd
|
||||
git fetch origin --tags
|
||||
git checkout v1.3.5
|
||||
git checkout v1.7.27
|
||||
|
||||
make
|
||||
sudo make install
|
||||
@ -119,7 +113,7 @@ containerd -version
|
||||
#### Ensure containerd is running
|
||||
|
||||
```bash
|
||||
curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service > /tmp/containerd.service
|
||||
curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.7.27/containerd.service > /tmp/containerd.service
|
||||
|
||||
# Extend the timeouts for low-performance VMs
|
||||
echo "[Manager]" | tee -a /tmp/containerd.service
|
||||
@ -226,14 +220,11 @@ sudo cp bin/faasd /usr/local/bin
|
||||
# For x86_64
|
||||
export SUFFIX=""
|
||||
|
||||
# armhf
|
||||
export SUFFIX="-armhf"
|
||||
|
||||
# arm64
|
||||
export SUFFIX="-arm64"
|
||||
|
||||
# Then download
|
||||
curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.10.2/faasd$SUFFIX" \
|
||||
curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.16.2/faasd$SUFFIX" \
|
||||
-o "/tmp/faasd" \
|
||||
&& chmod +x "/tmp/faasd"
|
||||
sudo mv /tmp/faasd /usr/local/bin/
|
||||
@ -249,7 +240,7 @@ sudo faasd install
|
||||
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-password"
|
||||
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-user"
|
||||
Login with:
|
||||
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s
|
||||
sudo -E cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s
|
||||
```
|
||||
|
||||
You can now log in either from this machine or from a remote machine, using the OpenFaaS UI or CLI.
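A minimal sketch of a remote login, assuming the faasd host's IP is 192.168.0.35 (replace with your own) and that port 8080 is reachable from your workstation:

```bash
# On the faasd host: print the gateway password
sudo cat /var/lib/faasd/secrets/basic-auth-password

# On the remote machine: log in against the faasd gateway
export OPENFAAS_URL=http://192.168.0.35:8080
echo "<paste-the-password-here>" | faas-cli login -s
```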
|
||||
@ -357,3 +348,31 @@ The default Basic Auth username is `admin`, which is written to `/var/lib/faasd/
|
||||
* `faasd install` - install faasd and containerd with systemd; this must be run from `$GOPATH/src/github.com/openfaas/faasd`
|
||||
* `journalctl -u faasd -f` - faasd service logs
|
||||
* `journalctl -u faasd-provider -f` - faasd-provider service logs
|
||||
|
||||
#### Uninstall
|
||||
|
||||
* Stop faasd and faasd-provider
|
||||
```
|
||||
sudo systemctl stop faasd
|
||||
sudo systemctl stop faasd-provider
|
||||
sudo systemctl stop containerd
|
||||
```
|
||||
|
||||
* Remove faasd from machine
|
||||
```
|
||||
sudo systemctl disable faasd
|
||||
sudo systemctl disable faasd-provider
|
||||
sudo systemctl disable containerd
|
||||
sudo rm -rf /usr/local/bin/faasd
|
||||
sudo rm -rf /var/lib/faasd
|
||||
sudo rm -rf /usr/lib/systemd/system/faasd-provider.service
|
||||
sudo rm -rf /usr/lib/systemd/system/faasd.service
|
||||
sudo rm -rf /usr/lib/systemd/system/containerd
|
||||
sudo systemctl daemon-reload
|
||||
```
|
||||
|
||||
* Remove additional dependencies. Be cautious, as other software may depend on these.
|
||||
```
|
||||
sudo apt-get remove runc bridge-utils
|
||||
sudo rm -rf /opt/cni/bin
|
||||
```
|
@ -27,111 +27,112 @@ It took me about 2-3 minutes to run through everything after installing multipas
|
||||
|
||||
* Get my cloud-config.txt file
|
||||
|
||||
```sh
|
||||
curl -sSLO https://raw.githubusercontent.com/openfaas/faasd/master/cloud-config.txt
|
||||
```
|
||||
```sh
|
||||
curl -sSLO https://raw.githubusercontent.com/openfaas/faasd/master/cloud-config.txt
|
||||
```
|
||||
|
||||
* Update the SSH key to match your own, edit `cloud-config.txt`:
|
||||
* Boot the VM
|
||||
|
||||
Replace the 2nd line with the contents of `~/.ssh/id_rsa.pub`:
|
||||
The `cloud-config.txt` file contains an SSH key which allows your local machine to access the VM, but it must be replaced with your own public key.
|
||||
This command will update the key with your local public key value and start the VM.
|
||||
|
||||
```
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local
|
||||
```
|
||||
```sh
|
||||
sed "s/ssh-rsa.*/$(cat $HOME/.ssh/id_*.pub)/" cloud-config.txt | multipass launch --name faasd --cloud-init -
|
||||
```
|
||||
|
||||
* Boot the VM
|
||||
This can also be done manually: just replace the 2nd line of the `cloud-config.txt` with the contents of your public SSH key, usually either `~/.ssh/id_rsa.pub` or `~/.ssh/id_ed25519.pub`
|
||||
|
||||
```sh
|
||||
multipass launch --cloud-init cloud-config.txt --name faasd
|
||||
```
|
||||
```
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local
|
||||
```
|
||||
|
||||
* Get the VM's IP and connect with `ssh`
|
||||
|
||||
```sh
|
||||
multipass info faasd
|
||||
Name: faasd
|
||||
State: Running
|
||||
IPv4: 192.168.64.14
|
||||
Release: Ubuntu 18.04.3 LTS
|
||||
Image hash: a720c34066dc (Ubuntu 18.04 LTS)
|
||||
Load: 0.79 0.19 0.06
|
||||
Disk usage: 1.1G out of 4.7G
|
||||
Memory usage: 145.6M out of 985.7M
|
||||
```
|
||||
```sh
|
||||
multipass info faasd
|
||||
Name: faasd
|
||||
State: Running
|
||||
IPv4: 192.168.64.14
|
||||
Release: Ubuntu 18.04.3 LTS
|
||||
Image hash: a720c34066dc (Ubuntu 18.04 LTS)
|
||||
Load: 0.79 0.19 0.06
|
||||
Disk usage: 1.1G out of 4.7G
|
||||
Memory usage: 145.6M out of 985.7M
|
||||
```
|
||||
|
||||
Set the variable `IP`:
|
||||
Set the variable `IP`:
|
||||
|
||||
```
|
||||
export IP="192.168.64.14"
|
||||
```
|
||||
```
|
||||
export IP="192.168.64.14"
|
||||
```
|
||||
|
||||
You can also try to use `jq` to get the IP into a variable:
|
||||
You can also try to use `jq` to get the IP into a variable:
|
||||
|
||||
```sh
|
||||
export IP=$(multipass info faasd --format json| jq -r '.info.faasd.ipv4[0]')
|
||||
```
|
||||
```sh
|
||||
export IP=$(multipass info faasd --format json| jq -r '.info.faasd.ipv4[0]')
|
||||
```
|
||||
|
||||
Connect to the IP listed:
|
||||
Connect to the IP listed:
|
||||
|
||||
```sh
|
||||
ssh ubuntu@$IP
|
||||
```
|
||||
```sh
|
||||
ssh ubuntu@$IP
|
||||
```
|
||||
|
||||
Log out once you know it works.
|
||||
Log out once you know it works.
|
||||
|
||||
* Let's capture the authentication password into a file for use with `faas-cli`
|
||||
|
||||
```
|
||||
ssh ubuntu@$IP "sudo cat /var/lib/faasd/secrets/basic-auth-password" > basic-auth-password
|
||||
```
|
||||
```
|
||||
ssh ubuntu@$IP "sudo cat /var/lib/faasd/secrets/basic-auth-password" > basic-auth-password
|
||||
```
|
||||
|
||||
## Try faasd (OpenFaaS)
|
||||
|
||||
* Login from your laptop (the host)
|
||||
|
||||
```
|
||||
export OPENFAAS_URL=http://$IP:8080
|
||||
cat basic-auth-password | faas-cli login -s
|
||||
```
|
||||
```
|
||||
export OPENFAAS_URL=http://$IP:8080
|
||||
cat basic-auth-password | faas-cli login -s
|
||||
```
|
||||
|
||||
* Deploy a function and invoke it
|
||||
|
||||
```
|
||||
faas-cli store deploy figlet --env write_timeout=1s
|
||||
echo "faasd" | faas-cli invoke figlet
|
||||
```
|
||||
faas-cli store deploy figlet --env write_timeout=1s
|
||||
echo "faasd" | faas-cli invoke figlet
|
||||
|
||||
faas-cli describe figlet
|
||||
faas-cli describe figlet
|
||||
|
||||
# Run async
|
||||
curl -i -d "faasd-async" $OPENFAAS_URL/async-function/figlet
|
||||
# Run async
|
||||
curl -i -d "faasd-async" $OPENFAAS_URL/async-function/figlet
|
||||
|
||||
# Run async with a callback
|
||||
# Run async with a callback
|
||||
|
||||
curl -i -d "faasd-async" -H "X-Callback-Url: http://some-request-bin.com/path" $OPENFAAS_URL/async-function/figlet
|
||||
```
|
||||
curl -i -d "faasd-async" -H "X-Callback-Url: http://some-request-bin.com/path" $OPENFAAS_URL/async-function/figlet
|
||||
```
|
||||
|
||||
You can also check out the other store functions: `faas-cli store list`
|
||||
You can also check out the other store functions: `faas-cli store list`
|
||||
|
||||
* Try the UI
|
||||
|
||||
Head over to the UI from your laptop and remember that your password is in the `basic-auth-password` file. The username is `admin`:
|
||||
Head over to the UI from your laptop and remember that your password is in the `basic-auth-password` file. The username is `admin`:
|
||||
|
||||
```
|
||||
echo http://$IP:8080
|
||||
```
|
||||
```
|
||||
echo http://$IP:8080
|
||||
```
|
||||
|
||||
* Stop/start the instance
|
||||
|
||||
```sh
|
||||
multipass stop faasd
|
||||
```
|
||||
```sh
|
||||
multipass stop faasd
|
||||
```
|
||||
|
||||
* Delete, if you want to:
|
||||
|
||||
```
|
||||
multipass delete --purge faasd
|
||||
```
|
||||
```
|
||||
multipass delete --purge faasd
|
||||
```
|
||||
|
||||
You now have a faasd appliance on your Mac. You can also use this cloud-init file with a public cloud such as AWS or DigitalOcean.
|
||||
|
||||
|
88
docs/PATCHES.md
Normal file
88
docs/PATCHES.md
Normal file
@ -0,0 +1,88 @@
|
||||
## Instructions for testing a patch for faasd
|
||||
|
||||
### Launch a virtual machine
|
||||
|
||||
You can use any kind of Linux virtual machine; Ubuntu 20.04 is recommended.
|
||||
|
||||
Launch a cloud VM or use [Multipass](https://multipass.run), which is free to use and can be run locally. A Raspberry Pi 3 or 4 could also be used, but you will need to run `make dist` to cross-compile a valid binary, as in the sketch below.
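A minimal sketch of the cross-compilation step. `make dist` is the documented route; the direct `go build` with `GOARCH` set is an assumed alternative for a Pi running a 64-bit OS:

```bash
# Documented route: builds versioned binaries for several platforms under bin/
make dist

# Lighter-weight alternative (assumes a 64-bit OS on the Pi)
GOOS=linux GOARCH=arm64 go build -o faasd
```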
|
||||
|
||||
### Copy over your SSH key
|
||||
|
||||
Your SSH key will be used so that you can copy a new faasd binary over to the host.
|
||||
|
||||
```bash
|
||||
multipass launch \
|
||||
--memory 4G \
|
||||
-c 2 \
|
||||
-n faasd
|
||||
|
||||
# Then access its shell
|
||||
multipass shell faasd
|
||||
|
||||
# Edit .ssh/authorized_keys
|
||||
|
||||
# Add .ssh/id_rsa.pub from your host and save the file
|
||||
```
|
||||
|
||||
### Install faasd on the VM
|
||||
|
||||
You start off with the upstream version of faasd on the host, then add the new version over the top later on.
|
||||
|
||||
```bash
|
||||
cd /tmp/
|
||||
git clone https://github.com/openfaas/faasd --depth=1
|
||||
cd faasd/hack
|
||||
./install.sh
|
||||
|
||||
# Run the login command given to you at the end of the script
|
||||
```
|
||||
|
||||
Get the multipass IP address:
|
||||
|
||||
```bash
|
||||
export IP=$(multipass info faasd --format json| jq -r '.info.faasd.ipv4[0]')
|
||||
```
|
||||
|
||||
### Build a new faasd binary with the patch
|
||||
|
||||
Check out faasd on your local computer:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/openfaas/faasd
|
||||
cd faasd
|
||||
|
||||
gh pr checkout #PR_NUMBER_HERE
|
||||
|
||||
GOOS=linux go build
|
||||
|
||||
# You can also run "make dist" which is slower, but includes
|
||||
# a version and binaries for other platforms such as the Raspberry Pi
|
||||
```
|
||||
|
||||
### Copy it over to the VM
|
||||
|
||||
Now copy the new faasd binary over to the VM:
|
||||
|
||||
```bash
|
||||
scp faasd ubuntu@$IP:~/
|
||||
```
|
||||
|
||||
Now deploy the new version on the VM:
|
||||
|
||||
```bash
|
||||
# Replace the running binary with the one copied over via scp above
sudo killall -9 faasd; sudo mv ./faasd /usr/local/bin/faasd
|
||||
```
|
||||
|
||||
### Check it worked and test that patch
|
||||
|
||||
Now run a command with `faas-cli` such as:
|
||||
|
||||
* `faas-cli list`
|
||||
* `faas-cli version`
|
||||
|
||||
See the testing instructions on the PR and run through those steps.
|
||||
|
||||
Post your results on GitHub to assist the creator of the pull request.
|
||||
|
||||
You can see how to get the logs for various components using the [eBook Serverless For Everyone Else](https://openfaas.gumroad.com/l/serverless-for-everyone-else), or by consulting the [DEV.md](/docs/DEV.md) guide.
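For example, the service logs referenced in DEV.md can be followed with journalctl:

```bash
# Follow the logs for the two faasd services
journalctl -u faasd -f
journalctl -u faasd-provider -f
```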
|
||||
|
7
docs/README.md
Normal file
7
docs/README.md
Normal file
@ -0,0 +1,7 @@
|
||||
# Documentation
|
||||
|
||||
- [Develop faasd](./DEV.md) - Instructions for building and testing faasd locally
|
||||
- [Testing faasd](./PATCHES.md) - Instructions for testing a patch for faasd
|
||||
- [Roadmap](./ROADMAP.md) - Overview of features, backlog and known issues
|
||||
- [Run faasd with multipass](./MULTIPASS.md) - Tutorial on how to run faasd on a local multipass VM
|
||||
- [Terraform modules](./TERRAFORM.md) - A collection of official and community provided terraform modules for faasd
|
105
docs/ROADMAP.md
105
docs/ROADMAP.md
@ -1,55 +1,79 @@
|
||||
# faasd backlog and features
|
||||
|
||||
It's important to understand the vision and the tradeoffs between OpenFaaS Edge (faasd-pro) and OpenFaaS on Kubernetes.
|
||||
|
||||
faasd is a single-node implementation of OpenFaaS.
|
||||
|
||||
It is supposed to be a lightweight, low-overhead way to deploy OpenFaaS functions which do not need to scale out.
|
||||
|
||||
It is not supposed to have multiple replicas, clustering, High Availability (HA), or auto-scaling.
|
||||
|
||||
[Learn when to use faasd](https://docs.openfaas.com/deployment/)
|
||||
|
||||
## Supported operations
|
||||
|
||||
* `faas login`
|
||||
* `faas up`
|
||||
* `faas list`
|
||||
* `faas describe`
|
||||
* `faas deploy --update=true --replace=false`
|
||||
* `faas invoke --async`
|
||||
* `faas invoke`
|
||||
* `faas rm`
|
||||
* `faas store list/deploy/inspect`
|
||||
* `faas version`
|
||||
* `faas namespace`
|
||||
* `faas secret`
|
||||
* `faas logs`
|
||||
* `faas auth` - supported for Basic Authentication and OpenFaaS PRO with OIDC and Single-sign On.
|
||||
* `faas-cli login`
|
||||
* `faas-cli up`
|
||||
* `faas-cli list`
|
||||
* `faas-cli describe`
|
||||
* `faas-cli deploy --update=true --replace=false`
|
||||
* `faas-cli invoke --async`
|
||||
* `faas-cli invoke`
|
||||
* `faas-cli rm`
|
||||
* `faas-cli store list/deploy/inspect`
|
||||
* `faas-cli version`
|
||||
* `faas-cli namespace`
|
||||
* `faas-cli secret`
|
||||
* `faas-cli logs`
|
||||
* `faas-cli auth` - supported for Basic Authentication and OpenFaaS Pro with OIDC and Single Sign-On.
|
||||
|
||||
Scale from and to zero is also supported. On a Dell XPS with a small, pre-pulled image, unpausing an existing task took 0.19s and starting a task for a killed function took 0.39s. There may be further optimizations to be gained.
|
||||
The OpenFaaS REST API is supported by faasd; learn more in the [manual](https://store.openfaas.com/l/serverless-for-everyone-else) under "Can I get an API with that?"
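As a minimal sketch (the manual remains the authoritative reference), the gateway's REST API can be queried with basic auth from the faasd host:

```bash
# List deployed functions via the gateway's REST API
PASSWORD=$(sudo cat /var/lib/faasd/secrets/basic-auth-password)
curl -s -u admin:$PASSWORD http://127.0.0.1:8080/system/functions
```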
|
||||
|
||||
## Constraints vs OpenFaaS on Kubernetes
|
||||
|
||||
faasd suits certain use-cases as mentioned in the README file. For those who want a solution which can scale out horizontally with minimum effort, Kubernetes or K3s is a valid option.
|
||||
faasd suits certain use-cases as mentioned in the [README.md](/README.md) file. For those who want a solution which can scale out horizontally with minimum effort, Kubernetes or K3s is a valid option.
|
||||
|
||||
Which is right for you? [Read a comparison in the OpenFaaS docs](https://docs.openfaas.com/deployment/)
|
||||
|
||||
### One replica per function
|
||||
|
||||
Functions only support one replica, so cannot scale horizontally, but can scale vertically.
|
||||
Functions only support a single replica each, which means horizontal scaling is not available.
|
||||
|
||||
Workaround: deploy one uniquely named function per replica.
|
||||
It can scale vertically, and this may be a suitable alternative for many use-cases. See the [YAML reference for how to configure limits](https://docs.openfaas.com/reference/yaml/), and the sketch below.
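As a sketch only, a `stack.yml` with a memory limit could look like the following; the image reference is illustrative, and the field names come from the YAML reference linked above:

```bash
# Write a minimal stack.yml with a memory limit, then deploy it
cat > stack.yml <<EOF
provider:
  name: openfaas
  gateway: http://127.0.0.1:8080
functions:
  figlet:
    skip_build: true
    image: ghcr.io/openfaas/figlet:latest
    limits:
      memory: 256Mi
EOF

faas-cli deploy -f stack.yml
```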
|
||||
|
||||
### Scale from zero may give a non-200
|
||||
Workaround: deploy multiple, dynamically named functions `scraper-1`, `scraper-2`, `scraper-3` and set up a reverse proxy rule to load balance i.e. `scraper.example.com => [/function/scraper-1, /function/scraper-2, /function/scraper-3]`.
|
||||
|
||||
When scaling from zero there is no health check implemented, so a request may arrive before your HTTP server is ready to serve it, and therefore return a non-200 code.
|
||||
### Leaf-node only - no clustering
|
||||
|
||||
Workaround: Do not scale to zero, or have your client retry HTTP calls.
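A minimal client-side retry sketch with curl (the `--retry-all-errors` flag needs curl 7.71 or newer):

```bash
# Retry the first request after scale-from-zero until the function is ready
curl --retry 5 --retry-delay 1 --retry-all-errors \
  -d "faasd" $OPENFAAS_URL/function/figlet
```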
|
||||
faasd operates on a leaf-node/single-node model. If this is an issue for you, even with resource constraints, you will need to use [OpenFaaS CE or Pro on Kubernetes](https://docs.openfaas.com/deployment/).
|
||||
|
||||
### No clustering is available
|
||||
There are no plans to add any form of clustering or multi-node support to faasd.
|
||||
|
||||
No clustering is available in faasd; however, you can still apply fault-tolerance and high availability techniques.
|
||||
See past discussion at: [HA / resilience in faasd #225](https://github.com/openfaas/faasd/issues/225)
|
||||
|
||||
Workaround: deploy multiple faasd instances and use a hardware or software load-balancer. Take regular VM/host snapshots or backups.
|
||||
What about HA and fault tolerance?
|
||||
|
||||
### No rolling updates
|
||||
To achieve fault tolerance, you could put two faasd instances behind a load balancer or proxy, but you will need to deploy the same set of functions to each.
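A sketch of that setup with Caddy, which the repo's cloud-config already uses as a reverse proxy; the domain and upstream IPs are placeholders:

```bash
# Round-robin two faasd hosts behind a single Caddy entrypoint
sudo tee /etc/caddy/Caddyfile > /dev/null <<EOF
faasd.example.com {
    reverse_proxy 10.0.0.10:8080 10.0.0.11:8080 {
        lb_policy round_robin
    }
}
EOF

sudo systemctl reload caddy
```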
|
||||
|
||||
When running `faas-cli deploy`, your old function is removed before the new one is started. This may cause a small amount of downtime, depending on the timeouts and grace periods you set.
|
||||
An alternative would be to take regular VM backups or snapshots.
|
||||
|
||||
Workaround: deploy uniquely named functions per version, and switch an Ingress or Reverse Proxy record to point at a new version once it is ready.
|
||||
### No rolling updates are available today
|
||||
|
||||
When running `faas-cli deploy`, your old function is removed before the new one is started. This may cause a period of downtime, depending on the timeouts and grace periods you set.
|
||||
|
||||
Workaround: deploy uniquely named functions, e.g. `scraper-1` and `scraper-2`, with a reverse proxy rule that maps `/function/scraper` to the active version, as in the sketch below.
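A sketch of such a rule with Caddy; the domain and function names are placeholders, and the rewrite target is swapped once the new version is ready:

```bash
# Map the stable path to whichever version is currently live
sudo tee /etc/caddy/Caddyfile > /dev/null <<EOF
faasd.example.com {
    rewrite /function/scraper /function/scraper-1
    reverse_proxy 127.0.0.1:8080
}
EOF

sudo systemctl reload caddy
```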
|
||||
|
||||
## Known issues
|
||||
|
||||
### Non 200 HTTP status from the gateway upon first use
|
||||
### Troubleshooting
|
||||
|
||||
There is a very detailed chapter on troubleshooting in the eBook [Serverless For Everyone Else](https://store.openfaas.com/l/serverless-for-everyone-else)
|
||||
|
||||
### Your function timed-out at 60 seconds
|
||||
|
||||
See the manual for how to configure longer timeouts.
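As a rough sketch (see the manual for the full picture, including the timeouts of the core services defined in faasd's docker-compose.yaml), a function's own timeouts can be raised at deploy time with environment variables:

```bash
# Raise the function's own timeouts beyond the 60 second default
faas-cli store deploy figlet \
  --env read_timeout=120s \
  --env write_timeout=120s \
  --env exec_timeout=120s
```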
|
||||
|
||||
### Non 200 HTTP status from the gateway upon reboot
|
||||
|
||||
This issue appears to happen sporadically and only for some users.
|
||||
|
||||
@ -75,20 +99,24 @@ sudo systemctl restart faasd
|
||||
|
||||
Should have:
|
||||
|
||||
* [ ] Resolve core services from functions by populating/sharing `/etc/hosts` between `faasd` and `faasd-provider`
|
||||
* [ ] Docs or examples on how to use the various connectors and connector-sdk
|
||||
* [ ] Monitor and restart any of the core components at runtime if the container stops
|
||||
* [ ] Asynchronous deletion instead of synchronous
|
||||
* [ ] Restart any of the containers in docker-compose.yaml if they crash.
|
||||
* [ ] Asynchronous function deployment and deletion (currently synchronous/blocking)
|
||||
|
||||
Nice to Have:
|
||||
|
||||
* [ ] Terraform for AWS (in-progress)
|
||||
* [ ] Total memory limits - if a node has 1GB of RAM, don't allow more than 1000MB of RAM to be reserved via limits
|
||||
* [ ] Offer live rolling-updates, with zero downtime - requires moving to IDs vs. names for function containers
|
||||
* [ ] Live rolling-updates, with zero downtime (may require using IDs instead of names for function containers)
|
||||
* [ ] Apply a total memory limit for the host (if a node has 1GB of RAM, don't allow more than 1GB of RAM to be specified in the limits field)
|
||||
* [ ] Terraform for AWS EC2
|
||||
|
||||
Won't have:
|
||||
|
||||
* [ ] Clustering
|
||||
* [ ] Multiple replicas per function
|
||||
|
||||
### Completed
|
||||
|
||||
* [x] Docs or examples on how to use the various event connectors (Yes in the eBook)
|
||||
* [x] Resolve core services from functions by populating/sharing `/etc/hosts` between `faasd` and `faasd-provider`
|
||||
* [x] Provide a cloud-init configuration for faasd bootstrap
|
||||
* [x] Configure core services from a docker-compose.yaml file
|
||||
* [x] Store and fetch logs from the journal
|
||||
@ -101,7 +129,7 @@ Nice to Have:
|
||||
* [x] Self-install / create systemd service via `faasd install`
|
||||
* [x] Restart containers upon restart of faasd
|
||||
* [x] Clear / remove containers and tasks with SIGTERM / SIGINT
|
||||
* [x] Determine armhf/arm64 containers to run for gateway
|
||||
* [x] Determine arm64 containers to run for gateway
|
||||
* [x] Configure `basic_auth` to protect the OpenFaaS gateway and faasd-provider HTTP API
|
||||
* [x] Setup custom working directory for faasd `/var/lib/faasd/`
|
||||
* [x] Use CNI to create network namespaces and adapters
|
||||
@ -113,4 +141,7 @@ Nice to Have:
|
||||
* [x] Terraform for DigitalOcean
|
||||
* [x] [Store and retrieve annotations in function spec](https://github.com/openfaas/faasd/pull/86) - in progress
|
||||
* [x] An installer for faasd and dependencies - runc, containerd
|
||||
|
||||
* [x] Offer a recommendation or implement a strategy for faasd replication/HA
|
||||
* [x] [Remove / deprecate armhf / armv7 support](https://github.com/openfaas/faasd/issues/364)
|
||||
* [x] Add support for CPU/RAM metrics in the UI, CLI and API
|
||||
* [x] Network segmentation (functions cannot talk to each other or the host)
|
||||
|
14
docs/TERRAFORM.md
Normal file
14
docs/TERRAFORM.md
Normal file
@ -0,0 +1,14 @@
|
||||
# Terraform Modules for faasd
|
||||
|
||||
| Terraform Module | Author | Origin | Status |
|
||||
|-----------|--------|--------|--------|
|
||||
| [faasd on DigitalOcean](https://github.com/openfaas/faasd/tree/master/docs/bootstrap/digitalocean-terraform) | OpenFaaS | Official | Active |
|
||||
| [faasd on DigitalOcean](https://github.com/jsiebens/terraform-digitalocean-faasd) | Johan Siebens | Community | Active |
|
||||
| [faasd on Google Cloud Platform](https://github.com/jsiebens/terraform-google-faasd) | Johan Siebens | Community | Active |
|
||||
| [faasd on Microsoft Azure ](https://github.com/jsiebens/terraform-azurerm-faasd) | Johan Siebens | Community | Active |
|
||||
| [faasd on Equinix Metal](https://github.com/jsiebens/terraform-equinix-faasd) | Johan Siebens | Community | Active |
|
||||
| [faasd on Scaleway](https://github.com/jsiebens/terraform-scaleway-faasd) | Johan Siebens | Community | Active |
|
||||
| [faasd on Amazon Web Services](https://github.com/jsiebens/terraform-aws-faasd) |Johan Siebens | Community | Active |
|
||||
| [faasd on Linode](https://github.com/itTrident/terraform-linode-faasd) | itTrident | Community | Active |
|
||||
| [faasd on Vultr](https://github.com/itTrident/terraform-vultr-faasd) | itTrident | Community | Active |
|
||||
| [faasd on Exoscale](https://github.com/itTrident/terraform-exoscale-faasd) | itTrident | Community | Active |
|
3
docs/bootstrap/.gitignore
vendored
3
docs/bootstrap/.gitignore
vendored
@ -1,3 +0,0 @@
|
||||
/.terraform/
|
||||
/terraform.tfstate
|
||||
/terraform.tfstate.backup
|
@ -1,20 +0,0 @@
|
||||
# Bootstrap faasd on Digitalocean
|
||||
|
||||
1) [Sign up to DigitalOcean](https://www.digitalocean.com/?refcode=2962aa9e56a1&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=CopyPaste)
|
||||
2) [Download Terraform](https://www.terraform.io)
|
||||
3) Clone this gist using the URL from the address bar
|
||||
4) Run `terraform init`
|
||||
5) Run `terraform apply -var="do_token=$(cat $HOME/digitalocean-access-token)"`
|
||||
6) View the output for the login command and gateway URL i.e.
|
||||
|
||||
```
|
||||
gateway_url = http://178.128.39.201:8080/
|
||||
login_cmd = faas-cli login -g http://178.128.39.201:8080/ -p rvIU49CEcFcHmqxj
|
||||
password = rvIU49CEcFcHmqxj
|
||||
```
|
||||
|
||||
Note that the user-data may take a couple of minutes to come up since it will be pulling in various components and preparing the machine.
|
||||
|
||||
A single host with 1GB of RAM will be deployed for you; to remove it at a later date, simply use `terraform destroy`.
|
||||
|
||||
If required, you can remove the VM via `terraform destroy -var="do_token=$(cat $HOME/digitalocean-access-token)"`
|
@ -1,8 +1,4 @@
|
||||
#cloud-config
|
||||
ssh_authorized_keys:
|
||||
## Note: Replace with your own public key
|
||||
- ${ssh_key}
|
||||
|
||||
package_update: true
|
||||
|
||||
packages:
|
||||
@ -10,18 +6,18 @@ packages:
|
||||
- git
|
||||
|
||||
runcmd:
|
||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.7.0/containerd-1.7.0-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.7.0/containerd.service | tee /etc/systemd/system/containerd.service
|
||||
- systemctl daemon-reload && systemctl start containerd
|
||||
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
- mkdir -p /opt/cni/bin
|
||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz | tar -xz -C /opt/cni/bin
|
||||
- mkdir -p /go/src/github.com/openfaas/
|
||||
- mkdir -p /var/lib/faasd/secrets/
|
||||
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||
- echo admin > /var/lib/faasd/secrets/basic-auth-user
|
||||
- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.10.2 https://github.com/openfaas/faasd
|
||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.10.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||
- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.16.2 https://github.com/openfaas/faasd
|
||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.16.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||
- systemctl status -l containerd --no-pager
|
||||
- journalctl -u faasd-provider --no-pager
|
||||
|
66
docs/bootstrap/digitalocean-terraform/.terraform.lock.hcl
generated
Normal file
66
docs/bootstrap/digitalocean-terraform/.terraform.lock.hcl
generated
Normal file
@ -0,0 +1,66 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/digitalocean/digitalocean" {
|
||||
version = "2.11.0"
|
||||
constraints = "2.11.0"
|
||||
hashes = [
|
||||
"h1:/qAnTOSP5KeZkF7wqLai34SKAs7aefulcUA3I8R7rRg=",
|
||||
"h1:PbXtjUfvxwmkycJ0Y9Dyn66Arrpk5L8/P381SXMx2O0=",
|
||||
"h1:lXLX9tmuxV7azTHd0xB0FAVrxyfBtotIz5LEJp8YUk0=",
|
||||
"zh:2191adc79bdfdb3b733e0619e4f391ae91c1631c5dafda42dab561d943651fa4",
|
||||
"zh:21a4f67e42dcdc10fbd7f8579247594844d09a469a3a54862d565913e4d6121d",
|
||||
"zh:557d98325fafcf2db91ea6d92f65373a48c4e995a1a7aeb57009661fee675250",
|
||||
"zh:68c0238cafc37433627e288fcd2c7e14f4f0afdd50b4f265d8d1f1addab6f19f",
|
||||
"zh:7e6d69720734455eb1c69880f049650276089b7fa09085e130d224abaeec887a",
|
||||
"zh:95bd93a696ec050c1cb5e724498fd12b1d69760d01e97c869be3252025691434",
|
||||
"zh:b1b075049e33aa08c032f41a497351c9894f16287a4449032d8b805bc6dcb596",
|
||||
"zh:ba91aa853372c828f808c09dbab2a5bc9493a7cf93210d1487f9637b2cac8ca4",
|
||||
"zh:bc43d27dfe014266697c2ac259f4311300391aa6aa7c5d23e382fe296df938d5",
|
||||
"zh:d3a04d2c76bfc1f46a117b1af7870a97353319ee8f924a37fe77861519f59525",
|
||||
"zh:d3da997c05a653df6cabb912c6c05ceb6bf77219b699f04daf44fd795c81c6ed",
|
||||
"zh:edd0659021b6634acf0f581d1be1985a81fcd1182e3ccb43de6eac6c43be9ab4",
|
||||
"zh:f588ace57b6c35d509ecaa7136e6a8049d227b0674104a1f958359b84862d8e3",
|
||||
"zh:f894ed195a3b9ebbfa1ba7c5d71be06df3a96d783ff064d22dd693ace34d638e",
|
||||
"zh:fb6b0d4b111fafdcb3bb9a7dbab88e2110a6ce6324de64ecf62933ee8b651ccf",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/random" {
|
||||
version = "3.1.0"
|
||||
hashes = [
|
||||
"h1:BZMEPucF+pbu9gsPk0G0BHx7YP04+tKdq2MrRDF1EDM=",
|
||||
"h1:EPIax4Ftp2SNdB9pUfoSjxoueDoLc/Ck3EUoeX0Dvsg=",
|
||||
"h1:rKYu5ZUbXwrLG1w81k7H3nce/Ys6yAxXhWcbtk36HjY=",
|
||||
"zh:2bbb3339f0643b5daa07480ef4397bd23a79963cc364cdfbb4e86354cb7725bc",
|
||||
"zh:3cd456047805bf639fbf2c761b1848880ea703a054f76db51852008b11008626",
|
||||
"zh:4f251b0eda5bb5e3dc26ea4400dba200018213654b69b4a5f96abee815b4f5ff",
|
||||
"zh:7011332745ea061e517fe1319bd6c75054a314155cb2c1199a5b01fe1889a7e2",
|
||||
"zh:738ed82858317ccc246691c8b85995bc125ac3b4143043219bd0437adc56c992",
|
||||
"zh:7dbe52fac7bb21227acd7529b487511c91f4107db9cc4414f50d04ffc3cab427",
|
||||
"zh:a3a9251fb15f93e4cfc1789800fc2d7414bbc18944ad4c5c98f466e6477c42bc",
|
||||
"zh:a543ec1a3a8c20635cf374110bd2f87c07374cf2c50617eee2c669b3ceeeaa9f",
|
||||
"zh:d9ab41d556a48bd7059f0810cf020500635bfc696c9fc3adab5ea8915c1d886b",
|
||||
"zh:d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7",
|
||||
"zh:f7605bd1437752114baf601bdf6931debe6dc6bfe3006eb7e9bb9080931dca8a",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/template" {
|
||||
version = "2.2.0"
|
||||
hashes = [
|
||||
"h1:0wlehNaxBX7GJQnPfQwTNvvAf38Jm0Nv7ssKGMaG6Og=",
|
||||
"h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=",
|
||||
"h1:LN84cu+BZpVRvYlCzrbPfCRDaIelSyEx/W9Iwwgbnn4=",
|
||||
"zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
|
||||
"zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
|
||||
"zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
|
||||
"zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
|
||||
"zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
|
||||
"zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
|
||||
"zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
|
||||
"zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
|
||||
"zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
|
||||
"zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
|
||||
]
|
||||
}
|
@ -27,10 +27,20 @@
|
||||
```
|
||||
droplet_ip = 178.128.39.201
|
||||
gateway_url = https://faasd.example.com/
|
||||
login_cmd = faas-cli login -g https://faasd.example.com/ -p rvIU49CEcFcHmqxj
|
||||
```
|
||||
|
||||
8) View the output for sensitive data via `terraform output` command
|
||||
|
||||
```bash
|
||||
terraform output login_cmd
|
||||
login_cmd = faas-cli login -g http://178.128.39.201:8080/ -p rvIU49CEcFcHmqxj
|
||||
|
||||
terraform output password
|
||||
password = rvIU49CEcFcHmqxj
|
||||
```
|
||||
8) Use your browser to access the OpenFaaS interface
|
||||
|
||||
|
||||
9) Use your browser to access the OpenFaaS interface
|
||||
|
||||
Note that the host may take a couple of minutes to come up, since the user-data will be pulling in various components and preparing the machine.
|
||||
Also take into consideration the DNS propagation time for the new DNS record.
|
||||
|
@ -1,57 +1,9 @@
|
||||
#cloud-config
|
||||
ssh_authorized_keys:
|
||||
- ${ssh_key}
|
||||
#! /bin/bash
|
||||
|
||||
groups:
|
||||
- caddy
|
||||
mkdir -p /var/lib/faasd/secrets/
|
||||
echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||
|
||||
users:
|
||||
- name: caddy
|
||||
gecos: Caddy web server
|
||||
primary_group: caddy
|
||||
groups: caddy
|
||||
shell: /usr/sbin/nologin
|
||||
homedir: /var/lib/caddy
|
||||
export FAASD_DOMAIN=${faasd_domain_name}
|
||||
export LETSENCRYPT_EMAIL=${letsencrypt_email}
|
||||
|
||||
write_files:
|
||||
- content: |
|
||||
{
|
||||
email ${letsencrypt_email}
|
||||
}
|
||||
|
||||
${faasd_domain_name} {
|
||||
reverse_proxy 127.0.0.1:8080
|
||||
}
|
||||
|
||||
path: /etc/caddy/Caddyfile
|
||||
|
||||
package_update: true
|
||||
|
||||
packages:
|
||||
- runc
|
||||
|
||||
runcmd:
|
||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||
- systemctl daemon-reload && systemctl start containerd
|
||||
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
- mkdir -p /opt/cni/bin
|
||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||
- mkdir -p /go/src/github.com/openfaas/
|
||||
- mkdir -p /var/lib/faasd/secrets/
|
||||
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||
- echo admin > /var/lib/faasd/secrets/basic-auth-user
|
||||
- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.10.2 https://github.com/openfaas/faasd
|
||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.10.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||
- systemctl status -l containerd --no-pager
|
||||
- journalctl -u faasd-provider --no-pager
|
||||
- systemctl status -l faasd-provider --no-pager
|
||||
- systemctl status -l faasd --no-pager
|
||||
- curl -sSLf https://cli.openfaas.com | sh
|
||||
- sleep 5 && journalctl -u faasd --no-pager
|
||||
- wget https://github.com/caddyserver/caddy/releases/download/v2.1.1/caddy_2.1.1_linux_amd64.tar.gz -O /tmp/caddy.tar.gz && tar -zxvf /tmp/caddy.tar.gz -C /usr/bin/ caddy
|
||||
- wget https://raw.githubusercontent.com/caddyserver/dist/master/init/caddy.service -O /etc/systemd/system/caddy.service
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable caddy
|
||||
- systemctl start caddy
|
||||
curl -sfL https://raw.githubusercontent.com/openfaas/faasd/master/hack/install.sh | bash -s -
|
||||
|
@ -1,5 +1,11 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
required_version = ">= 1.0.4"
|
||||
required_providers {
|
||||
digitalocean = {
|
||||
source = "digitalocean/digitalocean"
|
||||
version = "2.11.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variable "do_token" {
|
||||
@ -10,7 +16,7 @@ variable "do_domain" {
|
||||
}
|
||||
variable "do_subdomain" {
|
||||
description = "Your public subdomain"
|
||||
default = "faasd"
|
||||
default = "faasd"
|
||||
}
|
||||
variable "letsencrypt_email" {
|
||||
description = "Email used to order a certificate from Letsencrypt"
|
||||
@ -32,41 +38,44 @@ provider "digitalocean" {
|
||||
token = var.do_token
|
||||
}
|
||||
|
||||
data "local_file" "ssh_key"{
|
||||
filename = pathexpand(var.ssh_key_file)
|
||||
}
|
||||
|
||||
resource "random_password" "password" {
|
||||
length = 16
|
||||
special = true
|
||||
length = 16
|
||||
special = true
|
||||
override_special = "_-#"
|
||||
}
|
||||
|
||||
data "template_file" "cloud_init" {
|
||||
template = "${file("cloud-config.tpl")}"
|
||||
vars = {
|
||||
gw_password=random_password.password.result,
|
||||
ssh_key=data.local_file.ssh_key.content,
|
||||
faasd_domain_name="${var.do_subdomain}.${var.do_domain}"
|
||||
letsencrypt_email=var.letsencrypt_email
|
||||
}
|
||||
template = file("cloud-config.tpl")
|
||||
vars = {
|
||||
gw_password = random_password.password.result,
|
||||
faasd_domain_name = "${var.do_subdomain}.${var.do_domain}"
|
||||
letsencrypt_email = var.letsencrypt_email
|
||||
}
|
||||
}
|
||||
|
||||
resource "digitalocean_ssh_key" "faasd_ssh_key" {
|
||||
name = "ssh-key"
|
||||
public_key = file(var.ssh_key_file)
|
||||
}
|
||||
|
||||
resource "digitalocean_droplet" "faasd" {
|
||||
region = var.do_region
|
||||
image = "ubuntu-18-04-x64"
|
||||
name = "faasd"
|
||||
size = "s-1vcpu-1gb"
|
||||
region = var.do_region
|
||||
image = "ubuntu-18-04-x64"
|
||||
name = "faasd"
|
||||
size = "s-1vcpu-1gb"
|
||||
user_data = data.template_file.cloud_init.rendered
|
||||
ssh_keys = [
|
||||
digitalocean_ssh_key.faasd_ssh_key.id
|
||||
]
|
||||
}
|
||||
|
||||
resource "digitalocean_record" "faasd" {
|
||||
domain = var.do_domain
|
||||
type = "A"
|
||||
name = "faasd"
|
||||
name = var.do_subdomain
|
||||
value = digitalocean_droplet.faasd.ipv4_address
|
||||
# Only creates record if do_create_record is true
|
||||
count = var.do_create_record == true ? 1 : 0
|
||||
count = var.do_create_record == true ? 1 : 0
|
||||
}
|
||||
|
||||
output "droplet_ip" {
|
||||
@ -78,9 +87,11 @@ output "gateway_url" {
|
||||
}
|
||||
|
||||
output "password" {
|
||||
value = random_password.password.result
|
||||
value = random_password.password.result
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "login_cmd" {
|
||||
value = "faas-cli login -g https://${var.do_subdomain}.${var.do_domain}/ -p ${random_password.password.result}"
|
||||
value = "faas-cli login -g https://${var.do_subdomain}.${var.do_domain}/ -p ${random_password.password.result}"
|
||||
sensitive = true
|
||||
}
|
||||
|
@ -1,56 +0,0 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
}
|
||||
|
||||
variable "do_token" {}
|
||||
|
||||
variable "ssh_key_file" {
|
||||
default = "~/.ssh/id_rsa.pub"
|
||||
description = "Path to the SSH public key file"
|
||||
}
|
||||
|
||||
provider "digitalocean" {
|
||||
token = var.do_token
|
||||
}
|
||||
|
||||
resource "random_password" "password" {
|
||||
length = 16
|
||||
special = true
|
||||
override_special = "_-#"
|
||||
}
|
||||
|
||||
data "local_file" "ssh_key"{
|
||||
filename = pathexpand(var.ssh_key_file)
|
||||
}
|
||||
|
||||
data "template_file" "cloud_init" {
|
||||
template = "${file("cloud-config.tpl")}"
|
||||
vars = {
|
||||
gw_password=random_password.password.result,
|
||||
ssh_key=data.local_file.ssh_key.content,
|
||||
}
|
||||
}
|
||||
|
||||
resource "digitalocean_droplet" "faasd" {
|
||||
|
||||
region = "lon1"
|
||||
image = "ubuntu-18-04-x64"
|
||||
name = "faasd"
|
||||
# Plans: https://developers.digitalocean.com/documentation/changelog/api-v2/new-size-slugs-for-droplet-plan-changes/
|
||||
#size = "512mb"
|
||||
size = "s-1vcpu-1gb"
|
||||
user_data = data.template_file.cloud_init.rendered
|
||||
}
|
||||
|
||||
output "password" {
|
||||
value = random_password.password.result
|
||||
}
|
||||
|
||||
output "gateway_url" {
|
||||
value = "http://${digitalocean_droplet.faasd.ipv4_address}:8080/"
|
||||
}
|
||||
|
||||
output "login_cmd" {
|
||||
value = "faas-cli login -g http://${digitalocean_droplet.faasd.ipv4_address}:8080/ -p ${random_password.password.result}"
|
||||
}
|
||||
|
131
go.mod
131
go.mod
@ -1,50 +1,101 @@
|
||||
module github.com/openfaas/faasd
|
||||
|
||||
go 1.15
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.23.7
|
||||
|
||||
require (
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92 // indirect
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f
|
||||
github.com/alexellis/arkade v0.0.0-20240320084407-6cf4a641c415
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 // indirect
|
||||
github.com/containerd/containerd v1.3.2
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02 // indirect
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c // indirect
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75
|
||||
github.com/containerd/ttrpc v1.0.0 // indirect
|
||||
github.com/containerd/typeurl v1.0.0 // indirect
|
||||
github.com/containerd/containerd v1.7.27
|
||||
github.com/containerd/go-cni v1.1.9
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
||||
github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.3 // indirect
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad // indirect
|
||||
github.com/gogo/googleapis v1.2.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/imdario/mergo v0.3.9 // indirect
|
||||
github.com/docker/cli v24.0.7+incompatible
|
||||
github.com/docker/docker v24.0.7+incompatible // indirect
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/morikuni/aec v1.0.0
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||
github.com/opencontainers/runc v1.0.0-rc9 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.0.2
|
||||
github.com/openfaas/faas v0.0.0-20201205125747-9bbb25e3c7c4
|
||||
github.com/openfaas/faas-provider v0.16.2
|
||||
github.com/opencontainers/runtime-spec v1.1.0
|
||||
github.com/openfaas/faas-provider v0.25.3
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/procfs v0.2.0 // indirect
|
||||
github.com/sethvargo/go-password v0.1.3
|
||||
github.com/spf13/cobra v0.0.5
|
||||
github.com/sethvargo/go-password v0.2.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
|
||||
github.com/vishvananda/netlink v1.1.0
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
go.etcd.io/bbolt v1.3.5 // indirect
|
||||
go.opencensus.io v0.22.2 // indirect
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
|
||||
google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f // indirect
|
||||
google.golang.org/grpc v1.23.0 // indirect
|
||||
k8s.io/apimachinery v0.18.9
|
||||
github.com/vishvananda/netlink v1.2.1-beta.2
|
||||
github.com/vishvananda/netns v0.0.4
|
||||
golang.org/x/sys v0.31.0
|
||||
k8s.io/apimachinery v0.29.3
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/alexellis/go-execute/v2 v2.2.1
|
||||
github.com/distribution/reference v0.6.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.11.7 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/containerd/cgroups v1.1.0 // indirect
|
||||
github.com/containerd/containerd/api v1.8.0 // indirect
|
||||
github.com/containerd/continuity v0.4.4 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/fifo v1.1.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/containerd/ttrpc v1.2.7 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.1.1 // indirect
|
||||
github.com/containernetworking/cni v1.1.2 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.0 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/imdario/mergo v0.3.14 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/mattn/go-shellwords v1.0.12 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/sys/signal v0.7.0 // indirect
|
||||
github.com/moby/sys/user v0.3.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/opencontainers/selinux v1.11.0 // indirect
|
||||
github.com/prometheus/client_golang v1.19.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.0 // indirect
|
||||
github.com/prometheus/common v0.51.1 // indirect
|
||||
github.com/prometheus/procfs v0.13.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
|
||||
go.opentelemetry.io/otel v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.21.0 // indirect
|
||||
golang.org/x/net v0.38.0 // indirect
|
||||
golang.org/x/sync v0.12.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
google.golang.org/grpc v1.59.0 // indirect
|
||||
google.golang.org/protobuf v1.35.2 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gotest.tools/v3 v3.0.3 // indirect
|
||||
)
|
||||
|
544
go.sum
544
go.sum
@ -1,361 +1,381 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92 h1:LbNLS5KKzW/L4K2scH5rzTA5MvRWiUfcWv4Qh/6D3wY=
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92/go.mod h1:nvUXb1s75kCTKHWZ1FGlut+PBI9D9EoQHCKcil2Cyps=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alexellis/go-execute v0.0.0-20191207085904-961405ea7544/go.mod h1:zfRbgnPVxXCSpiKrg1CE72hNUWInqxExiaz2D9ppTts=
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e h1:0cv4CUENL7e67/ZlNrvExWqa6oKH/9iv0KQn0/+hYaY=
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e/go.mod h1:zfRbgnPVxXCSpiKrg1CE72hNUWInqxExiaz2D9ppTts=
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f h1:GYVsHjM9/lyu0QBfNp1RThZNDrmRJncpSYtaektq9t0=
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f/go.mod h1:slULyLX94hIIP3/eVeZqQAqOwpLYK9Pljr8SfaR2nWI=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
|
||||
github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
|
||||
github.com/alexellis/arkade v0.0.0-20240320084407-6cf4a641c415 h1:mLD1eSfXbmXcwKMP1AsFl01G2U16aC9E22Tcjehyyrw=
|
||||
github.com/alexellis/arkade v0.0.0-20240320084407-6cf4a641c415/go.mod h1:3sT9Gq9WUFG9Wwz9dFbRRc/2L3yepsA4p272aG2DR6w=
|
||||
github.com/alexellis/go-execute/v2 v2.2.1 h1:4Ye3jiCKQarstODOEmqDSRCqxMHLkC92Bhse743RdOI=
|
||||
github.com/alexellis/go-execute/v2 v2.2.1/go.mod h1:FMdRnUTiFAmYXcv23txrp3VYZfLo24nMpiIneWgKHTQ=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05 h1:3CoRXflT4IAdIpsFTqZBlLuHOkHYhBvW0iijkQKm4QY=
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05/go.mod h1:y75QUr1jcR5aFNf3Tj3dhwnujABGz6UaRrZ5qZwF1cc=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v0.0.0-20190214164719-faec567304bb/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02 h1:tN9D97v5A5QuKdcKHKt+UMKrkQ5YXUnD8iM7IAAjEfI=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75 h1:5Q5C6jDObSVpjeX8CuZ5yac8d/KIYuPzUHbUzdL+NFw=
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75/go.mod h1:0mg8r6FCdbxvLDqCXwAx2rO+KA37QICjKL8+wHOG5OE=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/ttrpc v0.0.0-20180920185216-2a805f718635/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v1.0.0 h1:NY8Zk2i7TpkLxrkOASo+KTFq9iNCEmMH2/ZG9OuOw6k=
|
||||
github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containerd/typeurl v1.0.0 h1:7LMH7LfEmpWeCkGcIputvd4P0Rnd0LrIv1Jk2s5oobs=
|
||||
github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
go.sum checksum entries (module h1: hashes and go.mod hashes) for the dependency graph. Notable module versions appearing in this part of the diff:
    github.com/containerd/containerd v1.7.27, github.com/containerd/containerd/api v1.8.0,
    github.com/containerd/go-cni v1.1.9, github.com/containernetworking/cni v1.1.2,
    github.com/docker/cli v24.0.7+incompatible, github.com/docker/docker v24.0.7+incompatible,
    github.com/opencontainers/image-spec v1.1.0, github.com/opencontainers/runtime-spec v1.1.0,
    github.com/openfaas/faas-provider v0.25.3, github.com/prometheus/client_golang v1.19.0,
    github.com/sirupsen/logrus v1.9.3, github.com/spf13/cobra v1.8.0, github.com/stretchr/testify v1.8.4,
    github.com/vishvananda/netlink v1.2.1-beta.2, go.opentelemetry.io/otel v1.21.0,
    golang.org/x/net v0.38.0, golang.org/x/sys v0.31.0, google.golang.org/grpc v1.59.0,
    google.golang.org/protobuf v1.35.2, k8s.io/apimachinery v0.29.3
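The go.sum entries above are generated by the Go toolchain rather than written by hand. A minimal sketch of reproducing the headline containerd bump locally (the module path and version are taken from the entries above; the exact commands the maintainers ran are an assumption):

  go get github.com/containerd/containerd@v1.7.27
  go mod tidy     # rewrites go.mod and prunes unused go.sum entries
  go mod verify   # re-checks downloaded modules against the go.sum hashes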
hack/build-containerd-armhf.sh → hack/build-containerd-arm64.sh (18 changed lines, Executable file → Normal file)
@@ -1,12 +1,16 @@
#!/bin/bash

export ARCH="armv6l"
echo "Downloading Go"
# See pre-reqs:
# https://github.com/alexellis/containerd-arm

curl -SLsf https://dl.google.com/go/go1.13.15.linux-$ARCH.tar.gz --output /tmp/go.tgz
sudo rm -rf /usr/local/go/
sudo mkdir -p /usr/local/go/
sudo tar -xvf /tmp/go.tgz -C /usr/local/go/ --strip-components=1
export ARCH="arm64"

if [ ! -d "/usr/local/go/bin" ]; then
    curl -sLS https://get.arkade.dev | sudo sh
    sudo -E arkade system install go
else
    echo "Go already present, skipping."
fi

export GOPATH=$HOME/go/
export PATH=$PATH:/usr/local/go/bin/
@@ -21,7 +25,7 @@ git clone https://github.com/containerd/containerd

cd containerd
git fetch origin --tags
git checkout v1.3.2
git checkout v1.7.27

make
sudo make install
@ -1,12 +1,17 @@
|
||||
#!/bin/bash
|
||||
|
||||
export ARCH="amd64"
|
||||
echo "Downloading Go"
|
||||
|
||||
curl -SLsf https://dl.google.com/go/go1.12.14.linux-$ARCH.tar.gz --output /tmp/go.tgz
|
||||
sudo rm -rf /usr/local/go/
|
||||
sudo mkdir -p /usr/local/go/
|
||||
sudo tar -xvf /tmp/go.tgz -C /usr/local/go/ --strip-components=1
|
||||
# See pre-reqs:
|
||||
# https://github.com/alexellis/containerd-arm
|
||||
|
||||
export ARCH="arm64"
|
||||
|
||||
if [ ! -d "/usr/local/go/bin" ]; then
|
||||
curl -sLS https://get.arkade.dev | sudo sh
|
||||
sudo -E arkade system install go
|
||||
else
|
||||
echo "Go already present, skipping."
|
||||
fi
|
||||
|
||||
export GOPATH=$HOME/go/
|
||||
export PATH=$PATH:/usr/local/go/bin/
|
||||
@ -21,7 +26,7 @@ git clone https://github.com/containerd/containerd
|
||||
|
||||
cd containerd
|
||||
git fetch origin --tags
|
||||
git checkout v1.3.2
|
||||
git checkout v1.7.27
|
||||
|
||||
make
|
||||
sudo make install
|
||||
|
48
hack/enable-journal.sh
Normal file
@ -0,0 +1,48 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright OpenFaaS Ltd 2025
|
||||
|
||||
# This script is for use with Droplets created on DigitalOcean, it will
|
||||
# ensure that systemd-journald is configured to log to disk and that
|
||||
# rsyslog is configured to read from systemd-journald.
|
||||
|
||||
# Without this change, no logs will be available in the journal, and only
|
||||
# /var/log/syslog will be populated.
|
||||
|
||||
set -e
|
||||
|
||||
echo "Checking systemd-journald logs..."
|
||||
JOURNAL_STATUS=$(journalctl --no-pager -n 10 2>&1)
|
||||
|
||||
if echo "$JOURNAL_STATUS" | grep -q "No journal files were found"; then
|
||||
echo "No journal files found. Fixing logging configuration..."
|
||||
else
|
||||
echo "Journald appears to be logging. No changes needed."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Backup original config before making changes
|
||||
sudo cp /etc/systemd/journald.conf /etc/systemd/journald.conf.bak
|
||||
|
||||
# Ensure Storage is persistent
|
||||
sudo sed -i '/^#Storage=/c\Storage=persistent' /etc/systemd/journald.conf
|
||||
|
||||
# Ensure logs are not forwarded only to syslog
|
||||
sudo sed -i '/^#ForwardToSyslog=/c\ForwardToSyslog=no' /etc/systemd/journald.conf
|
||||
|
||||
# Restart systemd-journald
|
||||
echo "Restarting systemd-journald..."
|
||||
sudo systemctl restart systemd-journald
|
||||
|
||||
# Check if rsyslog already loads imjournal
|
||||
if ! grep -q 'module(load="imjournal")' /etc/rsyslog.conf; then
|
||||
echo "Adding imjournal module to rsyslog..."
|
||||
echo 'module(load="imjournal" StateFile="/var/lib/rsyslog/imjournal.state")' | sudo tee -a /etc/rsyslog.conf
|
||||
fi
|
||||
|
||||
# Restart rsyslog to apply changes
|
||||
echo "Restarting rsyslog..."
|
||||
sudo systemctl restart rsyslog
|
||||
|
||||
echo "Done. Checking if logs appear in journald..."
|
||||
journalctl --no-pager -n 10
|
@ -2,9 +2,10 @@
|
||||
Description=faasd-provider
|
||||
|
||||
[Service]
|
||||
MemoryLimit=500M
|
||||
MemoryMax=500M
|
||||
Environment="secret_mount_path={{.SecretMountPath}}"
|
||||
Environment="basic_auth=true"
|
||||
Environment="hosts_dir=/var/lib/faasd"
|
||||
ExecStart=/usr/local/bin/faasd provider
|
||||
Restart=on-failure
|
||||
RestartSec=10s
|
||||
|
@ -3,7 +3,7 @@ Description=faasd
|
||||
After=faasd-provider.service
|
||||
|
||||
[Service]
|
||||
MemoryLimit=500M
|
||||
MemoryMax=500M
|
||||
ExecStart=/usr/local/bin/faasd up
|
||||
Restart=on-failure
|
||||
RestartSec=10s
|
||||
|
132
hack/install-edge.sh
Normal file
@ -0,0 +1,132 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright OpenFaaS Ltd 2025
|
||||
|
||||
set -e # stop on error
|
||||
set -o pipefail
|
||||
|
||||
export NEEDRESTART_MODE=a
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
if [ "$EUID" -ne 0 ]; then
|
||||
echo "Please run as root or with sudo"
|
||||
exit
|
||||
fi
|
||||
|
||||
has_dnf() {
|
||||
[ -n "$(command -v dnf)" ]
|
||||
}
|
||||
|
||||
has_apt_get() {
|
||||
[ -n "$(command -v apt-get)" ]
|
||||
}
|
||||
|
||||
has_pacman() {
|
||||
[ -n "$(command -v pacman)" ]
|
||||
}
|
||||
|
||||
install_required_packages() {
|
||||
if $(has_apt_get); then
|
||||
|
||||
echo iptables-persistent iptables-persistent/autosave_v4 boolean false | sudo debconf-set-selections
|
||||
echo iptables-persistent iptables-persistent/autosave_v6 boolean false | sudo debconf-set-selections
|
||||
|
||||
# Debian bullseye is missing iptables. Added to required packages
|
||||
# to get it working in raspberry pi. No such known issues in
|
||||
# other distros. Hence, adding only to this block.
|
||||
# reference: https://github.com/openfaas/faasd/pull/237
|
||||
apt-get update -yq
|
||||
apt-get install -yq curl runc bridge-utils iptables iptables-persistent
|
||||
elif $(has_dnf); then
|
||||
dnf install -y \
|
||||
--allowerasing \
|
||||
--setopt=install_weak_deps=False \
|
||||
curl runc iptables-services bridge-utils which
|
||||
elif $(has_pacman); then
|
||||
pacman -Syy
|
||||
pacman -Sy curl runc bridge-utils
|
||||
else
|
||||
fatal "Could not find apt-get, yum, or pacman. Cannot install dependencies on this OS."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
echo "OpenFaaS Edge combines faasd with OpenFaaS Standard"
|
||||
echo ""
|
||||
echo ""
|
||||
|
||||
echo "1. Installing required OS packages, set SKIP_OS=1 to skip this step"
|
||||
echo ""
|
||||
|
||||
if [ -z "$SKIP_OS" ]; then
|
||||
install_required_packages
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "2. Downloading OCI image, and installing pre-requisites"
|
||||
echo ""
|
||||
if [ ! -x "$(command -v arkade)" ]; then
|
||||
# For Centos, RHEL, Fedora, Amazon Linux, and Oracle Linux, use BINLOCATION=/usr/bin/
|
||||
|
||||
if $(has_dnf); then
|
||||
BINLOCATION=/usr/bin/
|
||||
fi
|
||||
|
||||
curl -sLS https://get.arkade.dev | BINLOCATION=${BINLOCATION} sh
|
||||
fi
|
||||
|
||||
PATH=$PATH:$HOME/.arkade/bin
|
||||
|
||||
tmpdir=$(mktemp -d)
|
||||
|
||||
# Ensure all existing services are stopped when installing over an
|
||||
# existing faasd installation
|
||||
systemctl stop faasd || :
|
||||
systemctl stop faasd-provider || :
|
||||
systemctl stop containerd || :
|
||||
killall -9 containerd-shim-runc-v2 || :
|
||||
killall -9 faasd || :
|
||||
|
||||
# crane, or docker can also be used to download the OCI image and to extract it
|
||||
|
||||
# Rather than the :latest tag, a specific tag can be given
|
||||
# Use "crane ls ghcr.io/openfaasltd/faasd-pro" to see available tags
|
||||
|
||||
${BINLOCATION}arkade oci install --path ${tmpdir} \
|
||||
ghcr.io/openfaasltd/faasd-pro:latest
|
||||
|
||||
cd ${tmpdir}
|
||||
./install.sh ./
|
||||
|
||||
if has_dnf; then
|
||||
isRhelLike=true
|
||||
else
|
||||
isRhelLike=false
|
||||
fi
|
||||
|
||||
binaryName="faasd"
|
||||
if [ "$isRhelLike" = true ]; then
|
||||
binaryName="/usr/local/bin/faasd"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "3.1 Commercial users can create their license key as follows:"
|
||||
echo ""
|
||||
echo "sudo mkdir -p /var/lib/faasd/secrets"
|
||||
echo "sudo nano /var/lib/faasd/secrets/openfaas_license"
|
||||
echo ""
|
||||
echo "3.2 For personal, non-commercial use only, GitHub Sponsors of @openfaas (25USD+) can run:"
|
||||
echo ""
|
||||
echo "sudo -E ${binaryName} github login"
|
||||
echo "sudo -E ${binaryName} activate"
|
||||
echo ""
|
||||
echo "4. Then perform the final installation steps"
|
||||
echo ""
|
||||
echo "sudo -E sh -c \"cd ${tmpdir}/var/lib/faasd && ${binaryName} install\""
|
||||
echo ""
|
||||
echo "5. Refer to the complete handbook and supplementary documentation at:"
|
||||
echo ""
|
||||
echo "http://store.openfaas.com/l/serverless-for-everyone-else?layout=profile"
|
||||
echo ""
|
||||
echo "https://docs.openfaas.com/edge/overview"
|
||||
echo ""
|
131
hack/install.sh
@ -1,14 +1,24 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright OpenFaaS Author(s) 2020
|
||||
# Copyright OpenFaaS Author(s) 2022
|
||||
|
||||
#########################
|
||||
# Repo specific content #
|
||||
#########################
|
||||
set -e -x -o pipefail
|
||||
|
||||
export OWNER="openfaas"
|
||||
export REPO="faasd"
|
||||
|
||||
# On CentOS /usr/local/bin is not included in the PATH when using sudo.
|
||||
# Running arkade with sudo on CentOS requires the full path
|
||||
# to the arkade binary.
|
||||
export ARKADE=/usr/local/bin/arkade
|
||||
|
||||
# When running as a startup script (cloud-init), the HOME variable is not always set.
|
||||
# As it is required for arkade to properly download tools,
|
||||
# set the variable to /usr/local so arkade will download binaries to /usr/local/.arkade
|
||||
if [ -z "${HOME}" ]; then
|
||||
export HOME=/usr/local
|
||||
fi
|
||||
|
||||
version=""
|
||||
|
||||
echo "Finding latest version from GitHub"
|
||||
@ -26,77 +36,69 @@ if [ "$(id -u)" -eq 0 ]; then
|
||||
fi
|
||||
|
||||
verify_system() {
|
||||
|
||||
arch=$(uname -m)
|
||||
if [ "$arch" == "armv7l" ]; then
|
||||
fatal 'faasd requires a 64-bit Operating System, see: https://github.com/openfaas/faasd/issues/364'
|
||||
fi
|
||||
|
||||
if ! [ -d /run/systemd ]; then
|
||||
fatal 'Can not find systemd to use as a process supervisor for faasd'
|
||||
fi
|
||||
}
|
||||
|
||||
has_yum() {
|
||||
[ -n "$(command -v yum)" ]
|
||||
has_dnf() {
|
||||
[ -n "$(command -v dnf)" ]
|
||||
}
|
||||
|
||||
has_apt_get() {
|
||||
[ -n "$(command -v apt-get)" ]
|
||||
}
|
||||
|
||||
has_pacman() {
|
||||
[ -n "$(command -v pacman)" ]
|
||||
}
|
||||
|
||||
install_required_packages() {
|
||||
if $(has_apt_get); then
|
||||
# Debian bullseye is missing iptables. Added to required packages
|
||||
# to get it working in raspberry pi. No such known issues in
|
||||
# other distros. Hence, adding only to this block.
|
||||
# reference: https://github.com/openfaas/faasd/pull/237
|
||||
$SUDO apt-get update -y
|
||||
$SUDO apt-get install -y curl runc bridge-utils
|
||||
elif $(has_yum); then
|
||||
$SUDO yum check-update -y
|
||||
$SUDO yum install -y curl runc
|
||||
$SUDO apt-get install -y curl runc bridge-utils iptables
|
||||
elif $(has_dnf); then
|
||||
$SUDO dnf install -y \
|
||||
--allowerasing \
|
||||
--setopt=install_weak_deps=False \
|
||||
curl runc iptables-services bridge-utils
|
||||
elif $(has_pacman); then
|
||||
$SUDO pacman -Syy
|
||||
$SUDO pacman -Sy curl runc bridge-utils
|
||||
else
|
||||
fatal "Could not find apt-get or yum. Cannot install dependencies on this OS."
|
||||
fatal "Could not find apt-get, dnf, or pacman. Cannot install dependencies on this OS."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
install_cni_plugins() {
|
||||
cni_version=v0.8.5
|
||||
suffix=""
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
suffix=amd64
|
||||
;;
|
||||
aarch64)
|
||||
suffix=arm64
|
||||
;;
|
||||
arm*)
|
||||
suffix=arm
|
||||
;;
|
||||
*)
|
||||
fatal "Unsupported architecture $arch"
|
||||
;;
|
||||
esac
|
||||
install_arkade(){
|
||||
curl -sLS https://get.arkade.dev | $SUDO sh
|
||||
arkade --help
|
||||
}
|
||||
|
||||
$SUDO mkdir -p /opt/cni/bin
|
||||
curl -sSL https://github.com/containernetworking/plugins/releases/download/${cni_version}/cni-plugins-linux-${suffix}-${cni_version}.tgz | $SUDO tar -xvz -C /opt/cni/bin
|
||||
install_cni_plugins() {
|
||||
cni_version=v0.9.1
|
||||
$SUDO $ARKADE system install cni --version ${cni_version} --path /opt/cni/bin --progress=false
|
||||
}
|
||||
|
||||
install_containerd() {
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.7/containerd-1.3.7-linux-amd64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
armv7l)
|
||||
curl -sSL https://github.com/alexellis/containerd-arm/releases/download/v1.3.5/containerd-1.3.5-linux-armhf.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
aarch64)
|
||||
curl -sSL https://github.com/alexellis/containerd-arm/releases/download/v1.3.5/containerd-1.3.5-linux-arm64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
*)
|
||||
fatal "Unsupported architecture $arch"
|
||||
;;
|
||||
esac
|
||||
|
||||
CONTAINERD_VER=1.7.27
|
||||
$SUDO systemctl unmask containerd || :
|
||||
$SUDO curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service --output /etc/systemd/system/containerd.service
|
||||
$SUDO systemctl enable containerd
|
||||
$SUDO systemctl start containerd
|
||||
|
||||
arch=$(uname -m)
|
||||
|
||||
$SUDO $ARKADE system install containerd --systemd --version v${CONTAINERD_VER} --progress=false
|
||||
|
||||
sleep 5
|
||||
}
|
||||
|
||||
@ -109,9 +111,6 @@ install_faasd() {
|
||||
aarch64)
|
||||
suffix=-arm64
|
||||
;;
|
||||
armv7l)
|
||||
suffix=-armhf
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported architecture $arch"
|
||||
exit 1
|
||||
@ -133,24 +132,12 @@ install_faasd() {
|
||||
|
||||
install_caddy() {
|
||||
if [ ! -z "${FAASD_DOMAIN}" ]; then
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
suffix="amd64"
|
||||
;;
|
||||
aarch64)
|
||||
suffix=-arm64
|
||||
;;
|
||||
armv7l)
|
||||
suffix=-armv7
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported architecture $arch"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
CADDY_VER=v2.4.3
|
||||
arkade get --progress=false caddy -v ${CADDY_VER}
|
||||
|
||||
# /usr/bin/caddy is specified in the upstream service file.
|
||||
$SUDO install -m 755 $HOME/.arkade/bin/caddy /usr/bin/caddy
|
||||
|
||||
curl -sSL "https://github.com/caddyserver/caddy/releases/download/v2.2.1/caddy_2.2.1_linux_${suffix}.tar.gz" | $SUDO tar -xvz -C /usr/bin/ caddy
|
||||
$SUDO curl -fSLs https://raw.githubusercontent.com/caddyserver/dist/master/init/caddy.service --output /etc/systemd/system/caddy.service
|
||||
|
||||
$SUDO mkdir -p /etc/caddy
|
||||
@ -183,7 +170,8 @@ EOF
|
||||
}
|
||||
|
||||
install_faas_cli() {
|
||||
curl -sLS https://cli.openfaas.com | $SUDO sh
|
||||
arkade get --progress=false faas-cli
|
||||
$SUDO install -m 755 $HOME/.arkade/bin/faas-cli /usr/local/bin/
|
||||
}
|
||||
|
||||
verify_system
|
||||
@ -192,6 +180,7 @@ install_required_packages
|
||||
$SUDO /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
echo "net.ipv4.conf.all.forwarding=1" | $SUDO tee -a /etc/sysctl.conf
|
||||
|
||||
install_arkade
|
||||
install_cni_plugins
|
||||
install_containerd
|
||||
install_faas_cli
|
||||
|
10
main.go
@ -7,14 +7,6 @@ import (
|
||||
"github.com/openfaas/faasd/cmd"
|
||||
)
|
||||
|
||||
// These values will be injected into these variables at the build time.
|
||||
var (
|
||||
// GitCommit Git Commit SHA
|
||||
GitCommit string
|
||||
// Version version of the CLI
|
||||
Version string
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
if _, ok := os.LookupEnv("CONTAINER_ID"); ok {
|
||||
@ -31,7 +23,7 @@ func main() {
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if err := cmd.Execute(Version, GitCommit); err != nil {
|
||||
if err := cmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
return
|
||||
|
@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
@ -86,7 +85,7 @@ func InitNetwork() (gocni.CNI, error) {
|
||||
}
|
||||
|
||||
netConfig := path.Join(CNIConfDir, defaultCNIConfFilename)
|
||||
if err := ioutil.WriteFile(netConfig, []byte(defaultCNIConf), 644); err != nil {
|
||||
if err := os.WriteFile(netConfig, []byte(defaultCNIConf), 0644); err != nil {
|
||||
return nil, fmt.Errorf("cannot write network config: %s", defaultCNIConfFilename)
|
||||
}
|
||||
|
||||
@ -151,7 +150,7 @@ func DeleteCNINetwork(ctx context.Context, cni gocni.CNI, client *containerd.Cli
|
||||
func GetIPAddress(container string, PID uint32) (string, error) {
|
||||
CNIDir := path.Join(CNIDataDir, defaultNetworkName)
|
||||
|
||||
files, err := ioutil.ReadDir(CNIDir)
|
||||
files, err := os.ReadDir(CNIDir)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read CNI dir for container %s: %v", container, err)
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
package cninetwork
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@ -15,7 +14,7 @@ eth1`
|
||||
PID := uint32(621)
|
||||
fullPath := filepath.Join(os.TempDir(), fileName)
|
||||
|
||||
err := ioutil.WriteFile(fullPath, []byte(body), 0700)
|
||||
err := os.WriteFile(fullPath, []byte(body), 0700)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
@ -24,7 +23,6 @@ eth1`
|
||||
}()
|
||||
|
||||
got, err := isCNIResultForPID(fullPath, container, PID)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
@ -43,7 +41,7 @@ eth1`
|
||||
PID := uint32(621)
|
||||
fullPath := filepath.Join(os.TempDir(), fileName)
|
||||
|
||||
err := ioutil.WriteFile(fullPath, []byte(body), 0700)
|
||||
err := os.WriteFile(fullPath, []byte(body), 0700)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
@ -52,10 +50,10 @@ eth1`
|
||||
}()
|
||||
|
||||
got, err := isCNIResultForPID(fullPath, container, PID)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
want := false
|
||||
if got != want {
|
||||
t.Fatalf("want %v, but got %v", want, got)
|
||||
|
39
pkg/connectivity.go
Normal file
@ -0,0 +1,39 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ConnectivityCheck checks if the controller can reach the
|
||||
// public Internet via HTTPS.
|
||||
// A license is required to use OpenFaaS CE for Commercial Use.
|
||||
func ConnectivityCheck() error {
|
||||
req, err := http.NewRequest(http.MethodGet, "https://checkip.amazonaws.com", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set("User-Agent", fmt.Sprintf("openfaas-ce/%s faas-netes", Version))
|
||||
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.Body != nil {
|
||||
defer res.Body.Close()
|
||||
}
|
||||
|
||||
if res.StatusCode != http.StatusOK {
|
||||
var body []byte
|
||||
if res.Body != nil {
|
||||
body, _ = io.ReadAll(res.Body)
|
||||
}
|
||||
|
||||
return fmt.Errorf("unexpected status code checking connectivity: %d, body: %s", res.StatusCode, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
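As a minimal usage sketch for the new ConnectivityCheck helper above (the wrapper program is illustrative and not part of this diff; only ConnectivityCheck and the github.com/openfaas/faasd/pkg import path come from the repository):

package main

import (
    "log"

    faasd "github.com/openfaas/faasd/pkg"
)

func main() {
    // Fail fast when outbound HTTPS is blocked, since ConnectivityCheck
    // needs to reach https://checkip.amazonaws.com.
    if err := faasd.ConnectivityCheck(); err != nil {
        log.Fatalf("connectivity check failed: %s", err)
    }
    log.Println("outbound HTTPS connectivity OK")
}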
@ -1,11 +1,14 @@
|
||||
package pkg
|
||||
|
||||
const (
|
||||
// FunctionNamespace is the default containerd namespace functions are created
|
||||
FunctionNamespace = "openfaas-fn"
|
||||
// DefaultFunctionNamespace is the default containerd namespace functions are created
|
||||
DefaultFunctionNamespace = "openfaas-fn"
|
||||
|
||||
// faasdNamespace is the containerd namespace services are created
|
||||
faasdNamespace = "openfaas"
|
||||
// NamespaceLabel indicates that a namespace is managed by faasd
|
||||
NamespaceLabel = "openfaas"
|
||||
|
||||
// FaasdNamespace is the containerd namespace services are created
|
||||
FaasdNamespace = "openfaas"
|
||||
|
||||
faasServicesPullAlways = false
|
||||
|
||||
|
@ -1,7 +1,6 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
@ -54,7 +53,7 @@ func (l *LocalResolver) rebuild() {
|
||||
l.Mutex.Lock()
|
||||
defer l.Mutex.Unlock()
|
||||
|
||||
fileData, fileErr := ioutil.ReadFile(l.Path)
|
||||
fileData, fileErr := os.ReadFile(l.Path)
|
||||
if fileErr != nil {
|
||||
log.Printf("resolver rebuild error: %s", fileErr.Error())
|
||||
return
|
||||
|
@ -58,10 +58,10 @@ func (r *requester) Query(ctx context.Context, req logs.Request) (<-chan logs.Me
|
||||
|
||||
// buildCmd returns the equivalent of
|
||||
//
|
||||
// journalctl -t <namespace>:<name> \
|
||||
// --output=json \
|
||||
// --since=<timestamp> \
|
||||
// <--follow> \
|
||||
// journalctl -t <namespace>:<name> \
|
||||
// --output=json \
|
||||
// --since=<timestamp> \
|
||||
// <--follow> \
|
||||
func buildCmd(ctx context.Context, req logs.Request) *exec.Cmd {
|
||||
// // set the cursor position based on req, default to 5m
|
||||
since := time.Now().Add(-5 * time.Minute)
|
||||
@ -71,7 +71,7 @@ func buildCmd(ctx context.Context, req logs.Request) *exec.Cmd {
|
||||
|
||||
namespace := req.Namespace
|
||||
if namespace == "" {
|
||||
namespace = faasd.FunctionNamespace
|
||||
namespace = faasd.DefaultFunctionNamespace
|
||||
}
|
||||
|
||||
// find the description of the fields here
|
||||
@ -105,12 +105,12 @@ func streamLogs(ctx context.Context, cmd *exec.Cmd, out io.ReadCloser, msgs chan
|
||||
|
||||
// will ensure `out` is closed and all related resources cleaned up
|
||||
go func() {
|
||||
err := cmd.Wait()
|
||||
log.Println("wait result", err)
|
||||
if err := cmd.Wait(); err != nil {
|
||||
log.Printf("journalctl exited with error: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
defer func() {
|
||||
log.Println("closing journal stream")
|
||||
close(msgs)
|
||||
}()
|
||||
|
||||
@ -176,7 +176,6 @@ func parseEntry(entry map[string]string) (logs.Message, error) {
|
||||
}
|
||||
|
||||
func logErrOut(out io.ReadCloser) {
|
||||
defer log.Println("stderr closed")
|
||||
defer out.Close()
|
||||
|
||||
io.Copy(log.Writer(), out)
|
||||
|
@ -20,9 +20,11 @@ func ReadFromEnv(hasEnv types.HasEnv) (*types.FaaSConfig, *ProviderConfig, error
|
||||
|
||||
serviceTimeout := types.ParseIntOrDurationValue(hasEnv.Getenv("service_timeout"), time.Second*60)
|
||||
|
||||
config.EnableHealth = true
|
||||
config.ReadTimeout = serviceTimeout
|
||||
config.WriteTimeout = serviceTimeout
|
||||
config.EnableBasicAuth = true
|
||||
config.MaxIdleConns = types.ParseIntValue(hasEnv.Getenv("max_idle_conns"), 1024)
|
||||
config.MaxIdleConnsPerHost = types.ParseIntValue(hasEnv.Getenv("max_idle_conns_per_host"), 1024)
|
||||
|
||||
port := types.ParseIntValue(hasEnv.Getenv("port"), 8081)
|
||||
config.TCPPort = &port
|
||||
|
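A sketch of how the defaults above could be exercised from a test in the same package; the package name and the testEnv stub are assumptions, only ReadFromEnv, the env keys, and the 8081 default port appear in this hunk:

package config

import "testing"

// testEnv is a hypothetical stub satisfying types.HasEnv for this sketch.
type testEnv map[string]string

func (e testEnv) Getenv(key string) string { return e[key] }

func Test_ReadFromEnv_Defaults(t *testing.T) {
    cfg, _, err := ReadFromEnv(testEnv{})
    if err != nil {
        t.Fatal(err)
    }
    // With no variables set, the port falls back to 8081 as shown above.
    if *cfg.TCPPort != 8081 {
        t.Fatalf("want default port 8081, got %d", *cfg.TCPPort)
    }
}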
@ -4,16 +4,16 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/openfaas/faas/gateway/requests"
|
||||
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
"github.com/openfaas/faasd/pkg"
|
||||
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
)
|
||||
@ -29,29 +29,45 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
body, _ := ioutil.ReadAll(r.Body)
|
||||
log.Printf("[Delete] request: %s\n", string(body))
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
|
||||
req := requests.DeleteFunctionRequest{}
|
||||
err := json.Unmarshal(body, &req)
|
||||
if err != nil {
|
||||
log.Printf("[Delete] error parsing input: %s\n", err)
|
||||
req := types.DeleteFunctionRequest{}
|
||||
if err := json.Unmarshal(body, &req); err != nil {
|
||||
log.Printf("[Delete] error parsing input: %s", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// namespace moved from the querystring into the body
|
||||
namespace := req.Namespace
|
||||
if namespace == "" {
|
||||
namespace = pkg.DefaultFunctionNamespace
|
||||
}
|
||||
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
valid, err := validNamespace(client.NamespaceService(), namespace)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
http.Error(w, "namespace not valid", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
name := req.FunctionName
|
||||
|
||||
function, err := GetFunction(client, name)
|
||||
function, err := GetFunction(client, name, namespace)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("service %s not found", name)
|
||||
msg := fmt.Sprintf("function %s.%s not found", name, namespace)
|
||||
log.Printf("[Delete] %s\n", msg)
|
||||
http.Error(w, msg, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
ctx := namespaces.WithNamespace(context.Background(), namespace)
|
||||
|
||||
// TODO: this needs to still happen if the task is paused
|
||||
if function.replicas != 0 {
|
||||
@ -61,13 +77,12 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
||||
}
|
||||
}
|
||||
|
||||
containerErr := service.Remove(ctx, client, name)
|
||||
if containerErr != nil {
|
||||
log.Printf("[Delete] error removing %s, %s\n", name, containerErr)
|
||||
http.Error(w, containerErr.Error(), http.StatusInternalServerError)
|
||||
if err := service.Remove(ctx, client, name); err != nil {
|
||||
log.Printf("[Delete] error removing %s, %s\n", name, err)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[Delete] deleted %s\n", name)
|
||||
log.Printf("[Delete] Removed: %s.%s\n", name, namespace)
|
||||
}
|
||||
}
|
||||
|
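Since the namespace has moved from the querystring into the request body, a delete call now carries a JSON payload along these lines; the FunctionName and Namespace fields are taken from the handler above, while the program and values are only an illustration:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/openfaas/faas-provider/types"
)

func main() {
    // Example body for the delete handler shown above.
    payload, _ := json.Marshal(types.DeleteFunctionRequest{
        FunctionName: "figlet",
        Namespace:    "dev",
    })
    fmt.Println(string(payload))
}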
@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
@ -17,10 +17,9 @@ import (
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/oci"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
"github.com/pkg/errors"
|
||||
@ -29,8 +28,8 @@ import (
|
||||
|
||||
const annotationLabelPrefix = "com.openfaas.annotations."
|
||||
|
||||
// MakeDeployHandler returns a handler to deploy a function
|
||||
func MakeDeployHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if r.Body == nil {
|
||||
@ -40,30 +39,51 @@ func MakeDeployHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
body, _ := ioutil.ReadAll(r.Body)
|
||||
log.Printf("[Deploy] request: %s\n", string(body))
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
|
||||
req := types.FunctionDeployment{}
|
||||
err := json.Unmarshal(body, &req)
|
||||
if err != nil {
|
||||
log.Printf("[Deploy] - error parsing input: %s\n", err)
|
||||
log.Printf("[Deploy] - error parsing input: %s", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = validateSecrets(secretMountPath, req.Secrets)
|
||||
namespace := getRequestNamespace(req.Namespace)
|
||||
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
valid, err := validNamespace(client.NamespaceService(), namespace)
|
||||
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
http.Error(w, "namespace not valid", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
namespaceSecretMountPath := getNamespaceSecretMountPath(secretMountPath, namespace)
|
||||
err = validateSecrets(namespaceSecretMountPath, req.Secrets)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
name := req.Service
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
ctx := namespaces.WithNamespace(context.Background(), namespace)
|
||||
|
||||
deployErr := deploy(ctx, req, client, cni, secretMountPath, alwaysPull)
|
||||
if deployErr != nil {
|
||||
log.Printf("[Deploy] error deploying %s, error: %s\n", name, deployErr)
|
||||
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
||||
if err := preDeploy(client, 1); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
log.Printf("[Deploy] error deploying %s, error: %s\n", name, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := deploy(ctx, req, client, cni, namespaceSecretMountPath, alwaysPull); err != nil {
|
||||
log.Printf("[Deploy] error deploying %s, error: %s\n", name, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -110,7 +130,7 @@ func deploy(ctx context.Context, req types.FunctionDeployment, client *container
|
||||
}
|
||||
|
||||
envs := prepareEnv(req.EnvProcess, req.EnvVars)
|
||||
mounts := getMounts()
|
||||
mounts := getOSMounts()
|
||||
|
||||
for _, secret := range req.Secrets {
|
||||
mounts = append(mounts, specs.Mount{
|
||||
@ -125,7 +145,7 @@ func deploy(ctx context.Context, req types.FunctionDeployment, client *container
|
||||
|
||||
labels, err := buildLabels(&req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to apply labels to conatiner: %s, error: %s", name, err)
|
||||
return fmt.Errorf("unable to apply labels to container: %s, error: %w", name, err)
|
||||
}
|
||||
|
||||
var memory *specs.LinuxMemory
|
||||
@ -156,15 +176,34 @@ func deploy(ctx context.Context, req types.FunctionDeployment, client *container
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create container: %s, error: %s", name, err)
|
||||
return fmt.Errorf("unable to create container: %s, error: %w", name, err)
|
||||
}
|
||||
|
||||
return createTask(ctx, client, container, cni)
|
||||
return createTask(ctx, container, cni)
|
||||
|
||||
}
|
||||
|
||||
// countFunctions returns the number of functions deployed along with a map with a count
|
||||
// in each namespace
|
||||
func countFunctions(client *containerd.Client) (int64, int64, error) {
|
||||
count := int64(0)
|
||||
namespaceCount := int64(0)
|
||||
|
||||
namespaces := ListNamespaces(client)
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
fns, err := ListFunctions(client, namespace)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
namespaceCount++
|
||||
count += int64(len(fns))
|
||||
}
|
||||
|
||||
return count, namespaceCount, nil
|
||||
}
|
||||
|
||||
func buildLabels(request *types.FunctionDeployment) (map[string]string, error) {
|
||||
// Adapted from faas-swarm/handlers/deploy.go:buildLabels
|
||||
labels := map[string]string{}
|
||||
|
||||
if request.Labels != nil {
|
||||
@ -187,14 +226,14 @@ func buildLabels(request *types.FunctionDeployment) (map[string]string, error) {
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
func createTask(ctx context.Context, client *containerd.Client, container containerd.Container, cni gocni.CNI) error {
|
||||
func createTask(ctx context.Context, container containerd.Container, cni gocni.CNI) error {
|
||||
|
||||
name := container.ID()
|
||||
|
||||
task, taskErr := container.NewTask(ctx, cio.BinaryIO("/usr/local/bin/faasd", nil))
|
||||
|
||||
if taskErr != nil {
|
||||
return fmt.Errorf("unable to start task: %s, error: %s", name, taskErr)
|
||||
return fmt.Errorf("unable to start task: %s, error: %w", name, taskErr)
|
||||
}
|
||||
|
||||
log.Printf("Container ID: %s\tTask ID %s:\tTask PID: %d\t\n", name, task.ID(), task.Pid())
|
||||
@ -213,9 +252,8 @@ func createTask(ctx context.Context, client *containerd.Client, container contai
|
||||
|
||||
log.Printf("%s has IP: %s.\n", name, ip)
|
||||
|
||||
_, waitErr := task.Wait(ctx)
|
||||
if waitErr != nil {
|
||||
return errors.Wrapf(waitErr, "Unable to wait for task to start: %s", name)
|
||||
if _, err := task.Wait(ctx); err != nil {
|
||||
return errors.Wrapf(err, "Unable to wait for task to start: %s", name)
|
||||
}
|
||||
|
||||
if startErr := task.Start(ctx); startErr != nil {
|
||||
@ -246,20 +284,28 @@ func prepareEnv(envProcess string, reqEnvVars map[string]string) []string {
|
||||
return envs
|
||||
}
|
||||
|
||||
func getMounts() []specs.Mount {
|
||||
wd, _ := os.Getwd()
|
||||
// getOSMounts provides a mount for os-specific files such
|
||||
// as the hosts file and resolv.conf
|
||||
func getOSMounts() []specs.Mount {
|
||||
// Prior to hosts_dir env-var, this value was set to
|
||||
// os.Getwd()
|
||||
hostsDir := "/var/lib/faasd"
|
||||
if v, ok := os.LookupEnv("hosts_dir"); ok && len(v) > 0 {
|
||||
hostsDir = v
|
||||
}
|
||||
|
||||
mounts := []specs.Mount{}
|
||||
mounts = append(mounts, specs.Mount{
|
||||
Destination: "/etc/resolv.conf",
|
||||
Type: "bind",
|
||||
Source: path.Join(wd, "resolv.conf"),
|
||||
Source: path.Join(hostsDir, "resolv.conf"),
|
||||
Options: []string{"rbind", "ro"},
|
||||
})
|
||||
|
||||
mounts = append(mounts, specs.Mount{
|
||||
Destination: "/etc/hosts",
|
||||
Type: "bind",
|
||||
Source: path.Join(wd, "hosts"),
|
||||
Source: path.Join(hostsDir, "hosts"),
|
||||
Options: []string{"rbind", "ro"},
|
||||
})
|
||||
return mounts
|
||||
@ -291,3 +337,17 @@ func withMemory(mem *specs.LinuxMemory) oci.SpecOpts {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func preDeploy(client *containerd.Client, additional int64) error {
|
||||
count, countNs, err := countFunctions(client)
|
||||
log.Printf("Function count: %d, Namespace count: %d\n", count, countNs)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if count+additional > faasdMaxFunctions {
|
||||
return fmt.Errorf("the OpenFaaS CE EULA allows %d/%d function(s), upgrade to faasd Pro to continue", faasdMaxFunctions, count+additional)
|
||||
} else if countNs > faasdMaxNs {
|
||||
return fmt.Errorf("the OpenFaaS CE EULA allows %d/%d namespace(s), upgrade to faasd Pro to continue", faasdMaxNs, countNs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
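A minimal in-package test sketch for the getOSMounts change above, assuming only the behaviour visible in this diff: the hosts_dir environment variable overrides the default /var/lib/faasd source for the resolv.conf and hosts bind mounts. The test name and values are hypothetical:

package handlers

import (
    "os"
    "testing"
)

func Test_getOSMounts_HostsDirOverride(t *testing.T) {
    os.Setenv("hosts_dir", "/tmp/faasd-test")
    defer os.Unsetenv("hosts_dir")

    mounts := getOSMounts()
    if len(mounts) != 2 {
        t.Fatalf("want 2 mounts, got %d", len(mounts))
    }
    // The first mount appended is /etc/resolv.conf, sourced from hosts_dir.
    if want := "/tmp/faasd-test/resolv.conf"; mounts[0].Source != want {
        t.Fatalf("want source %s, got %s", want, mounts[0].Source)
    }
}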
@ -53,7 +53,7 @@ func Test_BuildLabels_WithAnnotations(t *testing.T) {
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(val, tc.result) {
|
||||
t.Errorf("Got: %s, expected %s", val, tc.result)
|
||||
t.Errorf("Want: %s, got: %s", val, tc.result)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -11,55 +11,16 @@ import (
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
type Function struct {
|
||||
name string
|
||||
namespace string
|
||||
image string
|
||||
pid uint32
|
||||
replicas int
|
||||
IP string
|
||||
labels map[string]string
|
||||
annotations map[string]string
|
||||
secrets []string
|
||||
envVars map[string]string
|
||||
envProcess string
|
||||
}
|
||||
|
||||
// ListFunctions returns a map of all functions with running tasks on namespace
|
||||
func ListFunctions(client *containerd.Client) (map[string]*Function, error) {
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
functions := make(map[string]*Function)
|
||||
|
||||
containers, err := client.Containers(ctx)
|
||||
if err != nil {
|
||||
return functions, err
|
||||
}
|
||||
|
||||
for _, c := range containers {
|
||||
name := c.ID()
|
||||
f, err := GetFunction(client, name)
|
||||
if err != nil {
|
||||
log.Printf("error getting function %s: ", name)
|
||||
return functions, err
|
||||
}
|
||||
functions[name] = &f
|
||||
}
|
||||
|
||||
return functions, nil
|
||||
}
|
||||
|
||||
// GetFunction returns a function that matches name
|
||||
func GetFunction(client *containerd.Client, name string) (Function, error) {
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
func GetFunction(client *containerd.Client, name string, namespace string) (Function, error) {
|
||||
ctx := namespaces.WithNamespace(context.Background(), namespace)
|
||||
fn := Function{}
|
||||
|
||||
c, err := client.LoadContainer(ctx, name)
|
||||
if err != nil {
|
||||
return Function{}, fmt.Errorf("unable to find function: %s, error %s", name, err)
|
||||
return Function{}, fmt.Errorf("unable to find function: %s, error %w", name, err)
|
||||
}
|
||||
|
||||
image, err := c.Image(ctx)
|
||||
@ -71,27 +32,34 @@ func GetFunction(client *containerd.Client, name string) (Function, error) {
|
||||
allLabels, labelErr := c.Labels(ctx)
|
||||
|
||||
if labelErr != nil {
|
||||
log.Printf("cannot list container %s labels: %s", containerName, labelErr.Error())
|
||||
log.Printf("cannot list container %s labels: %s", containerName, labelErr)
|
||||
}
|
||||
|
||||
labels, annotations := buildLabelsAndAnnotations(allLabels)
|
||||
|
||||
spec, err := c.Spec(ctx)
|
||||
if err != nil {
|
||||
return Function{}, fmt.Errorf("unable to load function spec for reading secrets: %s, error %s", name, err)
|
||||
return Function{}, fmt.Errorf("unable to load function %s error: %w", name, err)
|
||||
}
|
||||
|
||||
info, err := c.Info(ctx)
|
||||
if err != nil {
|
||||
return Function{}, fmt.Errorf("can't load info for: %s, error %w", name, err)
|
||||
}
|
||||
|
||||
envVars, envProcess := readEnvFromProcessEnv(spec.Process.Env)
|
||||
secrets := readSecretsFromMounts(spec.Mounts)
|
||||
|
||||
fn.name = containerName
|
||||
fn.namespace = faasd.FunctionNamespace
|
||||
fn.namespace = namespace
|
||||
fn.image = image.Name()
|
||||
fn.labels = labels
|
||||
fn.annotations = annotations
|
||||
fn.secrets = secrets
|
||||
fn.envVars = envVars
|
||||
fn.envProcess = envProcess
|
||||
fn.createdAt = info.CreatedAt
|
||||
fn.memoryLimit = readMemoryLimitFromSpec(spec)
|
||||
|
||||
replicas := 0
|
||||
task, err := c.Task(ctx, nil)
|
||||
@ -99,7 +67,7 @@ func GetFunction(client *containerd.Client, name string) (Function, error) {
|
||||
// Task for container exists
|
||||
svc, err := task.Status(ctx)
|
||||
if err != nil {
|
||||
return Function{}, fmt.Errorf("unable to get task status for container: %s %s", name, err)
|
||||
return Function{}, fmt.Errorf("unable to get task status for container: %s %w", name, err)
|
||||
}
|
||||
|
||||
if svc.Status == "running" {
|
||||
@ -173,3 +141,10 @@ func buildLabelsAndAnnotations(ctrLabels map[string]string) (map[string]string,
|
||||
|
||||
return labels, annotations
|
||||
}
|
||||
|
||||
func readMemoryLimitFromSpec(spec *specs.Spec) int64 {
|
||||
if spec.Linux == nil || spec.Linux.Resources == nil || spec.Linux.Resources.Memory == nil || spec.Linux.Resources.Memory.Limit == nil {
|
||||
return 0
|
||||
}
|
||||
return *spec.Linux.Resources.Memory.Limit
|
||||
}
|
65
pkg/provider/handlers/function_list.go
Normal file
@ -0,0 +1,65 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
)
|
||||
|
||||
type Function struct {
|
||||
name string
|
||||
namespace string
|
||||
image string
|
||||
pid uint32
|
||||
replicas int
|
||||
IP string
|
||||
labels map[string]string
|
||||
annotations map[string]string
|
||||
secrets []string
|
||||
envVars map[string]string
|
||||
envProcess string
|
||||
memoryLimit int64
|
||||
createdAt time.Time
|
||||
}
|
||||
|
||||
// ListFunctions returns a map of all functions with running tasks on namespace
|
||||
func ListFunctions(client *containerd.Client, namespace string) (map[string]*Function, error) {
|
||||
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
valid, err := validNamespace(client.NamespaceService(), namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !valid {
|
||||
return nil, errors.New("namespace not valid")
|
||||
}
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), namespace)
|
||||
functions := make(map[string]*Function)
|
||||
|
||||
containers, err := client.Containers(ctx)
|
||||
if err != nil {
|
||||
return functions, err
|
||||
}
|
||||
|
||||
for _, c := range containers {
|
||||
name := c.ID()
|
||||
f, err := GetFunction(client, name, namespace)
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "unable to get IP address for container") {
|
||||
log.Printf("List functions, skipping: %s, error: %s", name, err)
|
||||
}
|
||||
|
||||
} else {
|
||||
functions[name] = &f
|
||||
}
|
||||
}
|
||||
|
||||
return functions, nil
|
||||
}
|
133
pkg/provider/handlers/function_test.go
Normal file
@ -0,0 +1,133 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
func Test_BuildLabelsAndAnnotationsFromServiceSpec_Annotations(t *testing.T) {
|
||||
container := map[string]string{
|
||||
"qwer": "ty",
|
||||
"dvor": "ak",
|
||||
fmt.Sprintf("%scurrent-time", annotationLabelPrefix): "5 Nov 20:10:20 PST 1955",
|
||||
fmt.Sprintf("%sfuture-time", annotationLabelPrefix): "21 Oct 20:10:20 PST 2015",
|
||||
}
|
||||
|
||||
labels, annotation := buildLabelsAndAnnotations(container)
|
||||
|
||||
if len(labels) != 2 {
|
||||
t.Errorf("want: %d labels got: %d", 2, len(labels))
|
||||
}
|
||||
|
||||
if len(annotation) != 2 {
|
||||
t.Errorf("want: %d annotation got: %d", 1, len(annotation))
|
||||
}
|
||||
|
||||
if _, ok := annotation["current-time"]; !ok {
|
||||
t.Errorf("want: '%s' entry in annotation map got: key not found", "current-time")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_SplitMountToSecrets(t *testing.T) {
|
||||
type testCase struct {
|
||||
Name string
|
||||
Input []specs.Mount
|
||||
Want []string
|
||||
}
|
||||
tests := []testCase{
|
||||
{Name: "No matching openfaas secrets", Input: []specs.Mount{{Destination: "/foo/"}}, Want: []string{}},
|
||||
{Name: "Nil mounts", Input: nil, Want: []string{}},
|
||||
{Name: "No Mounts", Input: []specs.Mount{{Destination: "/foo/"}}, Want: []string{}},
|
||||
{Name: "One Mounts IS secret", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}}, Want: []string{"secret1"}},
|
||||
{Name: "Multiple Mounts 1 secret", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}, {Destination: "/some/other/path"}}, Want: []string{"secret1"}},
|
||||
{Name: "Multiple Mounts all secrets", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}, {Destination: "/var/openfaas/secrets/secret2"}}, Want: []string{"secret1", "secret2"}},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
got := readSecretsFromMounts(tc.Input)
|
||||
if !reflect.DeepEqual(got, tc.Want) {
|
||||
t.Fatalf("Want %s, got %s", tc.Want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ProcessEnvToEnvVars(t *testing.T) {
|
||||
type testCase struct {
|
||||
Name string
|
||||
Input []string
|
||||
Want map[string]string
|
||||
fprocess string
|
||||
}
|
||||
tests := []testCase{
|
||||
{Name: "No matching EnvVars", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "fprocess=python index.py"}, Want: make(map[string]string), fprocess: "python index.py"},
|
||||
{Name: "No EnvVars", Input: []string{}, Want: make(map[string]string), fprocess: ""},
|
||||
{Name: "One EnvVar", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "fprocess=python index.py", "env=this"}, Want: map[string]string{"env": "this"}, fprocess: "python index.py"},
|
||||
{Name: "Multiple EnvVars", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "this=that", "env=var", "fprocess=python index.py"}, Want: map[string]string{"this": "that", "env": "var"}, fprocess: "python index.py"},
|
||||
{Name: "Nil EnvVars", Input: nil, Want: make(map[string]string)},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
got, fprocess := readEnvFromProcessEnv(tc.Input)
|
||||
if !reflect.DeepEqual(got, tc.Want) {
|
||||
t.Fatalf("Want: %s, got: %s", tc.Want, got)
|
||||
}
|
||||
|
||||
if fprocess != tc.fprocess {
|
||||
t.Fatalf("Want fprocess: %s, got: %s", tc.fprocess, got)
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_findNamespace(t *testing.T) {
|
||||
type testCase struct {
|
||||
Name string
|
||||
foundNamespaces []string
|
||||
namespace string
|
||||
Want bool
|
||||
}
|
||||
tests := []testCase{
|
||||
{Name: "Namespace Found", namespace: "fn", foundNamespaces: []string{"fn", "openfaas-fn"}, Want: true},
|
||||
{Name: "namespace Not Found", namespace: "fn", foundNamespaces: []string{"openfaas-fn"}, Want: false},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
got := findNamespace(tc.namespace, tc.foundNamespaces)
|
||||
if got != tc.Want {
|
||||
t.Fatalf("Want %t, got %t", tc.Want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_readMemoryLimitFromSpec(t *testing.T) {
|
||||
type testCase struct {
|
||||
Name string
|
||||
Spec *specs.Spec
|
||||
Want int64
|
||||
}
|
||||
testLimit := int64(64)
|
||||
tests := []testCase{
|
||||
{Name: "specs.Linux not found", Spec: &specs.Spec{Linux: nil}, Want: int64(0)},
|
||||
{Name: "specs.LinuxResource not found", Spec: &specs.Spec{Linux: &specs.Linux{Resources: nil}}, Want: int64(0)},
|
||||
{Name: "specs.LinuxMemory not found", Spec: &specs.Spec{Linux: &specs.Linux{Resources: &specs.LinuxResources{Memory: nil}}}, Want: int64(0)},
|
||||
{Name: "specs.LinuxMemory.Limit not found", Spec: &specs.Spec{Linux: &specs.Linux{Resources: &specs.LinuxResources{Memory: &specs.LinuxMemory{Limit: nil}}}}, Want: int64(0)},
|
||||
{Name: "Memory limit set as Want", Spec: &specs.Spec{Linux: &specs.Linux{Resources: &specs.LinuxResources{Memory: &specs.LinuxMemory{Limit: &testLimit}}}}, Want: int64(64)},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
got := readMemoryLimitFromSpec(tc.Spec)
|
||||
if got != tc.Want {
|
||||
t.Fatalf("Want %d, got %d", tc.Want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,86 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_BuildLabelsAndAnnotationsFromServiceSpec_Annotations(t *testing.T) {
|
||||
container := map[string]string{
|
||||
"qwer": "ty",
|
||||
"dvor": "ak",
|
||||
fmt.Sprintf("%scurrent-time", annotationLabelPrefix): "5 Nov 20:10:20 PST 1955",
|
||||
fmt.Sprintf("%sfuture-time", annotationLabelPrefix): "21 Oct 20:10:20 PST 2015",
|
||||
}
|
||||
|
||||
labels, annotation := buildLabelsAndAnnotations(container)
|
||||
|
||||
if len(labels) != 2 {
|
||||
t.Errorf("want: %d labels got: %d", 2, len(labels))
|
||||
}
|
||||
|
||||
if len(annotation) != 2 {
|
||||
t.Errorf("want: %d annotation got: %d", 1, len(annotation))
|
||||
}
|
||||
|
||||
if _, ok := annotation["current-time"]; !ok {
|
||||
t.Errorf("want: '%s' entry in annotation map got: key not found", "current-time")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_SplitMountToSecrets(t *testing.T) {
|
||||
type test struct {
|
||||
Name string
|
||||
Input []specs.Mount
|
||||
Expected []string
|
||||
}
|
||||
tests := []test{
|
||||
{Name: "No matching openfaas secrets", Input: []specs.Mount{{Destination: "/foo/"}}, Expected: []string{}},
|
||||
{Name: "Nil mounts", Input: nil, Expected: []string{}},
|
||||
{Name: "No Mounts", Input: []specs.Mount{{Destination: "/foo/"}}, Expected: []string{}},
|
||||
{Name: "One Mounts IS secret", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}}, Expected: []string{"secret1"}},
|
||||
{Name: "Multiple Mounts 1 secret", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}, {Destination: "/some/other/path"}}, Expected: []string{"secret1"}},
|
||||
{Name: "Multiple Mounts all secrets", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}, {Destination: "/var/openfaas/secrets/secret2"}}, Expected: []string{"secret1", "secret2"}},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
got := readSecretsFromMounts(tc.Input)
|
||||
if !reflect.DeepEqual(got, tc.Expected) {
|
||||
t.Fatalf("expected %s, got %s", tc.Expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ProcessEnvToEnvVars(t *testing.T) {
|
||||
type test struct {
|
||||
Name string
|
||||
Input []string
|
||||
Expected map[string]string
|
||||
fprocess string
|
||||
}
|
||||
tests := []test{
|
||||
{Name: "No matching EnvVars", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "fprocess=python index.py"}, Expected: make(map[string]string), fprocess: "python index.py"},
|
||||
{Name: "No EnvVars", Input: []string{}, Expected: make(map[string]string), fprocess: ""},
|
||||
{Name: "One EnvVar", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "fprocess=python index.py", "env=this"}, Expected: map[string]string{"env": "this"}, fprocess: "python index.py"},
|
||||
{Name: "Multiple EnvVars", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "this=that", "env=var", "fprocess=python index.py"}, Expected: map[string]string{"this": "that", "env": "var"}, fprocess: "python index.py"},
|
||||
{Name: "Nil EnvVars", Input: nil, Expected: make(map[string]string)},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
got, fprocess := readEnvFromProcessEnv(tc.Input)
|
||||
if !reflect.DeepEqual(got, tc.Expected) {
|
||||
t.Fatalf("expected: %s, got: %s", tc.Expected, got)
|
||||
}
|
||||
|
||||
if fprocess != tc.fprocess {
|
||||
t.Fatalf("expected fprocess: %s, got: %s", tc.fprocess, got)
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -12,10 +12,10 @@ const (
|
||||
OrchestrationIdentifier = "containerd"
|
||||
|
||||
// ProviderName name of the provider
|
||||
ProviderName = "faasd"
|
||||
ProviderName = "faasd-ce"
|
||||
)
|
||||
|
||||
//MakeInfoHandler creates handler for /system/info endpoint
|
||||
// MakeInfoHandler creates handler for /system/info endpoint
|
||||
func MakeInfoHandler(version, sha string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Body != nil {
|
||||
@ -31,8 +31,8 @@ func MakeInfoHandler(version, sha string) http.HandlerFunc {
|
||||
},
|
||||
}
|
||||
|
||||
jsonOut, marshalErr := json.Marshal(infoResponse)
|
||||
if marshalErr != nil {
|
||||
jsonOut, err := json.Marshal(infoResponse)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
@ -42,3 +42,6 @@ func MakeInfoHandler(version, sha string) http.HandlerFunc {
|
||||
w.Write(jsonOut)
|
||||
}
|
||||
}
|
||||
|
||||
const faasdMaxFunctions = 15
|
||||
const faasdMaxNs = 1
|
||||
|
@ -4,8 +4,10 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
const watchdogPort = 8080
|
||||
@ -19,11 +21,18 @@ func NewInvokeResolver(client *containerd.Client) *InvokeResolver {
|
||||
}
|
||||
|
||||
func (i *InvokeResolver) Resolve(functionName string) (url.URL, error) {
|
||||
log.Printf("Resolve: %q\n", functionName)
|
||||
actualFunctionName := functionName
|
||||
log.Printf("Resolve: %q\n", actualFunctionName)
|
||||
|
||||
function, err := GetFunction(i.client, functionName)
|
||||
namespace := getNamespaceOrDefault(functionName, faasd.DefaultFunctionNamespace)
|
||||
|
||||
if strings.Contains(functionName, ".") {
|
||||
actualFunctionName = strings.TrimSuffix(functionName, "."+namespace)
|
||||
}
|
||||
|
||||
function, err := GetFunction(i.client, actualFunctionName, namespace)
|
||||
if err != nil {
|
||||
return url.URL{}, fmt.Errorf("%s not found", functionName)
|
||||
return url.URL{}, fmt.Errorf("%s not found", actualFunctionName)
|
||||
}
|
||||
|
||||
serviceIP := function.IP
|
||||
@ -37,3 +46,11 @@ func (i *InvokeResolver) Resolve(functionName string) (url.URL, error) {
|
||||
|
||||
return *urlRes, nil
|
||||
}
|
||||
|
||||
func getNamespaceOrDefault(name, defaultNamespace string) string {
|
||||
namespace := defaultNamespace
|
||||
if strings.Contains(name, ".") {
|
||||
namespace = name[strings.LastIndexAny(name, ".")+1:]
|
||||
}
|
||||
return namespace
|
||||
}
|
||||
|
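A small in-package test sketch of the dot-suffix convention handled by Resolve and getNamespaceOrDefault above; the function names used as input are examples only:

package handlers

import "testing"

func Test_getNamespaceOrDefault_Example(t *testing.T) {
    // "figlet.dev" resolves to the "dev" namespace.
    if got := getNamespaceOrDefault("figlet.dev", "openfaas-fn"); got != "dev" {
        t.Fatalf("want dev, got %s", got)
    }
    // A bare name falls back to the default namespace.
    if got := getNamespaceOrDefault("figlet", "openfaas-fn"); got != "openfaas-fn" {
        t.Fatalf("want openfaas-fn, got %s", got)
    }
}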
285
pkg/provider/handlers/mutate_namespaces.go
Normal file
@ -0,0 +1,285 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
)
|
||||
|
||||
func MakeMutateNamespace(client *containerd.Client) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Body != nil {
|
||||
defer r.Body.Close()
|
||||
}
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodPost:
|
||||
createNamespace(client, w, r)
|
||||
case http.MethodGet:
|
||||
getNamespace(client, w, r)
|
||||
case http.MethodDelete:
|
||||
deleteNamespace(client, w, r)
|
||||
case http.MethodPut:
|
||||
updateNamespace(client, w, r)
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateNamespace(client *containerd.Client, w http.ResponseWriter, r *http.Request) {
|
||||
req, err := parseNamespaceRequest(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), err.(*HttpError).Status)
|
||||
return
|
||||
}
|
||||
|
||||
namespaceExists, err := namespaceExists(r.Context(), client, req.Name)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !namespaceExists {
|
||||
http.Error(w, fmt.Sprintf("namespace %s not found", req.Name), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
originalLabels, err := client.NamespaceService().Labels(r.Context(), req.Name)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !hasOpenFaaSLabel(originalLabels) {
|
||||
http.Error(w, fmt.Sprintf("namespace %s is not an openfaas namespace", req.Name), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var exclusions []string
|
||||
|
||||
// build exclusions
|
||||
for key := range originalLabels {
|
||||
if _, ok := req.Labels[key]; !ok {
|
||||
exclusions = append(exclusions, key)
|
||||
}
|
||||
}
|
||||
|
||||
// Call SetLabel with empty string if label is to be removed
|
||||
for _, key := range exclusions {
|
||||
if err := client.NamespaceService().SetLabel(r.Context(), req.Name, key, ""); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Now add the new labels
|
||||
for key, value := range req.Labels {
|
||||
if err := client.NamespaceService().SetLabel(r.Context(), req.Name, key, value); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
}
|
||||
|
||||
func deleteNamespace(client *containerd.Client, w http.ResponseWriter, r *http.Request) {
|
||||
req, err := parseNamespaceRequest(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), err.(*HttpError).Status)
|
||||
return
|
||||
}
|
||||
|
||||
if err := client.NamespaceService().Delete(r.Context(), req.Name); err != nil {
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
http.Error(w, fmt.Sprintf("namespace %s not found", req.Name), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
}
|
||||
|
||||
func namespaceExists(ctx context.Context, client *containerd.Client, name string) (bool, error) {
|
||||
ns, err := client.NamespaceService().List(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, namespace := range ns {
|
||||
if namespace == name {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return found, nil
|
||||
}
|
||||
|
||||
func getNamespace(client *containerd.Client, w http.ResponseWriter, r *http.Request) {
|
||||
req, err := parseNamespaceRequest(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), err.(*HttpError).Status)
|
||||
return
|
||||
}
|
||||
|
||||
namespaceExists, err := namespaceExists(r.Context(), client, req.Name)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !namespaceExists {
|
||||
http.Error(w, fmt.Sprintf("namespace %s not found", req.Name), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
labels, err := client.NamespaceService().Labels(r.Context(), req.Name)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !hasOpenFaaSLabel(labels) {
|
||||
http.Error(w, fmt.Sprintf("namespace %s not found", req.Name), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
res := types.FunctionNamespace{
|
||||
Name: req.Name,
|
||||
Labels: labels,
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if err := json.NewEncoder(w).Encode(res); err != nil {
|
||||
log.Printf("Get Namespace error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func createNamespace(client *containerd.Client, w http.ResponseWriter, r *http.Request) {
|
||||
req, err := parseNamespaceRequest(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), err.(*HttpError).Status)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
namespaces, err := client.NamespaceService().List(r.Context())
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, namespace := range namespaces {
|
||||
if namespace == req.Name {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
http.Error(w, fmt.Sprintf("namespace %s already exists", req.Name), http.StatusConflict)
|
||||
return
|
||||
}
|
||||
|
||||
if err := client.NamespaceService().Create(r.Context(), req.Name, req.Labels); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
}
|
||||
|
||||
// getNamespace returns a namespace object or an error
|
||||
func parseNamespaceRequest(r *http.Request) (types.FunctionNamespace, error) {
|
||||
var req types.FunctionNamespace
|
||||
|
||||
vars := mux.Vars(r)
|
||||
namespaceInPath := vars["name"]
|
||||
|
||||
if r.Method == http.MethodGet {
|
||||
if namespaceInPath == "" {
|
||||
return req, &HttpError{
|
||||
Err: fmt.Errorf("namespace not specified in URL"),
|
||||
Status: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
return types.FunctionNamespace{
|
||||
Name: namespaceInPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
|
||||
if err := json.Unmarshal(body, &req); err != nil {
|
||||
return req, &HttpError{
|
||||
Err: fmt.Errorf("error parsing request body: %s", err.Error()),
|
||||
Status: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
if r.Method != http.MethodPost {
|
||||
if namespaceInPath == "" {
|
||||
return req, &HttpError{
|
||||
Err: fmt.Errorf("namespace not specified in URL"),
|
||||
Status: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
if req.Name != namespaceInPath {
|
||||
return req, &HttpError{
|
||||
Err: fmt.Errorf("namespace in request body does not match namespace in URL"),
|
||||
Status: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if req.Name == "" {
|
||||
return req, &HttpError{
|
||||
Err: fmt.Errorf("namespace not specified in request body"),
|
||||
Status: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
if ok := hasOpenFaaSLabel(req.Labels); !ok {
|
||||
return req, &HttpError{
|
||||
Err: fmt.Errorf("request does not have openfaas=1 label"),
|
||||
Status: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func hasOpenFaaSLabel(labels map[string]string) bool {
|
||||
if v, ok := labels["openfaas"]; ok && v == "1" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type HttpError struct {
|
||||
Err error
|
||||
Status int
|
||||
}
|
||||
|
||||
func (e *HttpError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
pkg/provider/handlers/namespaces.go (new file)
@@ -0,0 +1,62 @@
package handlers

import (
	"context"
	"encoding/json"
	"log"
	"net/http"

	"github.com/containerd/containerd"
	"github.com/openfaas/faasd/pkg"
	faasd "github.com/openfaas/faasd/pkg"
)

func MakeNamespacesLister(client *containerd.Client) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		list := ListNamespaces(client)
		body, _ := json.Marshal(list)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write(body)
	}
}

func ListNamespaces(client *containerd.Client) []string {
	set := []string{faasd.DefaultFunctionNamespace}

	store := client.NamespaceService()

	namespaces, err := store.List(context.Background())
	if err != nil {
		log.Printf("Error listing namespaces: %s", err.Error())
		return set
	}

	for _, namespace := range namespaces {
		labels, err := store.Labels(context.Background(), namespace)
		if err != nil {
			log.Printf("Error listing label for namespace %s: %s", namespace, err.Error())
			continue
		}

		if _, found := labels[pkg.NamespaceLabel]; found {
			set = append(set, namespace)
		}
	}

	if len(set) == 0 {
		set = append(set, faasd.DefaultFunctionNamespace)
	}

	return set
}

func findNamespace(target string, items []string) bool {
	for _, n := range items {
		if n == target {
			return true
		}
	}
	return false
}
@ -5,6 +5,8 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
)
|
||||
@ -13,10 +15,23 @@ func MakeReadHandler(client *containerd.Client) func(w http.ResponseWriter, r *h
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
res := []types.FunctionStatus{}
|
||||
fns, err := ListFunctions(client)
|
||||
lookupNamespace := getRequestNamespace(readNamespaceFromQuery(r))
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
valid, err := validNamespace(client.NamespaceService(), lookupNamespace)
|
||||
if err != nil {
|
||||
log.Printf("[Read] error listing functions. Error: %s\n", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
http.Error(w, "namespace not valid", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
res := []types.FunctionStatus{}
|
||||
fns, err := ListFunctions(client, lookupNamespace)
|
||||
if err != nil {
|
||||
log.Printf("[Read] error listing functions. Error: %s", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
@ -24,7 +39,8 @@ func MakeReadHandler(client *containerd.Client) func(w http.ResponseWriter, r *h
|
||||
for _, fn := range fns {
|
||||
annotations := &fn.annotations
|
||||
labels := &fn.labels
|
||||
res = append(res, types.FunctionStatus{
|
||||
memory := resource.NewQuantity(fn.memoryLimit, resource.BinarySI)
|
||||
status := types.FunctionStatus{
|
||||
Name: fn.name,
|
||||
Image: fn.image,
|
||||
Replicas: uint64(fn.replicas),
|
||||
@ -34,13 +50,22 @@ func MakeReadHandler(client *containerd.Client) func(w http.ResponseWriter, r *h
|
||||
Secrets: fn.secrets,
|
||||
EnvVars: fn.envVars,
|
||||
EnvProcess: fn.envProcess,
|
||||
})
|
||||
CreatedAt: fn.createdAt,
|
||||
}
|
||||
|
||||
// Do not remove below memory check for 0
|
||||
// Memory limit should not be included in status until set explicitly
|
||||
limit := &types.FunctionResources{Memory: memory.String()}
|
||||
if limit.Memory != "0" {
|
||||
status.Limits = limit
|
||||
}
|
||||
|
||||
res = append(res, status)
|
||||
}
|
||||
|
||||
body, _ := json.Marshal(res)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(body)
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -14,10 +14,24 @@ func MakeReplicaReaderHandler(client *containerd.Client) func(w http.ResponseWri
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
functionName := vars["name"]
|
||||
lookupNamespace := getRequestNamespace(readNamespaceFromQuery(r))
|
||||
|
||||
if f, err := GetFunction(client, functionName); err == nil {
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
valid, err := validNamespace(client.NamespaceService(), lookupNamespace)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
http.Error(w, "namespace not valid", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if f, err := GetFunction(client, functionName, lookupNamespace); err == nil {
|
||||
found := types.FunctionStatus{
|
||||
Name: functionName,
|
||||
Image: f.image,
|
||||
AvailableReplicas: uint64(f.replicas),
|
||||
Replicas: uint64(f.replicas),
|
||||
Namespace: f.namespace,
|
||||
@ -26,6 +40,7 @@ func MakeReplicaReaderHandler(client *containerd.Client) func(w http.ResponseWri
|
||||
Secrets: f.secrets,
|
||||
EnvVars: f.envVars,
|
||||
EnvProcess: f.envProcess,
|
||||
CreatedAt: f.createdAt,
|
||||
}
|
||||
|
||||
functionBytes, _ := json.Marshal(found)
|
||||
|
@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
@ -13,7 +13,7 @@ import (
|
||||
gocni "github.com/containerd/go-cni"
|
||||
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
"github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
||||
@ -27,29 +27,43 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
body, _ := ioutil.ReadAll(r.Body)
|
||||
log.Printf("[Scale] request: %s\n", string(body))
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
|
||||
req := types.ScaleServiceRequest{}
|
||||
err := json.Unmarshal(body, &req)
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[Scale] error parsing input: %s\n", err)
|
||||
if err := json.Unmarshal(body, &req); err != nil {
|
||||
log.Printf("[Scale] error parsing input: %s", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
namespace := req.Namespace
|
||||
if namespace == "" {
|
||||
namespace = pkg.DefaultFunctionNamespace
|
||||
}
|
||||
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
valid, err := validNamespace(client.NamespaceService(), namespace)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
http.Error(w, "namespace not valid", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
name := req.ServiceName
|
||||
|
||||
if _, err := GetFunction(client, name); err != nil {
|
||||
msg := fmt.Sprintf("service %s not found", name)
|
||||
if _, err := GetFunction(client, name, namespace); err != nil {
|
||||
msg := fmt.Sprintf("function: %s.%s not found", name, namespace)
|
||||
log.Printf("[Scale] %s\n", msg)
|
||||
http.Error(w, msg, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
ctx := namespaces.WithNamespace(context.Background(), namespace)
|
||||
|
||||
ctr, ctrErr := client.LoadContainer(ctx, name)
|
||||
if ctrErr != nil {
|
||||
@ -82,32 +96,24 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
|
||||
|
||||
createNewTask := false
|
||||
|
||||
// Scale to zero
|
||||
if req.Replicas == 0 {
|
||||
// If a task is running, pause it
|
||||
if taskExists && taskStatus.Status == containerd.Running {
|
||||
if pauseErr := task.Pause(ctx); pauseErr != nil {
|
||||
wrappedPauseErr := fmt.Errorf("error pausing task %s, error: %s", name, pauseErr)
|
||||
log.Printf("[Scale] %s\n", wrappedPauseErr.Error())
|
||||
http.Error(w, wrappedPauseErr.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
}
|
||||
http.Error(w, "replicas must > 0 for faasd CE", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if taskExists {
|
||||
if taskStatus != nil {
|
||||
if taskStatus.Status == containerd.Paused {
|
||||
if resumeErr := task.Resume(ctx); resumeErr != nil {
|
||||
log.Printf("[Scale] error resuming task %s, error: %s\n", name, resumeErr)
|
||||
http.Error(w, resumeErr.Error(), http.StatusBadRequest)
|
||||
if _, err := task.Delete(ctx); err != nil {
|
||||
log.Printf("[Scale] error deleting paused task %s, error: %s\n", name, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
} else if taskStatus.Status == containerd.Stopped {
|
||||
// Stopped tasks cannot be restarted, must be removed, and created again
|
||||
if _, delErr := task.Delete(ctx); delErr != nil {
|
||||
log.Printf("[Scale] error deleting stopped task %s, error: %s\n", name, delErr)
|
||||
http.Error(w, delErr.Error(), http.StatusBadRequest)
|
||||
if _, err := task.Delete(ctx); err != nil {
|
||||
log.Printf("[Scale] error deleting stopped task %s, error: %s\n", name, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
createNewTask = true
|
||||
@ -118,7 +124,7 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
|
||||
}
|
||||
|
||||
if createNewTask {
|
||||
deployErr := createTask(ctx, client, ctr, cni)
|
||||
deployErr := createTask(ctx, ctr, cni)
|
||||
if deployErr != nil {
|
||||
log.Printf("[Scale] error deploying %s, error: %s\n", name, deployErr)
|
||||
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
||||
|
@ -3,20 +3,21 @@ package handlers
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
provider "github.com/openfaas/faasd/pkg/provider"
|
||||
)
|
||||
|
||||
const secretFilePermission = 0644
|
||||
const secretDirPermission = 0755
|
||||
|
||||
func MakeSecretHandler(c *containerd.Client, mountPath string) func(w http.ResponseWriter, r *http.Request) {
|
||||
func MakeSecretHandler(store provider.Labeller, mountPath string) func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
err := os.MkdirAll(mountPath, secretFilePermission)
|
||||
if err != nil {
|
||||
@ -30,13 +31,13 @@ func MakeSecretHandler(c *containerd.Client, mountPath string) func(w http.Respo
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
listSecrets(c, w, r, mountPath)
|
||||
listSecrets(store, w, r, mountPath)
|
||||
case http.MethodPost:
|
||||
createSecret(c, w, r, mountPath)
|
||||
createSecret(w, r, mountPath)
|
||||
case http.MethodPut:
|
||||
createSecret(c, w, r, mountPath)
|
||||
createSecret(w, r, mountPath)
|
||||
case http.MethodDelete:
|
||||
deleteSecret(c, w, r, mountPath)
|
||||
deleteSecret(w, r, mountPath)
|
||||
default:
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
@ -45,23 +46,46 @@ func MakeSecretHandler(c *containerd.Client, mountPath string) func(w http.Respo
|
||||
}
|
||||
}
|
||||
|
||||
func listSecrets(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
files, err := ioutil.ReadDir(mountPath)
|
||||
func listSecrets(store provider.Labeller, w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
|
||||
lookupNamespace := getRequestNamespace(readNamespaceFromQuery(r))
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
valid, err := validNamespace(store, lookupNamespace)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
http.Error(w, "namespace not valid", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
mountPath = getNamespaceSecretMountPath(mountPath, lookupNamespace)
|
||||
|
||||
files, err := os.ReadDir(mountPath)
|
||||
if os.IsNotExist(err) {
|
||||
bytesOut, _ := json.Marshal([]types.Secret{})
|
||||
w.Write(bytesOut)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[Secret] Error listing secrets: %s ", err)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
secrets := []types.Secret{}
|
||||
for _, f := range files {
|
||||
secrets = append(secrets, types.Secret{Name: f.Name()})
|
||||
secrets = append(secrets, types.Secret{Name: f.Name(), Namespace: lookupNamespace})
|
||||
}
|
||||
|
||||
bytesOut, _ := json.Marshal(secrets)
|
||||
w.Write(bytesOut)
|
||||
}
|
||||
|
||||
func createSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
func createSecret(w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
secret, err := parseSecret(r)
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
@ -69,7 +93,30 @@ func createSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request,
|
||||
return
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(path.Join(mountPath, secret.Name), []byte(secret.Value), secretFilePermission)
|
||||
err = validateSecret(secret)
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[secret] is valid: %q", secret.Name)
|
||||
namespace := getRequestNamespace(secret.Namespace)
|
||||
mountPath = getNamespaceSecretMountPath(mountPath, namespace)
|
||||
|
||||
err = os.MkdirAll(mountPath, secretDirPermission)
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
data := secret.RawValue
|
||||
if len(data) == 0 {
|
||||
data = []byte(secret.Value)
|
||||
}
|
||||
|
||||
err = os.WriteFile(path.Join(mountPath, secret.Name), data, secretFilePermission)
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
@ -78,7 +125,7 @@ func createSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request,
|
||||
}
|
||||
}
|
||||
|
||||
func deleteSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
func deleteSecret(w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
secret, err := parseSecret(r)
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
@ -86,6 +133,9 @@ func deleteSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request,
|
||||
return
|
||||
}
|
||||
|
||||
namespace := getRequestNamespace(readNamespaceFromQuery(r))
|
||||
mountPath = getNamespaceSecretMountPath(mountPath, namespace)
|
||||
|
||||
err = os.Remove(path.Join(mountPath, secret.Name))
|
||||
|
||||
if err != nil {
|
||||
@ -97,7 +147,7 @@ func deleteSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request,
|
||||
|
||||
func parseSecret(r *http.Request) (types.Secret, error) {
|
||||
secret := types.Secret{}
|
||||
bytesOut, err := ioutil.ReadAll(r.Body)
|
||||
bytesOut, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return secret, err
|
||||
}
|
||||
@ -107,10 +157,6 @@ func parseSecret(r *http.Request) (types.Secret, error) {
|
||||
return secret, err
|
||||
}
|
||||
|
||||
if isTraversal(secret.Name) {
|
||||
return secret, fmt.Errorf(traverseErrorSt)
|
||||
}
|
||||
|
||||
return secret, err
|
||||
}
|
||||
|
||||
@ -120,3 +166,13 @@ func isTraversal(name string) bool {
|
||||
return strings.Contains(name, fmt.Sprintf("%s", string(os.PathSeparator))) ||
|
||||
strings.Contains(name, "..")
|
||||
}
|
||||
|
||||
func validateSecret(secret types.Secret) error {
|
||||
if strings.TrimSpace(secret.Name) == "" {
|
||||
return fmt.Errorf("non-empty name is required")
|
||||
}
|
||||
if isTraversal(secret.Name) {
|
||||
return fmt.Errorf(traverseErrorSt)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1,63 +1,252 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
"github.com/openfaas/faasd/pkg"
|
||||
provider "github.com/openfaas/faasd/pkg/provider"
|
||||
)
|
||||
|
||||
func Test_parseSecretValidName(t *testing.T) {
|
||||
func Test_parseSecret(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
payload string
|
||||
expError string
|
||||
expSecret types.Secret
|
||||
}{
|
||||
{
|
||||
name: "no error when name is valid without extention and with no traversal",
|
||||
payload: `{"name": "authorized_keys", "value": "foo"}`,
|
||||
expSecret: types.Secret{Name: "authorized_keys", Value: "foo"},
|
||||
},
|
||||
{
|
||||
name: "no error when name is valid and parses RawValue correctly",
|
||||
payload: `{"name": "authorized_keys", "rawValue": "YmFy"}`,
|
||||
expSecret: types.Secret{Name: "authorized_keys", RawValue: []byte("bar")},
|
||||
},
|
||||
{
|
||||
name: "no error when name is valid with dot and with no traversal",
|
||||
payload: `{"name": "authorized.keys", "value": "foo"}`,
|
||||
expSecret: types.Secret{Name: "authorized.keys", Value: "foo"},
|
||||
},
|
||||
}
|
||||
|
||||
s := types.Secret{Name: "authorized_keys"}
|
||||
body, _ := json.Marshal(s)
|
||||
reader := bytes.NewReader(body)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
_, err := parseSecret(r)
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
reader := strings.NewReader(tc.payload)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
secret, err := parseSecret(r)
|
||||
if err != nil && tc.expError == "" {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
if tc.expError != "" {
|
||||
if err == nil {
|
||||
t.Fatalf("expected error: %s, got nil", tc.expError)
|
||||
}
|
||||
if err.Error() != tc.expError {
|
||||
t.Fatalf("expected error: %s, got: %s", tc.expError, err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(secret, tc.expSecret) {
|
||||
t.Fatalf("expected secret: %+v, got: %+v", tc.expSecret, secret)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSecretCreation(t *testing.T) {
|
||||
mountPath, err := os.MkdirTemp("", "test_secret_creation")
|
||||
if err != nil {
|
||||
t.Fatalf("secret name is valid with no traversal characters")
|
||||
t.Fatalf("unexpected error while creating temp directory: %s", err)
|
||||
}
|
||||
|
||||
defer os.RemoveAll(mountPath)
|
||||
|
||||
handler := MakeSecretHandler(nil, mountPath)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
verb string
|
||||
payload string
|
||||
status int
|
||||
secretPath string
|
||||
secret string
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "returns error when the name contains a traversal",
|
||||
verb: http.MethodPost,
|
||||
payload: `{"name": "/root/.ssh/authorized_keys", "value": "foo"}`,
|
||||
status: http.StatusBadRequest,
|
||||
err: "directory traversal found in name\n",
|
||||
},
|
||||
{
|
||||
name: "returns error when the name contains a traversal",
|
||||
verb: http.MethodPost,
|
||||
payload: `{"name": "..", "value": "foo"}`,
|
||||
status: http.StatusBadRequest,
|
||||
err: "directory traversal found in name\n",
|
||||
},
|
||||
{
|
||||
name: "empty request returns a validation error",
|
||||
verb: http.MethodPost,
|
||||
payload: `{}`,
|
||||
status: http.StatusBadRequest,
|
||||
err: "non-empty name is required\n",
|
||||
},
|
||||
{
|
||||
name: "can create secret from string",
|
||||
verb: http.MethodPost,
|
||||
payload: `{"name": "foo", "value": "bar"}`,
|
||||
status: http.StatusOK,
|
||||
secretPath: "/openfaas-fn/foo",
|
||||
secret: "bar",
|
||||
},
|
||||
{
|
||||
name: "can create secret from raw value",
|
||||
verb: http.MethodPost,
|
||||
payload: `{"name": "foo", "rawValue": "YmFy"}`,
|
||||
status: http.StatusOK,
|
||||
secretPath: "/openfaas-fn/foo",
|
||||
secret: "bar",
|
||||
},
|
||||
{
|
||||
name: "can create secret in non-default namespace from raw value",
|
||||
verb: http.MethodPost,
|
||||
payload: `{"name": "pity", "rawValue": "dGhlIGZvbw==", "namespace": "a-team"}`,
|
||||
status: http.StatusOK,
|
||||
secretPath: "/a-team/pity",
|
||||
secret: "the foo",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
req := httptest.NewRequest(tc.verb, "http://example.com/foo", strings.NewReader(tc.payload))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
resp := w.Result()
|
||||
if resp.StatusCode != tc.status {
|
||||
t.Logf("response body: %s", w.Body.String())
|
||||
t.Fatalf("expected status: %d, got: %d", tc.status, resp.StatusCode)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK && w.Body.String() != tc.err {
|
||||
t.Fatalf("expected error message: %q, got %q", tc.err, w.Body.String())
|
||||
|
||||
}
|
||||
|
||||
if tc.secretPath != "" {
|
||||
data, err := os.ReadFile(filepath.Join(mountPath, tc.secretPath))
|
||||
if err != nil {
|
||||
t.Fatalf("can not read the secret from disk: %s", err)
|
||||
}
|
||||
|
||||
if string(data) != tc.secret {
|
||||
t.Fatalf("expected secret value: %s, got %s", tc.secret, string(data))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseSecretValidNameWithDot(t *testing.T) {
|
||||
|
||||
s := types.Secret{Name: "authorized.keys"}
|
||||
body, _ := json.Marshal(s)
|
||||
reader := bytes.NewReader(body)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
_, err := parseSecret(r)
|
||||
|
||||
func TestListSecrets(t *testing.T) {
|
||||
mountPath, err := os.MkdirTemp("", "test_secret_creation")
|
||||
if err != nil {
|
||||
t.Fatalf("secret name is valid with no traversal characters")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseSecretWithTraversalWithSlash(t *testing.T) {
|
||||
|
||||
s := types.Secret{Name: "/root/.ssh/authorized_keys"}
|
||||
body, _ := json.Marshal(s)
|
||||
reader := bytes.NewReader(body)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
_, err := parseSecret(r)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("secret name should fail due to path traversal")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseSecretWithTraversalWithDoubleDot(t *testing.T) {
|
||||
|
||||
s := types.Secret{Name: ".."}
|
||||
body, _ := json.Marshal(s)
|
||||
reader := bytes.NewReader(body)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
_, err := parseSecret(r)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("secret name should fail due to path traversal")
|
||||
t.Fatalf("unexpected error while creating temp directory: %s", err)
|
||||
}
|
||||
|
||||
defer os.RemoveAll(mountPath)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
verb string
|
||||
namespace string
|
||||
labels map[string]string
|
||||
status int
|
||||
secretPath string
|
||||
secret string
|
||||
err string
|
||||
expected []types.Secret
|
||||
}{
|
||||
{
|
||||
name: "Get empty secret list for default namespace having no secret",
|
||||
verb: http.MethodGet,
|
||||
status: http.StatusOK,
|
||||
secretPath: "/test-fn/foo",
|
||||
secret: "bar",
|
||||
expected: make([]types.Secret, 0),
|
||||
},
|
||||
{
|
||||
name: "Get empty secret list for non-default namespace having no secret",
|
||||
verb: http.MethodGet,
|
||||
status: http.StatusOK,
|
||||
secretPath: "/test-fn/foo",
|
||||
secret: "bar",
|
||||
expected: make([]types.Secret, 0),
|
||||
namespace: "other-ns",
|
||||
labels: map[string]string{
|
||||
pkg.NamespaceLabel: "true",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
labelStore := provider.NewFakeLabeller(tc.labels)
|
||||
|
||||
handler := MakeSecretHandler(labelStore, mountPath)
|
||||
|
||||
path := "http://example.com/foo"
|
||||
if len(tc.namespace) > 0 {
|
||||
path = path + fmt.Sprintf("?namespace=%s", tc.namespace)
|
||||
}
|
||||
req := httptest.NewRequest(tc.verb, path, nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
resp := w.Result()
|
||||
if resp.StatusCode != tc.status {
|
||||
t.Fatalf("want status: %d, but got: %d", tc.status, resp.StatusCode)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK && w.Body.String() != tc.err {
|
||||
t.Fatalf("want error message: %q, but got %q", tc.err, w.Body.String())
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("can't read response of list %v", err)
|
||||
}
|
||||
|
||||
var res []types.Secret
|
||||
err = json.Unmarshal(body, &res)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("unable to unmarshal %q, error: %v", string(body), err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(res, tc.expected) {
|
||||
t.Fatalf("want response: %v, but got: %v", tc.expected, res)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
@ -13,7 +13,6 @@ import (
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
)
|
||||
@ -29,33 +28,54 @@ func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
body, _ := ioutil.ReadAll(r.Body)
|
||||
log.Printf("[Update] request: %s\n", string(body))
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
|
||||
req := types.FunctionDeployment{}
|
||||
err := json.Unmarshal(body, &req)
|
||||
if err != nil {
|
||||
log.Printf("[Update] error parsing input: %s\n", err)
|
||||
log.Printf("[Update] error parsing input: %s", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
name := req.Service
|
||||
|
||||
function, err := GetFunction(client, name)
|
||||
name := req.Service
|
||||
namespace := getRequestNamespace(req.Namespace)
|
||||
|
||||
// Check if namespace exists, and it has the openfaas label
|
||||
valid, err := validNamespace(client.NamespaceService(), namespace)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("service %s not found", name)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
http.Error(w, "namespace not valid", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := preDeploy(client, int64(0)); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
log.Printf("[Deploy] error deploying %s, error: %s\n", name, err)
|
||||
return
|
||||
}
|
||||
|
||||
namespaceSecretMountPath := getNamespaceSecretMountPath(secretMountPath, namespace)
|
||||
|
||||
function, err := GetFunction(client, name, namespace)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("function: %s.%s not found", name, namespace)
|
||||
log.Printf("[Update] %s\n", msg)
|
||||
http.Error(w, msg, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
err = validateSecrets(secretMountPath, req.Secrets)
|
||||
err = validateSecrets(namespaceSecretMountPath, req.Secrets)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
ctx := namespaces.WithNamespace(context.Background(), namespace)
|
||||
|
||||
if _, err := prepull(ctx, req, client, alwaysPull); err != nil {
|
||||
log.Printf("[Update] error with pre-pull: %s, %s\n", name, err)
|
||||
@ -78,7 +98,7 @@ func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
||||
// The pull has already been done in prepull, so we can force this pull to "false"
|
||||
pull := false
|
||||
|
||||
if err := deploy(ctx, req, client, cni, secretMountPath, pull); err != nil {
|
||||
if err := deploy(ctx, req, client, cni, namespaceSecretMountPath, pull); err != nil {
|
||||
log.Printf("[Update] error deploying %s, error: %s\n", name, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
|
pkg/provider/handlers/utils.go (new file)
@@ -0,0 +1,48 @@
package handlers

import (
	"context"
	"net/http"
	"path"

	"github.com/openfaas/faasd/pkg"
	faasd "github.com/openfaas/faasd/pkg"
	provider "github.com/openfaas/faasd/pkg/provider"
)

func getRequestNamespace(namespace string) string {
	if len(namespace) > 0 {
		return namespace
	}
	return faasd.DefaultFunctionNamespace
}

func readNamespaceFromQuery(r *http.Request) string {
	q := r.URL.Query()
	return q.Get("namespace")
}

func getNamespaceSecretMountPath(userSecretPath string, namespace string) string {
	return path.Join(userSecretPath, namespace)
}

// validNamespace indicates whether the namespace is eligible to be
// used for OpenFaaS functions.
func validNamespace(store provider.Labeller, namespace string) (bool, error) {
	if namespace == faasd.DefaultFunctionNamespace {
		return true, nil
	}

	labels, err := store.Labels(context.Background(), namespace)
	if err != nil {
		return false, err
	}

	// check for true to keep it backward compatible
	if value, found := labels[pkg.NamespaceLabel]; found && (value == "true" || value == "1") {
		return true, nil
	}

	return false, nil
}
pkg/provider/handlers/utils_test.go (new file)
@@ -0,0 +1,75 @@
package handlers

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	faasd "github.com/openfaas/faasd/pkg"
)

func Test_getRequestNamespace(t *testing.T) {
	tables := []struct {
		name              string
		requestNamespace  string
		expectedNamespace string
	}{
		{name: "RequestNamespace is not provided", requestNamespace: "", expectedNamespace: faasd.DefaultFunctionNamespace},
		{name: "RequestNamespace is provided", requestNamespace: "user-namespace", expectedNamespace: "user-namespace"},
	}

	for _, tc := range tables {
		t.Run(tc.name, func(t *testing.T) {
			actualNamespace := getRequestNamespace(tc.requestNamespace)
			if actualNamespace != tc.expectedNamespace {
				t.Errorf("Want: %s, got: %s", tc.expectedNamespace, actualNamespace)
			}
		})
	}
}

func Test_getNamespaceSecretMountPath(t *testing.T) {
	userSecretPath := "/var/openfaas/secrets"
	tables := []struct {
		name               string
		requestNamespace   string
		expectedSecretPath string
	}{
		{name: "Default Namespace is provided", requestNamespace: faasd.DefaultFunctionNamespace, expectedSecretPath: "/var/openfaas/secrets/" + faasd.DefaultFunctionNamespace},
		{name: "User Namespace is provided", requestNamespace: "user-namespace", expectedSecretPath: "/var/openfaas/secrets/user-namespace"},
	}

	for _, tc := range tables {
		t.Run(tc.name, func(t *testing.T) {
			actualNamespace := getNamespaceSecretMountPath(userSecretPath, tc.requestNamespace)
			if actualNamespace != tc.expectedSecretPath {
				t.Errorf("Want: %s, got: %s", tc.expectedSecretPath, actualNamespace)
			}
		})
	}
}

func Test_readNamespaceFromQuery(t *testing.T) {
	tables := []struct {
		name              string
		queryNamespace    string
		expectedNamespace string
	}{
		{name: "No Namespace is provided", queryNamespace: "", expectedNamespace: ""},
		{name: "User Namespace is provided", queryNamespace: "user-namespace", expectedNamespace: "user-namespace"},
	}

	for _, tc := range tables {
		t.Run(tc.name, func(t *testing.T) {
			url := fmt.Sprintf("/test?namespace=%s", tc.queryNamespace)
			r := httptest.NewRequest(http.MethodGet, url, nil)

			actualNamespace := readNamespaceFromQuery(r)
			if actualNamespace != tc.expectedNamespace {
				t.Errorf("Want: %s, got: %s", tc.expectedNamespace, actualNamespace)
			}
		})
	}
}
pkg/provider/labeller.go (new file)
@@ -0,0 +1,25 @@
package provider

import "context"

// Labeller can return labels for a namespace from containerd.
type Labeller interface {
	Labels(ctx context.Context, namespace string) (map[string]string, error)
}

// FakeLabeller can be used to fake labels applied on namespaces to mark
// them valid/invalid for openfaas functions.
type FakeLabeller struct {
	labels map[string]string
}

func NewFakeLabeller(labels map[string]string) Labeller {
	return &FakeLabeller{
		labels: labels,
	}
}

func (s *FakeLabeller) Labels(ctx context.Context, namespace string) (map[string]string, error) {
	return s.labels, nil
}
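A minimal sketch, not part of the diff, of how the FakeLabeller above can stand in for containerd's namespace store when unit testing validNamespace from utils.go. The "staging" namespace name and its label set are hypothetical test data.

```go
package handlers

import (
	"testing"

	"github.com/openfaas/faasd/pkg"
	provider "github.com/openfaas/faasd/pkg/provider"
)

// Test_validNamespace_withFakeLabeller feeds a fake label set into
// validNamespace instead of querying a real containerd namespace store.
func Test_validNamespace_withFakeLabeller(t *testing.T) {
	store := provider.NewFakeLabeller(map[string]string{
		pkg.NamespaceLabel: "true",
	})

	valid, err := validNamespace(store, "staging")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !valid {
		t.Fatalf("expected namespace with the %q label to be valid", pkg.NamespaceLabel)
	}
}
```

The same pattern is what secret_test.go uses via provider.NewFakeLabeller to exercise namespace-aware handlers without a running containerd.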
pkg/proxy.go
@@ -75,18 +75,18 @@ func (p *Proxy) Start() error {
		// Wait for a connection.
		conn, err := l.Accept()
		if err != nil {
			acceptErr := fmt.Errorf("Unable to accept on %d, error: %s",
			log.Printf("Unable to accept on: %d, error: %s",
				p.Port,
				err.Error())
			log.Printf("%s", acceptErr.Error())
			return acceptErr
			return err
		}

		upstream, err := net.Dial("tcp", upstreamAddr)

		if err != nil {
			log.Printf("unable to dial to %s, error: %s", upstreamAddr, err.Error())
			return err
			conn.Close()

			log.Printf("Unable to dial: %s, error: %s", upstreamAddr, err.Error())
			continue
		}

		go pipe(conn, upstream)
@ -2,7 +2,7 @@ package pkg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@ -61,7 +61,7 @@ func Test_Proxy_ToPrivateServer(t *testing.T) {
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
} else {
|
||||
|
||||
resBody, _ := ioutil.ReadAll(res.Body)
|
||||
resBody, _ := io.ReadAll(res.Body)
|
||||
if string(resBody) != string(wantBody) {
|
||||
t.Errorf("want %s, but got %s in body", string(wantBody), string(resBody))
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -33,7 +34,7 @@ func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
||||
if errdefs.IsNotFound(err) {
|
||||
taskFound = false
|
||||
} else {
|
||||
return fmt.Errorf("unable to get task %s: ", err)
|
||||
return fmt.Errorf("unable to get task %w: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -45,19 +46,33 @@ func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
||||
log.Printf("Status of %s is: %s\n", name, status.Status)
|
||||
}
|
||||
|
||||
log.Printf("Need to kill task: %s\n", name)
|
||||
if err = killTask(ctx, t); err != nil {
|
||||
return fmt.Errorf("error killing task %s, %s, %s", container.ID(), name, err)
|
||||
var gracePeriod = time.Second * 30
|
||||
spec, err := t.Spec(ctx)
|
||||
if err == nil {
|
||||
for _, p := range spec.Process.Env {
|
||||
k, v, ok := strings.Cut(p, "=")
|
||||
if ok && k == "grace_period" {
|
||||
periodVal, err := time.ParseDuration(v)
|
||||
if err == nil {
|
||||
gracePeriod = periodVal
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err = killTask(ctx, t, gracePeriod); err != nil {
|
||||
return fmt.Errorf("error killing task %s, %s, %w", container.ID(), name, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
|
||||
return fmt.Errorf("error deleting container %s, %s, %s", container.ID(), name, err)
|
||||
return fmt.Errorf("error deleting container %s, %s, %w", container.ID(), name, err)
|
||||
}
|
||||
|
||||
} else {
|
||||
service := client.SnapshotService("")
|
||||
key := name + "snapshot"
|
||||
key := name + "-snapshot"
|
||||
if _, err := client.SnapshotService("").Stat(ctx, key); err == nil {
|
||||
service.Remove(ctx, key)
|
||||
}
|
||||
@ -66,44 +81,61 @@ func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
||||
}
|
||||
|
||||
// Adapted from Stellar - https://github.com/stellar
|
||||
func killTask(ctx context.Context, task containerd.Task) error {
|
||||
|
||||
killTimeout := 30 * time.Second
|
||||
func killTask(ctx context.Context, task containerd.Task, gracePeriod time.Duration) error {
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
var err error
|
||||
|
||||
waited := false
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if task != nil {
|
||||
wait, err := task.Wait(ctx)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error waiting on task: %s", err)
|
||||
log.Printf("error waiting on task: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := task.Kill(ctx, unix.SIGTERM, containerd.WithKillAll); err != nil {
|
||||
log.Printf("error killing container task: %s", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-wait:
|
||||
task.Delete(ctx)
|
||||
waited = true
|
||||
return
|
||||
case <-time.After(killTimeout):
|
||||
case <-time.After(gracePeriod):
|
||||
log.Printf("Sending SIGKILL to: %s after: %s", task.ID(), gracePeriod.Round(time.Second).String())
|
||||
if err := task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil {
|
||||
log.Printf("error force killing container task: %s", err)
|
||||
log.Printf("error sending SIGKILL to task: %s", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
if task != nil {
|
||||
if !waited {
|
||||
wait, err := task.Wait(ctx)
|
||||
if err != nil {
|
||||
log.Printf("error waiting on task after kill: %s", err)
|
||||
}
|
||||
|
||||
<-wait
|
||||
}
|
||||
|
||||
if _, err := task.Delete(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func getResolver(ctx context.Context, configFile *configfile.ConfigFile) (remotes.Resolver, error) {
|
||||
func getResolver(configFile *configfile.ConfigFile) (remotes.Resolver, error) {
|
||||
// credsFunc is based on https://github.com/moby/buildkit/blob/0b130cca040246d2ddf55117eeff34f546417e40/session/auth/authprovider/authprovider.go#L35
|
||||
credFunc := func(host string) (string, string, error) {
|
||||
if host == "registry-1.docker.io" {
|
||||
@ -138,7 +170,7 @@ func PrepareImage(ctx context.Context, client *containerd.Client, imageName, sna
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resolver, err = getResolver(ctx, configFile)
|
||||
resolver, err = getResolver(configFile)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
|
@ -3,13 +3,14 @@ package pkg
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alexellis/k3sup/pkg/env"
|
||||
"github.com/alexellis/arkade/pkg/env"
|
||||
"github.com/compose-spec/compose-go/loader"
|
||||
compose "github.com/compose-spec/compose-go/types"
|
||||
"github.com/containerd/containerd"
|
||||
@ -17,16 +18,19 @@ import (
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/oci"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
const (
|
||||
workingDirectoryPermission = 0644
|
||||
// workingDirectoryPermission user read/write/execute, group and others: read-only
|
||||
workingDirectoryPermission = 0744
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
@ -52,8 +56,14 @@ type ServicePort struct {
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
Src string
|
||||
// Src relative to the working directory for faasd
|
||||
Src string
|
||||
|
||||
// Dest is the absolute path within the container
|
||||
Dest string
|
||||
|
||||
// ReadOnly when set to true indicates the mount will be set to "ro" instead of "rw"
|
||||
ReadOnly bool
|
||||
}
|
||||
|
||||
type Supervisor struct {
|
||||
@ -79,7 +89,7 @@ func NewSupervisor(sock string) (*Supervisor, error) {
|
||||
}
|
||||
|
||||
func (s *Supervisor) Start(svcs []Service) error {
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasdNamespace)
|
||||
ctx := namespaces.WithNamespace(context.Background(), FaasdNamespace)
|
||||
|
||||
wd, _ := os.Getwd()
|
||||
|
||||
@ -91,7 +101,7 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
127.0.0.1 localhost
|
||||
%s faasd-provider`, gw)
|
||||
|
||||
writeHostsErr := ioutil.WriteFile(path.Join(wd, "hosts"),
|
||||
writeHostsErr := os.WriteFile(path.Join(wd, "hosts"),
|
||||
[]byte(hosts), workingDirectoryPermission)
|
||||
|
||||
if writeHostsErr != nil {
|
||||
@ -103,13 +113,20 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
for _, svc := range svcs {
|
||||
fmt.Printf("Preparing %s with image: %s\n", svc.Name, svc.Image)
|
||||
|
||||
img, err := service.PrepareImage(ctx, s.client, svc.Image, defaultSnapshotter, faasServicesPullAlways)
|
||||
r, err := reference.ParseNormalizedNamed(svc.Image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imgRef := reference.TagNameOnly(r).String()
|
||||
|
||||
img, err := service.PrepareImage(ctx, s.client, imgRef, defaultSnapshotter, faasServicesPullAlways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
images[svc.Name] = img
|
||||
size, _ := img.Size(ctx)
|
||||
fmt.Printf("Prepare done for: %s, %d bytes\n", svc.Image, size)
|
||||
fmt.Printf("Prepare done for: %s, %s\n", svc.Image, units.HumanSize(float64(size)))
|
||||
}
|
||||
|
||||
for _, svc := range svcs {
|
||||
@ -139,12 +156,41 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
mounts := []specs.Mount{}
|
||||
if len(svc.Mounts) > 0 {
|
||||
for _, mnt := range svc.Mounts {
|
||||
var options = []string{"rbind"}
|
||||
if mnt.ReadOnly {
|
||||
options = append(options, "ro")
|
||||
} else {
|
||||
options = append(options, "rw")
|
||||
}
|
||||
|
||||
mounts = append(mounts, specs.Mount{
|
||||
Source: mnt.Src,
|
||||
Destination: mnt.Dest,
|
||||
Type: "bind",
|
||||
Options: []string{"rbind", "rw"},
|
||||
Options: options,
|
||||
})
|
||||
|
||||
// Only create directories, not files.
|
||||
// Some files don't have a suffix, such as secrets.
|
||||
if len(path.Ext(mnt.Src)) == 0 &&
|
||||
!strings.HasPrefix(mnt.Src, "/var/lib/faasd/secrets/") {
|
||||
// src is already prefixed with wd from an earlier step
|
||||
src := mnt.Src
|
||||
fmt.Printf("Creating local directory: %s\n", src)
|
||||
if err := os.MkdirAll(src, workingDirectoryPermission); err != nil {
|
||||
if !errors.Is(os.ErrExist, err) {
|
||||
fmt.Printf("Unable to create: %s, %s\n", src, err)
|
||||
}
|
||||
}
|
||||
if len(svc.User) > 0 {
|
||||
uid, err := strconv.Atoi(svc.User)
|
||||
if err == nil {
|
||||
if err := os.Chown(src, uid, -1); err != nil {
|
||||
fmt.Printf("Unable to chown: %s to %d, error: %s\n", src, uid, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -181,7 +227,7 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
log.Printf("Error creating container: %s\n", err)
|
||||
log.Printf("Error creating container: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -189,7 +235,7 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
|
||||
task, err := newContainer.NewTask(ctx, cio.BinaryIO("/usr/local/bin/faasd", nil))
|
||||
if err != nil {
|
||||
log.Printf("Error creating task: %s\n", err)
|
||||
log.Printf("Error creating task: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -208,7 +254,7 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
|
||||
log.Printf("%s has IP: %s\n", newContainer.ID(), ip)
|
||||
|
||||
hosts, err := ioutil.ReadFile("hosts")
|
||||
hosts, err := os.ReadFile("hosts")
|
||||
if err != nil {
|
||||
log.Printf("Unable to read hosts file: %s\n", err.Error())
|
||||
}
|
||||
@ -217,12 +263,12 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
%s %s
|
||||
`, ip, svc.Name))
|
||||
|
||||
if err := ioutil.WriteFile("hosts", hosts, workingDirectoryPermission); err != nil {
|
||||
if err := os.WriteFile("hosts", hosts, workingDirectoryPermission); err != nil {
|
||||
log.Printf("Error writing file: %s %s\n", "hosts", err)
|
||||
}
|
||||
|
||||
if _, err := task.Wait(ctx); err != nil {
|
||||
log.Printf("Task wait error: %s\n", err)
|
||||
log.Printf("Task wait error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -230,7 +276,7 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
// log.Println("Exited: ", exitStatusC)
|
||||
|
||||
if err = task.Start(ctx); err != nil {
|
||||
log.Printf("Task start error: %s\n", err)
|
||||
log.Printf("Task start error: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -243,7 +289,7 @@ func (s *Supervisor) Close() {
|
||||
}
|
||||
|
||||
func (s *Supervisor) Remove(svcs []Service) error {
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasdNamespace)
|
||||
ctx := namespaces.WithNamespace(context.Background(), FaasdNamespace)
|
||||
|
||||
for _, svc := range svcs {
|
||||
err := cninetwork.DeleteCNINetwork(ctx, s.cni, s.client, svc.Name)
|
||||
@ -308,8 +354,9 @@ func ParseCompose(config *compose.Config) ([]Service, error) {
|
||||
return nil, errors.Errorf("unsupported volume mount type '%s' when parsing service '%s'", v.Type, s.Name)
|
||||
}
|
||||
mounts = append(mounts, Mount{
|
||||
Src: v.Source,
|
||||
Dest: v.Target,
|
||||
Src: v.Source,
|
||||
Dest: v.Target,
|
||||
ReadOnly: v.ReadOnly,
|
||||
})
|
||||
}
|
||||
|
||||
@ -352,7 +399,7 @@ func LoadComposeFile(wd string, file string) (*compose.Config, error) {
|
||||
func LoadComposeFileWithArch(wd string, file string, archGetter ArchGetter) (*compose.Config, error) {
|
||||
|
||||
file = path.Join(wd, file)
|
||||
b, err := ioutil.ReadFile(file)
|
||||
b, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -404,8 +451,6 @@ func GetArchSuffix(getClientArch ArchGetter) (suffix string, err error) {
|
||||
case "x86_64":
|
||||
// no suffix needed
|
||||
return "", nil
|
||||
case "armhf", "armv7l":
|
||||
return "-armhf", nil
|
||||
case "arm64", "aarch64":
|
||||
return "-arm64", nil
|
||||
default:
|
||||
|
@ -180,7 +180,7 @@ func equalMountSlice(t *testing.T, want, found []Mount) {
|
||||
|
||||
for i := range want {
|
||||
if !reflect.DeepEqual(want[i], found[i]) {
|
||||
t.Fatalf("unexpected value at postition %d: want %s, got %s", i, want[i], found[i])
|
||||
t.Fatalf("unexpected value at postition %d: want %v, got %v", i, want[i], found[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -210,18 +210,6 @@ func Test_GetArchSuffix(t *testing.T) {
|
||||
foundArch: "anything_else",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "armhf has armhf suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "armhf",
|
||||
want: "-armhf",
|
||||
},
|
||||
{
|
||||
name: "armv7l has armhf suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "armv7l",
|
||||
want: "-armhf",
|
||||
},
|
||||
{
|
||||
name: "arm64 has arm64 suffix",
|
||||
foundOS: "Linux",
|
||||
|
@ -2,21 +2,23 @@ package systemd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"text/template"
|
||||
|
||||
execute "github.com/alexellis/go-execute/pkg/v1"
|
||||
execute "github.com/alexellis/go-execute/v2"
|
||||
)
|
||||
|
||||
func Enable(unit string) error {
|
||||
task := execute.ExecTask{Command: "systemctl",
|
||||
task := execute.ExecTask{
|
||||
Command: "systemctl",
|
||||
Args: []string{"enable", unit},
|
||||
StreamStdio: false,
|
||||
}
|
||||
|
||||
res, err := task.Execute()
|
||||
res, err := task.Execute(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -29,12 +31,13 @@ func Enable(unit string) error {
|
||||
}
|
||||
|
||||
func Start(unit string) error {
|
||||
task := execute.ExecTask{Command: "systemctl",
|
||||
task := execute.ExecTask{
|
||||
Command: "systemctl",
|
||||
Args: []string{"start", unit},
|
||||
StreamStdio: false,
|
||||
}
|
||||
|
||||
res, err := task.Execute()
|
||||
res, err := task.Execute(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -47,12 +50,13 @@ func Start(unit string) error {
|
||||
}
|
||||
|
||||
func DaemonReload() error {
|
||||
task := execute.ExecTask{Command: "systemctl",
|
||||
task := execute.ExecTask{
|
||||
Command: "systemctl",
|
||||
Args: []string{"daemon-reload"},
|
||||
StreamStdio: false,
|
||||
}
|
||||
|
||||
res, err := task.Execute()
|
||||
res, err := task.Execute(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -71,23 +75,20 @@ func InstallUnit(name string, tokens map[string]string) error {
|
||||
|
||||
tmplName := "./hack/" + name + ".service"
|
||||
tmpl, err := template.ParseFiles(tmplName)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading template %s, error %s", tmplName, err)
|
||||
}
|
||||
|
||||
var tpl bytes.Buffer
|
||||
|
||||
err = tmpl.Execute(&tpl, tokens)
|
||||
if err != nil {
|
||||
if err := tmpl.Execute(&tpl, tokens); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writeUnit(name+".service", tpl.Bytes())
|
||||
|
||||
if err != nil {
|
||||
if err := writeUnit(name+".service", tpl.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -96,7 +97,12 @@ func writeUnit(name string, data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
_, err = f.Write(data)
|
||||
return err
|
||||
|
||||
if _, err := f.Write(data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@@ -1 +1,17 @@
package pkg

// These values will be injected into these variables at the build time.
var (
	// GitCommit Git Commit SHA
	GitCommit string
	// Version version of the CLI
	Version string
)

// GetVersion get latest version
func GetVersion() string {
	if len(Version) == 0 {
		return "dev"
	}
	return Version
}
@@ -26,3 +26,7 @@ scrape_configs:
  - job_name: 'gateway'
    static_configs:
      - targets: ['gateway:8082']

  - job_name: 'provider'
    static_configs:
      - targets: ['faasd-provider:8081']
93 vendor/github.com/AdaLogics/go-fuzz-headers/README.md (generated, vendored, new file)
@@ -0,0 +1,93 @@
# go-fuzz-headers
This repository contains various helper functions for go fuzzing. It is mostly used in combination with [go-fuzz](https://github.com/dvyukov/go-fuzz), but compatibility with fuzzing in the standard library will also be supported. Any coverage guided fuzzing engine that provides an array or slice of bytes can be used with go-fuzz-headers.

## Usage
Using go-fuzz-headers is easy. First create a new consumer with the bytes provided by the fuzzing engine:

```go
import (
	fuzz "github.com/AdaLogics/go-fuzz-headers"
)
data := []byte{'R', 'a', 'n', 'd', 'o', 'm'}
f := fuzz.NewConsumer(data)
```

This creates a `Consumer` that consumes the bytes of the input as it uses them to fuzz different types.

After that, `f` can be used to easily create fuzzed instances of different types. Below are some examples:

### Structs
One of the most useful features of go-fuzz-headers is its ability to fill structs with the data provided by the fuzzing engine. This is done with a single line:
```go
type Person struct {
	Name string
	Age  int
}
p := Person{}
// Fill p with values based on the data provided by the fuzzing engine:
err := f.GenerateStruct(&p)
```

This includes nested structs too. In this example, the fuzz Consumer will also insert values in `p.BestFriend`:
```go
type PersonI struct {
	Name       string
	Age        int
	BestFriend PersonII
}
type PersonII struct {
	Name string
	Age  int
}
p := PersonI{}
err := f.GenerateStruct(&p)
```

If the consumer should insert values for unexported fields as well as exported, this can be enabled with:

```go
f.AllowUnexportedFields()
```

...and disabled with:

```go
f.DisallowUnexportedFields()
```

### Other types:

Other useful APIs:

```go
createdString, err := f.GetString()  // Gets a string
createdInt, err := f.GetInt()        // Gets an integer
createdByte, err := f.GetByte()      // Gets a byte
createdBytes, err := f.GetBytes()    // Gets a byte slice
createdBool, err := f.GetBool()      // Gets a boolean
err := f.FuzzMap(target_map)         // Fills a map
createdTarBytes, err := f.TarBytes() // Gets bytes of a valid tar archive
err := f.CreateFiles(inThisDir)      // Fills inThisDir with files
createdString, err := f.GetStringFrom("anyCharInThisString", ofThisLength) // Gets a string that consists of chars from "anyCharInThisString" and has the exact length "ofThisLength"
```

Most APIs are added as they are needed.

## Projects that use go-fuzz-headers
- [runC](https://github.com/opencontainers/runc)
- [Istio](https://github.com/istio/istio)
- [Vitess](https://github.com/vitessio/vitess)
- [Containerd](https://github.com/containerd/containerd)

Feel free to add your own project to the list, if you use go-fuzz-headers to fuzz it.

## Status
The project is under development and will be updated regularly.

## References
go-fuzz-headers' approach to fuzzing structs is strongly inspired by [gofuzz](https://github.com/google/gofuzz).
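The README's snippets show the consumer calls in isolation; below is a minimal sketch of wiring them into a Go 1.18+ native fuzz target. The package, target name, and config struct are illustrative assumptions, not part of faasd or of the vendored library:

```go
// Sketch: feeding engine-provided bytes through go-fuzz-headers inside a
// standard-library fuzz target.
package mypkg_test

import (
	"testing"

	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

// config is a stand-in for whatever structure the code under test consumes.
type config struct {
	Name  string
	Count int
}

func FuzzConfig(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		c := fuzz.NewConsumer(data)

		// Build a struct and a free-form string from the fuzz input.
		var cfg config
		if err := c.GenerateStruct(&cfg); err != nil {
			return // not enough input bytes; skip this case
		}
		s, err := c.GetString()
		if err != nil {
			return
		}

		// Exercise the code under test here.
		_, _ = cfg, s
	})
}
```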
914 vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go (generated, vendored, new file)
@ -0,0 +1,914 @@
|
||||
// Copyright 2023 The go-fuzz-headers Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gofuzzheaders
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
MaxTotalLen uint32 = 2000000
|
||||
maxDepth = 100
|
||||
)
|
||||
|
||||
func SetMaxTotalLen(newLen uint32) {
|
||||
MaxTotalLen = newLen
|
||||
}
|
||||
|
||||
type ConsumeFuzzer struct {
|
||||
data []byte
|
||||
dataTotal uint32
|
||||
CommandPart []byte
|
||||
RestOfArray []byte
|
||||
NumberOfCalls int
|
||||
position uint32
|
||||
fuzzUnexportedFields bool
|
||||
curDepth int
|
||||
Funcs map[reflect.Type]reflect.Value
|
||||
}
|
||||
|
||||
func IsDivisibleBy(n int, divisibleby int) bool {
|
||||
return (n % divisibleby) == 0
|
||||
}
|
||||
|
||||
func NewConsumer(fuzzData []byte) *ConsumeFuzzer {
|
||||
return &ConsumeFuzzer{
|
||||
data: fuzzData,
|
||||
dataTotal: uint32(len(fuzzData)),
|
||||
Funcs: make(map[reflect.Type]reflect.Value),
|
||||
curDepth: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) Split(minCalls, maxCalls int) error {
|
||||
if f.dataTotal == 0 {
|
||||
return errors.New("could not split")
|
||||
}
|
||||
numberOfCalls := int(f.data[0])
|
||||
if numberOfCalls < minCalls || numberOfCalls > maxCalls {
|
||||
return errors.New("bad number of calls")
|
||||
}
|
||||
if int(f.dataTotal) < numberOfCalls+numberOfCalls+1 {
|
||||
return errors.New("length of data does not match required parameters")
|
||||
}
|
||||
|
||||
// Define part 2 and 3 of the data array
|
||||
commandPart := f.data[1 : numberOfCalls+1]
|
||||
restOfArray := f.data[numberOfCalls+1:]
|
||||
|
||||
// Just a small check. It is necessary
|
||||
if len(commandPart) != numberOfCalls {
|
||||
return errors.New("length of commandPart does not match number of calls")
|
||||
}
|
||||
|
||||
// Check if restOfArray is divisible by numberOfCalls
|
||||
if !IsDivisibleBy(len(restOfArray), numberOfCalls) {
|
||||
return errors.New("length of commandPart does not match number of calls")
|
||||
}
|
||||
f.CommandPart = commandPart
|
||||
f.RestOfArray = restOfArray
|
||||
f.NumberOfCalls = numberOfCalls
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) AllowUnexportedFields() {
|
||||
f.fuzzUnexportedFields = true
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) DisallowUnexportedFields() {
|
||||
f.fuzzUnexportedFields = false
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error {
|
||||
e := reflect.ValueOf(targetStruct).Elem()
|
||||
return f.fuzzStruct(e, false)
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) setCustom(v reflect.Value) error {
|
||||
// First: see if we have a fuzz function for it.
|
||||
doCustom, ok := f.Funcs[v.Type()]
|
||||
if !ok {
|
||||
return fmt.Errorf("could not find a custom function")
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
if !v.CanSet() {
|
||||
return fmt.Errorf("could not use a custom function")
|
||||
}
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
case reflect.Map:
|
||||
if v.IsNil() {
|
||||
if !v.CanSet() {
|
||||
return fmt.Errorf("could not use a custom function")
|
||||
}
|
||||
v.Set(reflect.MakeMap(v.Type()))
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("could not use a custom function")
|
||||
}
|
||||
|
||||
verr := doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
|
||||
F: f,
|
||||
})})
|
||||
|
||||
// check if we return an error
|
||||
if verr[0].IsNil() {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("could not use a custom function")
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error {
|
||||
if f.curDepth >= maxDepth {
|
||||
// return err or nil here?
|
||||
return nil
|
||||
}
|
||||
f.curDepth++
|
||||
defer func() { f.curDepth-- }()
|
||||
|
||||
// We check if we should check for custom functions
|
||||
if customFunctions && e.IsValid() && e.CanAddr() {
|
||||
err := f.setCustom(e.Addr())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
switch e.Kind() {
|
||||
case reflect.Struct:
|
||||
for i := 0; i < e.NumField(); i++ {
|
||||
var v reflect.Value
|
||||
if !e.Field(i).CanSet() {
|
||||
if f.fuzzUnexportedFields {
|
||||
v = reflect.NewAt(e.Field(i).Type(), unsafe.Pointer(e.Field(i).UnsafeAddr())).Elem()
|
||||
}
|
||||
if err := f.fuzzStruct(v, customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v = e.Field(i)
|
||||
if err := f.fuzzStruct(v, customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
str, err := f.GetString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetString(str)
|
||||
}
|
||||
case reflect.Slice:
|
||||
var maxElements uint32
|
||||
// Byte slices should not be restricted
|
||||
if e.Type().String() == "[]uint8" {
|
||||
maxElements = 10000000
|
||||
} else {
|
||||
maxElements = 50
|
||||
}
|
||||
|
||||
randQty, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
numOfElements := randQty % maxElements
|
||||
if (f.dataTotal - f.position) < numOfElements {
|
||||
numOfElements = f.dataTotal - f.position
|
||||
}
|
||||
|
||||
uu := reflect.MakeSlice(e.Type(), int(numOfElements), int(numOfElements))
|
||||
|
||||
for i := 0; i < int(numOfElements); i++ {
|
||||
// If we have more than 10, then we can proceed with that.
|
||||
if err := f.fuzzStruct(uu.Index(i), customFunctions); err != nil {
|
||||
if i >= 10 {
|
||||
if e.CanSet() {
|
||||
e.Set(uu)
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.Set(uu)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
newInt, err := f.GetUint16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(newInt))
|
||||
}
|
||||
case reflect.Uint32:
|
||||
newInt, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(newInt))
|
||||
}
|
||||
case reflect.Uint64:
|
||||
newInt, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(newInt))
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
newInt, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetInt(int64(newInt))
|
||||
}
|
||||
case reflect.Float32:
|
||||
newFloat, err := f.GetFloat32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetFloat(float64(newFloat))
|
||||
}
|
||||
case reflect.Float64:
|
||||
newFloat, err := f.GetFloat64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetFloat(float64(newFloat))
|
||||
}
|
||||
case reflect.Map:
|
||||
if e.CanSet() {
|
||||
e.Set(reflect.MakeMap(e.Type()))
|
||||
const maxElements = 50
|
||||
randQty, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
numOfElements := randQty % maxElements
|
||||
for i := 0; i < numOfElements; i++ {
|
||||
key := reflect.New(e.Type().Key()).Elem()
|
||||
if err := f.fuzzStruct(key, customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
val := reflect.New(e.Type().Elem()).Elem()
|
||||
if err = f.fuzzStruct(val, customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
e.SetMapIndex(key, val)
|
||||
}
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if e.CanSet() {
|
||||
e.Set(reflect.New(e.Type().Elem()))
|
||||
if err := f.fuzzStruct(e.Elem(), customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint8:
|
||||
b, err := f.GetByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(b))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetStringArray() (reflect.Value, error) {
|
||||
// The max size of the array:
|
||||
const max uint32 = 20
|
||||
|
||||
arraySize := f.position
|
||||
if arraySize > max {
|
||||
arraySize = max
|
||||
}
|
||||
stringArray := reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("string")), int(arraySize), int(arraySize))
|
||||
if f.position+arraySize >= f.dataTotal {
|
||||
return stringArray, errors.New("could not make string array")
|
||||
}
|
||||
|
||||
for i := 0; i < int(arraySize); i++ {
|
||||
stringSize := uint32(f.data[f.position])
|
||||
if f.position+stringSize >= f.dataTotal {
|
||||
return stringArray, nil
|
||||
}
|
||||
stringToAppend := string(f.data[f.position : f.position+stringSize])
|
||||
strVal := reflect.ValueOf(stringToAppend)
|
||||
stringArray = reflect.Append(stringArray, strVal)
|
||||
f.position += stringSize
|
||||
}
|
||||
return stringArray, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetInt() (int, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return 0, errors.New("not enough bytes to create int")
|
||||
}
|
||||
returnInt := int(f.data[f.position])
|
||||
f.position++
|
||||
return returnInt, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetByte() (byte, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return 0x00, errors.New("not enough bytes to get byte")
|
||||
}
|
||||
returnByte := f.data[f.position]
|
||||
f.position++
|
||||
return returnByte, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetNBytes(numberOfBytes int) ([]byte, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return nil, errors.New("not enough bytes to get byte")
|
||||
}
|
||||
returnBytes := make([]byte, 0, numberOfBytes)
|
||||
for i := 0; i < numberOfBytes; i++ {
|
||||
newByte, err := f.GetByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
returnBytes = append(returnBytes, newByte)
|
||||
}
|
||||
return returnBytes, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetUint16() (uint16, error) {
|
||||
u16, err := f.GetNBytes(2)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
littleEndian, err := f.GetBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if littleEndian {
|
||||
return binary.LittleEndian.Uint16(u16), nil
|
||||
}
|
||||
return binary.BigEndian.Uint16(u16), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetUint32() (uint32, error) {
|
||||
u32, err := f.GetNBytes(4)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return binary.BigEndian.Uint32(u32), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetUint64() (uint64, error) {
|
||||
u64, err := f.GetNBytes(8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
littleEndian, err := f.GetBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if littleEndian {
|
||||
return binary.LittleEndian.Uint64(u64), nil
|
||||
}
|
||||
return binary.BigEndian.Uint64(u64), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetBytes() ([]byte, error) {
|
||||
var length uint32
|
||||
var err error
|
||||
length, err = f.GetUint32()
|
||||
if err != nil {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
|
||||
if length == 0 {
|
||||
length = 30
|
||||
}
|
||||
bytesLeft := f.dataTotal - f.position
|
||||
if bytesLeft <= 0 {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
|
||||
// If the length is the same as bytes left, we will not overflow
|
||||
// the remaining bytes.
|
||||
if length != bytesLeft {
|
||||
length = length % bytesLeft
|
||||
}
|
||||
byteBegin := f.position
|
||||
if byteBegin+length < byteBegin {
|
||||
return nil, errors.New("numbers overflow")
|
||||
}
|
||||
f.position = byteBegin + length
|
||||
return f.data[byteBegin:f.position], nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetString() (string, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
length, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if f.position > MaxTotalLen {
|
||||
return "nil", errors.New("created too large a string")
|
||||
}
|
||||
byteBegin := f.position
|
||||
if byteBegin >= f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if byteBegin+length > f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if byteBegin > byteBegin+length {
|
||||
return "nil", errors.New("numbers overflow")
|
||||
}
|
||||
f.position = byteBegin + length
|
||||
return string(f.data[byteBegin:f.position]), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetBool() (bool, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return false, errors.New("not enough bytes to create bool")
|
||||
}
|
||||
if IsDivisibleBy(int(f.data[f.position]), 2) {
|
||||
f.position++
|
||||
return true, nil
|
||||
} else {
|
||||
f.position++
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) FuzzMap(m interface{}) error {
|
||||
return f.GenerateStruct(m)
|
||||
}
|
||||
|
||||
func returnTarBytes(buf []byte) ([]byte, error) {
|
||||
return buf, nil
|
||||
// Count files
|
||||
var fileCounter int
|
||||
tr := tar.NewReader(bytes.NewReader(buf))
|
||||
for {
|
||||
_, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fileCounter++
|
||||
}
|
||||
if fileCounter >= 1 {
|
||||
return buf, nil
|
||||
}
|
||||
return nil, fmt.Errorf("not enough files were created\n")
|
||||
}
|
||||
|
||||
func setTarHeaderFormat(hdr *tar.Header, f *ConsumeFuzzer) error {
|
||||
ind, err := f.GetInt()
|
||||
if err != nil {
|
||||
hdr.Format = tar.FormatGNU
|
||||
//return nil
|
||||
}
|
||||
switch ind % 4 {
|
||||
case 0:
|
||||
hdr.Format = tar.FormatUnknown
|
||||
case 1:
|
||||
hdr.Format = tar.FormatUSTAR
|
||||
case 2:
|
||||
hdr.Format = tar.FormatPAX
|
||||
case 3:
|
||||
hdr.Format = tar.FormatGNU
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error {
|
||||
ind, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch ind % 13 {
|
||||
case 0:
|
||||
hdr.Typeflag = tar.TypeReg
|
||||
case 1:
|
||||
hdr.Typeflag = tar.TypeLink
|
||||
linkname, err := f.GetString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Linkname = linkname
|
||||
case 2:
|
||||
hdr.Typeflag = tar.TypeSymlink
|
||||
linkname, err := f.GetString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Linkname = linkname
|
||||
case 3:
|
||||
hdr.Typeflag = tar.TypeChar
|
||||
case 4:
|
||||
hdr.Typeflag = tar.TypeBlock
|
||||
case 5:
|
||||
hdr.Typeflag = tar.TypeDir
|
||||
case 6:
|
||||
hdr.Typeflag = tar.TypeFifo
|
||||
case 7:
|
||||
hdr.Typeflag = tar.TypeCont
|
||||
case 8:
|
||||
hdr.Typeflag = tar.TypeXHeader
|
||||
case 9:
|
||||
hdr.Typeflag = tar.TypeXGlobalHeader
|
||||
case 10:
|
||||
hdr.Typeflag = tar.TypeGNUSparse
|
||||
case 11:
|
||||
hdr.Typeflag = tar.TypeGNULongName
|
||||
case 12:
|
||||
hdr.Typeflag = tar.TypeGNULongLink
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) {
|
||||
return f.GetBytes()
|
||||
/*length, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
|
||||
// A bit of optimization to attempt to create a file body
|
||||
// when we don't have as many bytes left as "length"
|
||||
remainingBytes := f.dataTotal - f.position
|
||||
if remainingBytes <= 0 {
|
||||
return nil, errors.New("created too large a string")
|
||||
}
|
||||
if f.position+length > MaxTotalLen {
|
||||
return nil, errors.New("created too large a string")
|
||||
}
|
||||
byteBegin := f.position
|
||||
if byteBegin >= f.dataTotal {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
if length == 0 {
|
||||
return nil, errors.New("zero-length is not supported")
|
||||
}
|
||||
if byteBegin+length >= f.dataTotal {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
if byteBegin+length < byteBegin {
|
||||
return nil, errors.New("numbers overflow")
|
||||
}
|
||||
f.position = byteBegin + length
|
||||
return f.data[byteBegin:f.position], nil*/
|
||||
}
|
||||
|
||||
// getTarFileName is similar to GetString(), but creates string based
|
||||
// on the length of f.data to reduce the likelihood of overflowing
|
||||
// f.data.
|
||||
func (f *ConsumeFuzzer) getTarFilename() (string, error) {
|
||||
return f.GetString()
|
||||
/*length, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
|
||||
// A bit of optimization to attempt to create a file name
|
||||
// when we don't have as many bytes left as "length"
|
||||
remainingBytes := f.dataTotal - f.position
|
||||
if remainingBytes <= 0 {
|
||||
return "nil", errors.New("created too large a string")
|
||||
}
|
||||
if f.position > MaxTotalLen {
|
||||
return "nil", errors.New("created too large a string")
|
||||
}
|
||||
byteBegin := f.position
|
||||
if byteBegin >= f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if byteBegin+length > f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if byteBegin > byteBegin+length {
|
||||
return "nil", errors.New("numbers overflow")
|
||||
}
|
||||
f.position = byteBegin + length
|
||||
return string(f.data[byteBegin:f.position]), nil*/
|
||||
}
|
||||
|
||||
type TarFile struct {
|
||||
Hdr *tar.Header
|
||||
Body []byte
|
||||
}
|
||||
|
||||
// TarBytes returns valid bytes for a tar archive
|
||||
func (f *ConsumeFuzzer) TarBytes() ([]byte, error) {
|
||||
numberOfFiles, err := f.GetInt()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var tarFiles []*TarFile
|
||||
tarFiles = make([]*TarFile, 0)
|
||||
|
||||
const maxNoOfFiles = 100
|
||||
for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
|
||||
var filename string
|
||||
var filebody []byte
|
||||
var sec, nsec int
|
||||
var err error
|
||||
|
||||
filename, err = f.getTarFilename()
|
||||
if err != nil {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("file-")
|
||||
sb.WriteString(strconv.Itoa(i))
|
||||
filename = sb.String()
|
||||
}
|
||||
filebody, err = f.createTarFileBody()
|
||||
if err != nil {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("filebody-")
|
||||
sb.WriteString(strconv.Itoa(i))
|
||||
filebody = []byte(sb.String())
|
||||
}
|
||||
|
||||
sec, err = f.GetInt()
|
||||
if err != nil {
|
||||
sec = 1672531200 // beginning of 2023
|
||||
}
|
||||
nsec, err = f.GetInt()
|
||||
if err != nil {
|
||||
nsec = 1703980800 // end of 2023
|
||||
}
|
||||
|
||||
hdr := &tar.Header{
|
||||
Name: filename,
|
||||
Size: int64(len(filebody)),
|
||||
Mode: 0o600,
|
||||
ModTime: time.Unix(int64(sec), int64(nsec)),
|
||||
}
|
||||
if err := setTarHeaderTypeflag(hdr, f); err != nil {
|
||||
return []byte(""), err
|
||||
}
|
||||
if err := setTarHeaderFormat(hdr, f); err != nil {
|
||||
return []byte(""), err
|
||||
}
|
||||
tf := &TarFile{
|
||||
Hdr: hdr,
|
||||
Body: filebody,
|
||||
}
|
||||
tarFiles = append(tarFiles, tf)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
tw := tar.NewWriter(&buf)
|
||||
defer tw.Close()
|
||||
|
||||
for _, tf := range tarFiles {
|
||||
tw.WriteHeader(tf.Hdr)
|
||||
tw.Write(tf.Body)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// This is similar to TarBytes, but it returns a series of
|
||||
// files instead of raw tar bytes. The advantage of this
|
||||
// api is that it is cheaper in terms of cpu power to
|
||||
// modify or check the files in the fuzzer with TarFiles()
|
||||
// because it avoids creating a tar reader.
|
||||
func (f *ConsumeFuzzer) TarFiles() ([]*TarFile, error) {
|
||||
numberOfFiles, err := f.GetInt()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var tarFiles []*TarFile
|
||||
tarFiles = make([]*TarFile, 0)
|
||||
|
||||
const maxNoOfFiles = 100
|
||||
for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
|
||||
filename, err := f.getTarFilename()
|
||||
if err != nil {
|
||||
return tarFiles, err
|
||||
}
|
||||
filebody, err := f.createTarFileBody()
|
||||
if err != nil {
|
||||
return tarFiles, err
|
||||
}
|
||||
|
||||
sec, err := f.GetInt()
|
||||
if err != nil {
|
||||
return tarFiles, err
|
||||
}
|
||||
nsec, err := f.GetInt()
|
||||
if err != nil {
|
||||
return tarFiles, err
|
||||
}
|
||||
|
||||
hdr := &tar.Header{
|
||||
Name: filename,
|
||||
Size: int64(len(filebody)),
|
||||
Mode: 0o600,
|
||||
ModTime: time.Unix(int64(sec), int64(nsec)),
|
||||
}
|
||||
if err := setTarHeaderTypeflag(hdr, f); err != nil {
|
||||
hdr.Typeflag = tar.TypeReg
|
||||
}
|
||||
if err := setTarHeaderFormat(hdr, f); err != nil {
|
||||
return tarFiles, err // should not happend
|
||||
}
|
||||
tf := &TarFile{
|
||||
Hdr: hdr,
|
||||
Body: filebody,
|
||||
}
|
||||
tarFiles = append(tarFiles, tf)
|
||||
}
|
||||
return tarFiles, nil
|
||||
}
|
||||
|
||||
// CreateFiles creates pseudo-random files in rootDir.
|
||||
// It creates subdirs and places the files there.
|
||||
// It is the callers responsibility to ensure that
|
||||
// rootDir exists.
|
||||
func (f *ConsumeFuzzer) CreateFiles(rootDir string) error {
|
||||
numberOfFiles, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
maxNumberOfFiles := numberOfFiles % 4000 // This is completely arbitrary
|
||||
if maxNumberOfFiles == 0 {
|
||||
return errors.New("maxNumberOfFiles is nil")
|
||||
}
|
||||
|
||||
var noOfCreatedFiles int
|
||||
for i := 0; i < maxNumberOfFiles; i++ {
|
||||
// The file to create:
|
||||
fileName, err := f.GetString()
|
||||
if err != nil {
|
||||
if noOfCreatedFiles > 0 {
|
||||
// If files have been created, we don't return an error.
|
||||
break
|
||||
} else {
|
||||
return errors.New("could not get fileName")
|
||||
}
|
||||
}
|
||||
if strings.Contains(fileName, "..") || (len(fileName) > 0 && fileName[0] == 47) || strings.Contains(fileName, "\\") {
|
||||
continue
|
||||
}
|
||||
fullFilePath := filepath.Join(rootDir, fileName)
|
||||
|
||||
// Find the subdirectory of the file
|
||||
if subDir := filepath.Dir(fileName); subDir != "" && subDir != "." {
|
||||
// create the dir first; avoid going outside the root dir
|
||||
if strings.Contains(subDir, "../") || (len(subDir) > 0 && subDir[0] == 47) || strings.Contains(subDir, "\\") {
|
||||
continue
|
||||
}
|
||||
dirPath := filepath.Join(rootDir, subDir)
|
||||
if _, err := os.Stat(dirPath); os.IsNotExist(err) {
|
||||
err2 := os.MkdirAll(dirPath, 0o777)
|
||||
if err2 != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
fullFilePath = filepath.Join(dirPath, fileName)
|
||||
} else {
|
||||
// Create symlink
|
||||
createSymlink, err := f.GetBool()
|
||||
if err != nil {
|
||||
if noOfCreatedFiles > 0 {
|
||||
break
|
||||
} else {
|
||||
return errors.New("could not create the symlink")
|
||||
}
|
||||
}
|
||||
if createSymlink {
|
||||
symlinkTarget, err := f.GetString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Symlink(symlinkTarget, fullFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// stop loop here, since a symlink needs no further action
|
||||
noOfCreatedFiles++
|
||||
continue
|
||||
}
|
||||
// We create a normal file
|
||||
fileContents, err := f.GetBytes()
|
||||
if err != nil {
|
||||
if noOfCreatedFiles > 0 {
|
||||
break
|
||||
} else {
|
||||
return errors.New("could not create the file")
|
||||
}
|
||||
}
|
||||
err = os.WriteFile(fullFilePath, fileContents, 0o666)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
noOfCreatedFiles++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetStringFrom returns a string that can only consist of characters
|
||||
// included in possibleChars. It returns an error if the created string
|
||||
// does not have the specified length.
|
||||
func (f *ConsumeFuzzer) GetStringFrom(possibleChars string, length int) (string, error) {
|
||||
if (f.dataTotal - f.position) < uint32(length) {
|
||||
return "", errors.New("not enough bytes to create a string")
|
||||
}
|
||||
output := make([]byte, 0, length)
|
||||
for i := 0; i < length; i++ {
|
||||
charIndex, err := f.GetInt()
|
||||
if err != nil {
|
||||
return string(output), err
|
||||
}
|
||||
output = append(output, possibleChars[charIndex%len(possibleChars)])
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetRune() ([]rune, error) {
|
||||
stringToConvert, err := f.GetString()
|
||||
if err != nil {
|
||||
return []rune("nil"), err
|
||||
}
|
||||
return []rune(stringToConvert), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetFloat32() (float32, error) {
|
||||
u32, err := f.GetNBytes(4)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
littleEndian, err := f.GetBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if littleEndian {
|
||||
u32LE := binary.LittleEndian.Uint32(u32)
|
||||
return math.Float32frombits(u32LE), nil
|
||||
}
|
||||
u32BE := binary.BigEndian.Uint32(u32)
|
||||
return math.Float32frombits(u32BE), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetFloat64() (float64, error) {
|
||||
u64, err := f.GetNBytes(8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
littleEndian, err := f.GetBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if littleEndian {
|
||||
u64LE := binary.LittleEndian.Uint64(u64)
|
||||
return math.Float64frombits(u64LE), nil
|
||||
}
|
||||
u64BE := binary.BigEndian.Uint64(u64)
|
||||
return math.Float64frombits(u64BE), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) CreateSlice(targetSlice interface{}) error {
|
||||
return f.GenerateStruct(targetSlice)
|
||||
}
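TarBytes in the file above returns the raw bytes of a generated archive. A hedged sketch of reading those bytes back with archive/tar as a sanity check; the seed bytes are arbitrary and may simply be too short, which the sketch tolerates:

```go
// Sketch: consuming the output of TarBytes with archive/tar.
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"

	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

func main() {
	// In a real fuzz target these bytes come from the fuzzing engine.
	c := fuzz.NewConsumer([]byte("some fuzz engine provided bytes............"))

	archive, err := c.TarBytes()
	if err != nil {
		fmt.Println("not enough data to build an archive:", err)
		return
	}

	// Walk the archive entries to confirm the bytes parse as tar.
	tr := tar.NewReader(bytes.NewReader(archive))
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("invalid archive:", err)
			return
		}
		fmt.Println("entry:", hdr.Name)
	}
}
```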
62 vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go (generated, vendored, new file)
@ -0,0 +1,62 @@
|
||||
// Copyright 2023 The go-fuzz-headers Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gofuzzheaders
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type Continue struct {
|
||||
F *ConsumeFuzzer
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) AddFuncs(fuzzFuncs []interface{}) {
|
||||
for i := range fuzzFuncs {
|
||||
v := reflect.ValueOf(fuzzFuncs[i])
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("Need only funcs!")
|
||||
}
|
||||
t := v.Type()
|
||||
if t.NumIn() != 2 || t.NumOut() != 1 {
|
||||
fmt.Println(t.NumIn(), t.NumOut())
|
||||
|
||||
panic("Need 2 in and 1 out params. In must be the type. Out must be an error")
|
||||
}
|
||||
argT := t.In(0)
|
||||
switch argT.Kind() {
|
||||
case reflect.Ptr, reflect.Map:
|
||||
default:
|
||||
panic("fuzzFunc must take pointer or map type")
|
||||
}
|
||||
if t.In(1) != reflect.TypeOf(Continue{}) {
|
||||
panic("fuzzFunc's second parameter must be type Continue")
|
||||
}
|
||||
f.Funcs[argT] = v
|
||||
}
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GenerateWithCustom(targetStruct interface{}) error {
|
||||
e := reflect.ValueOf(targetStruct).Elem()
|
||||
return f.fuzzStruct(e, true)
|
||||
}
|
||||
|
||||
func (c Continue) GenerateStruct(targetStruct interface{}) error {
|
||||
return c.F.GenerateStruct(targetStruct)
|
||||
}
|
||||
|
||||
func (c Continue) GenerateStructWithCustom(targetStruct interface{}) error {
|
||||
return c.F.GenerateWithCustom(targetStruct)
|
||||
}
|
556 vendor/github.com/AdaLogics/go-fuzz-headers/sql.go (generated, vendored, new file)
@ -0,0 +1,556 @@
|
||||
// Copyright 2023 The go-fuzz-headers Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gofuzzheaders
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// returns a keyword by index
|
||||
func getKeyword(f *ConsumeFuzzer) (string, error) {
|
||||
index, err := f.GetInt()
|
||||
if err != nil {
|
||||
return keywords[0], err
|
||||
}
|
||||
for i, k := range keywords {
|
||||
if i == index {
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
return keywords[0], fmt.Errorf("could not get a kw")
|
||||
}
|
||||
|
||||
// Simple utility function to check if a string
|
||||
// slice contains a string.
|
||||
func containsString(s []string, e string) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// These keywords are used specifically for fuzzing Vitess
|
||||
var keywords = []string{
|
||||
"accessible", "action", "add", "after", "against", "algorithm",
|
||||
"all", "alter", "always", "analyze", "and", "as", "asc", "asensitive",
|
||||
"auto_increment", "avg_row_length", "before", "begin", "between",
|
||||
"bigint", "binary", "_binary", "_utf8mb4", "_utf8", "_latin1", "bit",
|
||||
"blob", "bool", "boolean", "both", "by", "call", "cancel", "cascade",
|
||||
"cascaded", "case", "cast", "channel", "change", "char", "character",
|
||||
"charset", "check", "checksum", "coalesce", "code", "collate", "collation",
|
||||
"column", "columns", "comment", "committed", "commit", "compact", "complete",
|
||||
"compressed", "compression", "condition", "connection", "constraint", "continue",
|
||||
"convert", "copy", "cume_dist", "substr", "substring", "create", "cross",
|
||||
"csv", "current_date", "current_time", "current_timestamp", "current_user",
|
||||
"cursor", "data", "database", "databases", "day", "day_hour", "day_microsecond",
|
||||
"day_minute", "day_second", "date", "datetime", "dec", "decimal", "declare",
|
||||
"default", "definer", "delay_key_write", "delayed", "delete", "dense_rank",
|
||||
"desc", "describe", "deterministic", "directory", "disable", "discard",
|
||||
"disk", "distinct", "distinctrow", "div", "double", "do", "drop", "dumpfile",
|
||||
"duplicate", "dynamic", "each", "else", "elseif", "empty", "enable",
|
||||
"enclosed", "encryption", "end", "enforced", "engine", "engines", "enum",
|
||||
"error", "escape", "escaped", "event", "exchange", "exclusive", "exists",
|
||||
"exit", "explain", "expansion", "export", "extended", "extract", "false",
|
||||
"fetch", "fields", "first", "first_value", "fixed", "float", "float4",
|
||||
"float8", "flush", "for", "force", "foreign", "format", "from", "full",
|
||||
"fulltext", "function", "general", "generated", "geometry", "geometrycollection",
|
||||
"get", "global", "gtid_executed", "grant", "group", "grouping", "groups",
|
||||
"group_concat", "having", "header", "high_priority", "hosts", "hour", "hour_microsecond",
|
||||
"hour_minute", "hour_second", "if", "ignore", "import", "in", "index", "indexes",
|
||||
"infile", "inout", "inner", "inplace", "insensitive", "insert", "insert_method",
|
||||
"int", "int1", "int2", "int3", "int4", "int8", "integer", "interval",
|
||||
"into", "io_after_gtids", "is", "isolation", "iterate", "invoker", "join",
|
||||
"json", "json_table", "key", "keys", "keyspaces", "key_block_size", "kill", "lag",
|
||||
"language", "last", "last_value", "last_insert_id", "lateral", "lead", "leading",
|
||||
"leave", "left", "less", "level", "like", "limit", "linear", "lines",
|
||||
"linestring", "load", "local", "localtime", "localtimestamp", "lock", "logs",
|
||||
"long", "longblob", "longtext", "loop", "low_priority", "manifest",
|
||||
"master_bind", "match", "max_rows", "maxvalue", "mediumblob", "mediumint",
|
||||
"mediumtext", "memory", "merge", "microsecond", "middleint", "min_rows", "minute",
|
||||
"minute_microsecond", "minute_second", "mod", "mode", "modify", "modifies",
|
||||
"multilinestring", "multipoint", "multipolygon", "month", "name",
|
||||
"names", "natural", "nchar", "next", "no", "none", "not", "no_write_to_binlog",
|
||||
"nth_value", "ntile", "null", "numeric", "of", "off", "offset", "on",
|
||||
"only", "open", "optimize", "optimizer_costs", "option", "optionally",
|
||||
"or", "order", "out", "outer", "outfile", "over", "overwrite", "pack_keys",
|
||||
"parser", "partition", "partitioning", "password", "percent_rank", "plugins",
|
||||
"point", "polygon", "precision", "primary", "privileges", "processlist",
|
||||
"procedure", "query", "quarter", "range", "rank", "read", "reads", "read_write",
|
||||
"real", "rebuild", "recursive", "redundant", "references", "regexp", "relay",
|
||||
"release", "remove", "rename", "reorganize", "repair", "repeat", "repeatable",
|
||||
"replace", "require", "resignal", "restrict", "return", "retry", "revert",
|
||||
"revoke", "right", "rlike", "rollback", "row", "row_format", "row_number",
|
||||
"rows", "s3", "savepoint", "schema", "schemas", "second", "second_microsecond",
|
||||
"security", "select", "sensitive", "separator", "sequence", "serializable",
|
||||
"session", "set", "share", "shared", "show", "signal", "signed", "slow",
|
||||
"smallint", "spatial", "specific", "sql", "sqlexception", "sqlstate",
|
||||
"sqlwarning", "sql_big_result", "sql_cache", "sql_calc_found_rows",
|
||||
"sql_no_cache", "sql_small_result", "ssl", "start", "starting",
|
||||
"stats_auto_recalc", "stats_persistent", "stats_sample_pages", "status",
|
||||
"storage", "stored", "straight_join", "stream", "system", "vstream",
|
||||
"table", "tables", "tablespace", "temporary", "temptable", "terminated",
|
||||
"text", "than", "then", "time", "timestamp", "timestampadd", "timestampdiff",
|
||||
"tinyblob", "tinyint", "tinytext", "to", "trailing", "transaction", "tree",
|
||||
"traditional", "trigger", "triggers", "true", "truncate", "uncommitted",
|
||||
"undefined", "undo", "union", "unique", "unlock", "unsigned", "update",
|
||||
"upgrade", "usage", "use", "user", "user_resources", "using", "utc_date",
|
||||
"utc_time", "utc_timestamp", "validation", "values", "variables", "varbinary",
|
||||
"varchar", "varcharacter", "varying", "vgtid_executed", "virtual", "vindex",
|
||||
"vindexes", "view", "vitess", "vitess_keyspaces", "vitess_metadata",
|
||||
"vitess_migration", "vitess_migrations", "vitess_replication_status",
|
||||
"vitess_shards", "vitess_tablets", "vschema", "warnings", "when",
|
||||
"where", "while", "window", "with", "without", "work", "write", "xor",
|
||||
"year", "year_month", "zerofill",
|
||||
}
|
||||
|
||||
// Keywords that could get an additional keyword
|
||||
var needCustomString = []string{
|
||||
"DISTINCTROW", "FROM", // Select keywords:
|
||||
"GROUP BY", "HAVING", "WINDOW",
|
||||
"FOR",
|
||||
"ORDER BY", "LIMIT",
|
||||
"INTO", "PARTITION", "AS", // Insert Keywords:
|
||||
"ON DUPLICATE KEY UPDATE",
|
||||
"WHERE", "LIMIT", // Delete keywords
|
||||
"INFILE", "INTO TABLE", "CHARACTER SET", // Load keywords
|
||||
"TERMINATED BY", "ENCLOSED BY",
|
||||
"ESCAPED BY", "STARTING BY",
|
||||
"TERMINATED BY", "STARTING BY",
|
||||
"IGNORE",
|
||||
"VALUE", "VALUES", // Replace tokens
|
||||
"SET", // Update tokens
|
||||
"ENGINE =", // Drop tokens
|
||||
"DEFINER =", "ON SCHEDULE", "RENAME TO", // Alter tokens
|
||||
"COMMENT", "DO", "INITIAL_SIZE = ", "OPTIONS",
|
||||
}
|
||||
|
||||
var alterTableTokens = [][]string{
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"CUSTOM_ALTTER_TABLE_OPTIONS"},
|
||||
{"PARTITION_OPTIONS_FOR_ALTER_TABLE"},
|
||||
}
|
||||
|
||||
var alterTokens = [][]string{
|
||||
{
|
||||
"DATABASE", "SCHEMA", "DEFINER = ", "EVENT", "FUNCTION", "INSTANCE",
|
||||
"LOGFILE GROUP", "PROCEDURE", "SERVER",
|
||||
},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{
|
||||
"ON SCHEDULE", "ON COMPLETION PRESERVE", "ON COMPLETION NOT PRESERVE",
|
||||
"ADD UNDOFILE", "OPTIONS",
|
||||
},
|
||||
{"RENAME TO", "INITIAL_SIZE = "},
|
||||
{"ENABLE", "DISABLE", "DISABLE ON SLAVE", "ENGINE"},
|
||||
{"COMMENT"},
|
||||
{"DO"},
|
||||
}
|
||||
|
||||
var setTokens = [][]string{
|
||||
{"CHARACTER SET", "CHARSET", "CUSTOM_FUZZ_STRING", "NAMES"},
|
||||
{"CUSTOM_FUZZ_STRING", "DEFAULT", "="},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
var dropTokens = [][]string{
|
||||
{"TEMPORARY", "UNDO"},
|
||||
{
|
||||
"DATABASE", "SCHEMA", "EVENT", "INDEX", "LOGFILE GROUP",
|
||||
"PROCEDURE", "FUNCTION", "SERVER", "SPATIAL REFERENCE SYSTEM",
|
||||
"TABLE", "TABLESPACE", "TRIGGER", "VIEW",
|
||||
},
|
||||
{"IF EXISTS"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"ON", "ENGINE = ", "RESTRICT", "CASCADE"},
|
||||
}
|
||||
|
||||
var renameTokens = [][]string{
|
||||
{"TABLE"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"TO"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
var truncateTokens = [][]string{
|
||||
{"TABLE"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
var createTokens = [][]string{
|
||||
{"OR REPLACE", "TEMPORARY", "UNDO"}, // For create spatial reference system
|
||||
{
|
||||
"UNIQUE", "FULLTEXT", "SPATIAL", "ALGORITHM = UNDEFINED", "ALGORITHM = MERGE",
|
||||
"ALGORITHM = TEMPTABLE",
|
||||
},
|
||||
{
|
||||
"DATABASE", "SCHEMA", "EVENT", "FUNCTION", "INDEX", "LOGFILE GROUP",
|
||||
"PROCEDURE", "SERVER", "SPATIAL REFERENCE SYSTEM", "TABLE", "TABLESPACE",
|
||||
"TRIGGER", "VIEW",
|
||||
},
|
||||
{"IF NOT EXISTS"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
/*
|
||||
// For future use.
|
||||
var updateTokens = [][]string{
|
||||
{"LOW_PRIORITY"},
|
||||
{"IGNORE"},
|
||||
{"SET"},
|
||||
{"WHERE"},
|
||||
{"ORDER BY"},
|
||||
{"LIMIT"},
|
||||
}
|
||||
*/
|
||||
|
||||
var replaceTokens = [][]string{
|
||||
{"LOW_PRIORITY", "DELAYED"},
|
||||
{"INTO"},
|
||||
{"PARTITION"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"VALUES", "VALUE"},
|
||||
}
|
||||
|
||||
var loadTokens = [][]string{
|
||||
{"DATA"},
|
||||
{"LOW_PRIORITY", "CONCURRENT", "LOCAL"},
|
||||
{"INFILE"},
|
||||
{"REPLACE", "IGNORE"},
|
||||
{"INTO TABLE"},
|
||||
{"PARTITION"},
|
||||
{"CHARACTER SET"},
|
||||
{"FIELDS", "COLUMNS"},
|
||||
{"TERMINATED BY"},
|
||||
{"OPTIONALLY"},
|
||||
{"ENCLOSED BY"},
|
||||
{"ESCAPED BY"},
|
||||
{"LINES"},
|
||||
{"STARTING BY"},
|
||||
{"TERMINATED BY"},
|
||||
{"IGNORE"},
|
||||
{"LINES", "ROWS"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
// These Are everything that comes after "INSERT"
|
||||
var insertTokens = [][]string{
|
||||
{"LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", "IGNORE"},
|
||||
{"INTO"},
|
||||
{"PARTITION"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"AS"},
|
||||
{"ON DUPLICATE KEY UPDATE"},
|
||||
}
|
||||
|
||||
// These are everything that comes after "SELECT"
|
||||
var selectTokens = [][]string{
|
||||
{"*", "CUSTOM_FUZZ_STRING", "DISTINCTROW"},
|
||||
{"HIGH_PRIORITY"},
|
||||
{"STRAIGHT_JOIN"},
|
||||
{"SQL_SMALL_RESULT", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT"},
|
||||
{"SQL_NO_CACHE", "SQL_CALC_FOUND_ROWS"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"FROM"},
|
||||
{"WHERE"},
|
||||
{"GROUP BY"},
|
||||
{"HAVING"},
|
||||
{"WINDOW"},
|
||||
{"ORDER BY"},
|
||||
{"LIMIT"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"FOR"},
|
||||
}
|
||||
|
||||
// These are everything that comes after "DELETE"
|
||||
var deleteTokens = [][]string{
|
||||
{"LOW_PRIORITY", "QUICK", "IGNORE", "FROM", "AS"},
|
||||
{"PARTITION"},
|
||||
{"WHERE"},
|
||||
{"ORDER BY"},
|
||||
{"LIMIT"},
|
||||
}
|
||||
|
||||
var alter_table_options = []string{
|
||||
"ADD", "COLUMN", "FIRST", "AFTER", "INDEX", "KEY", "FULLTEXT", "SPATIAL",
|
||||
"CONSTRAINT", "UNIQUE", "FOREIGN KEY", "CHECK", "ENFORCED", "DROP", "ALTER",
|
||||
"NOT", "INPLACE", "COPY", "SET", "VISIBLE", "INVISIBLE", "DEFAULT", "CHANGE",
|
||||
"CHARACTER SET", "COLLATE", "DISABLE", "ENABLE", "KEYS", "TABLESPACE", "LOCK",
|
||||
"FORCE", "MODIFY", "SHARED", "EXCLUSIVE", "NONE", "ORDER BY", "RENAME COLUMN",
|
||||
"AS", "=", "ASC", "DESC", "WITH", "WITHOUT", "VALIDATION", "ADD PARTITION",
|
||||
"DROP PARTITION", "DISCARD PARTITION", "IMPORT PARTITION", "TRUNCATE PARTITION",
|
||||
"COALESCE PARTITION", "REORGANIZE PARTITION", "EXCHANGE PARTITION",
|
||||
"ANALYZE PARTITION", "CHECK PARTITION", "OPTIMIZE PARTITION", "REBUILD PARTITION",
|
||||
"REPAIR PARTITION", "REMOVE PARTITIONING", "USING", "BTREE", "HASH", "COMMENT",
|
||||
"KEY_BLOCK_SIZE", "WITH PARSER", "AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG_ROW_LENGTH",
|
||||
"CHECKSUM", "INSERT_METHOD", "ROW_FORMAT", "DYNAMIC", "FIXED", "COMPRESSED", "REDUNDANT",
|
||||
"COMPACT", "SECONDARY_ENGINE_ATTRIBUTE", "STATS_AUTO_RECALC", "STATS_PERSISTENT",
|
||||
"STATS_SAMPLE_PAGES", "ZLIB", "LZ4", "ENGINE_ATTRIBUTE", "KEY_BLOCK_SIZE", "MAX_ROWS",
|
||||
"MIN_ROWS", "PACK_KEYS", "PASSWORD", "COMPRESSION", "CONNECTION", "DIRECTORY",
|
||||
"DELAY_KEY_WRITE", "ENCRYPTION", "STORAGE", "DISK", "MEMORY", "UNION",
|
||||
}
|
||||
|
||||
// Creates an 'alter table' statement. 'alter table' is an exception
|
||||
// in that it has its own function. The majority of statements
|
||||
// are created by 'createStmt()'.
|
||||
func createAlterTableStmt(f *ConsumeFuzzer) (string, error) {
|
||||
maxArgs, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
maxArgs = maxArgs % 30
|
||||
if maxArgs == 0 {
|
||||
return "", fmt.Errorf("could not create alter table stmt")
|
||||
}
|
||||
|
||||
var stmt strings.Builder
|
||||
stmt.WriteString("ALTER TABLE ")
|
||||
for i := 0; i < maxArgs; i++ {
|
||||
// Calculate if we get existing token or custom string
|
||||
tokenType, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if tokenType%4 == 1 {
|
||||
customString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
stmt.WriteString(" " + customString)
|
||||
} else {
|
||||
tokenIndex, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
stmt.WriteString(" " + alter_table_options[tokenIndex%len(alter_table_options)])
|
||||
}
|
||||
}
|
||||
return stmt.String(), nil
|
||||
}
|
||||
|
||||
func chooseToken(tokens []string, f *ConsumeFuzzer) (string, error) {
|
||||
index, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var token strings.Builder
|
||||
token.WriteString(tokens[index%len(tokens)])
|
||||
if token.String() == "CUSTOM_FUZZ_STRING" {
|
||||
customFuzzString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return customFuzzString, nil
|
||||
}
|
||||
|
||||
// Check if token requires an argument
|
||||
if containsString(needCustomString, token.String()) {
|
||||
customFuzzString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
token.WriteString(" " + customFuzzString)
|
||||
}
|
||||
return token.String(), nil
|
||||
}
|
||||
|
||||
var stmtTypes = map[string][][]string{
|
||||
"DELETE": deleteTokens,
|
||||
"INSERT": insertTokens,
|
||||
"SELECT": selectTokens,
|
||||
"LOAD": loadTokens,
|
||||
"REPLACE": replaceTokens,
|
||||
"CREATE": createTokens,
|
||||
"DROP": dropTokens,
|
||||
"RENAME": renameTokens,
|
||||
"TRUNCATE": truncateTokens,
|
||||
"SET": setTokens,
|
||||
"ALTER": alterTokens,
|
||||
"ALTER TABLE": alterTableTokens, // ALTER TABLE has its own set of tokens
|
||||
}
|
||||
|
||||
var stmtTypeEnum = map[int]string{
|
||||
0: "DELETE",
|
||||
1: "INSERT",
|
||||
2: "SELECT",
|
||||
3: "LOAD",
|
||||
4: "REPLACE",
|
||||
5: "CREATE",
|
||||
6: "DROP",
|
||||
7: "RENAME",
|
||||
8: "TRUNCATE",
|
||||
9: "SET",
|
||||
10: "ALTER",
|
||||
11: "ALTER TABLE",
|
||||
}
|
||||
|
||||
func createStmt(f *ConsumeFuzzer) (string, error) {
|
||||
stmtIndex, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
stmtIndex = stmtIndex % len(stmtTypes)
|
||||
|
||||
queryType := stmtTypeEnum[stmtIndex]
|
||||
tokens := stmtTypes[queryType]
|
||||
|
||||
// We have custom creator for ALTER TABLE
|
||||
if queryType == "ALTER TABLE" {
|
||||
query, err := createAlterTableStmt(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return query, nil
|
||||
}
|
||||
|
||||
// Here we are creating a query that is not
|
||||
// an 'alter table' query. For available
|
||||
// queries, see "stmtTypes"
|
||||
|
||||
// First specify the first query keyword:
|
||||
var query strings.Builder
|
||||
query.WriteString(queryType)
|
||||
|
||||
// Next create the args for the
|
||||
queryArgs, err := createStmtArgs(tokens, f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
query.WriteString(" " + queryArgs)
|
||||
return query.String(), nil
|
||||
}
|
||||
|
||||
// Creates the arguments of a statements. In a select statement
|
||||
// that would be everything after "select".
|
||||
func createStmtArgs(tokenslice [][]string, f *ConsumeFuzzer) (string, error) {
|
||||
var query, token strings.Builder
|
||||
|
||||
// We go through the tokens in the tokenslice,
|
||||
// create the respective token and add it to
|
||||
// "query"
|
||||
for _, tokens := range tokenslice {
|
||||
// For extra randomization, the fuzzer can
|
||||
// choose to not include this token.
|
||||
includeThisToken, err := f.GetBool()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !includeThisToken {
|
||||
continue
|
||||
}
|
||||
|
||||
// There may be several tokens to choose from:
|
||||
if len(tokens) > 1 {
|
||||
chosenToken, err := chooseToken(tokens, f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
query.WriteString(" " + chosenToken)
|
||||
} else {
|
||||
token.WriteString(tokens[0])
|
||||
|
||||
// In case the token is "CUSTOM_FUZZ_STRING"
|
||||
// we will then create a non-structured string
|
||||
if token.String() == "CUSTOM_FUZZ_STRING" {
|
||||
customFuzzString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
query.WriteString(" " + customFuzzString)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if token requires an argument.
|
||||
// Tokens that take an argument can be found
|
||||
// in 'needCustomString'. If so, we add a
|
||||
// non-structured string to the token.
|
||||
if containsString(needCustomString, token.String()) {
|
||||
customFuzzString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
token.WriteString(fmt.Sprintf(" %s", customFuzzString))
|
||||
}
|
||||
query.WriteString(fmt.Sprintf(" %s", token.String()))
|
||||
}
|
||||
}
|
||||
return query.String(), nil
|
||||
}
|
||||
|
||||
// Creates a semi-structured query. It creates a string
|
||||
// that is a combination of the keywords and random strings.
|
||||
func createQuery(f *ConsumeFuzzer) (string, error) {
|
||||
queryLen, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
maxLen := queryLen % 60
|
||||
if maxLen == 0 {
|
||||
return "", fmt.Errorf("could not create a query")
|
||||
}
|
||||
var query strings.Builder
|
||||
for i := 0; i < maxLen; i++ {
|
||||
// Get a new token:
|
||||
useKeyword, err := f.GetBool()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if useKeyword {
|
||||
keyword, err := getKeyword(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
query.WriteString(" " + keyword)
|
||||
} else {
|
||||
customString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
query.WriteString(" " + customString)
|
||||
}
|
||||
}
|
||||
if query.String() == "" {
|
||||
return "", fmt.Errorf("could not create a query")
|
||||
}
|
||||
return query.String(), nil
|
||||
}
|
||||
|
||||
// GetSQLString is the API that users interact with.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// f := NewConsumer(data)
|
||||
// sqlString, err := f.GetSQLString()
|
||||
func (f *ConsumeFuzzer) GetSQLString() (string, error) {
|
||||
var query string
|
||||
veryStructured, err := f.GetBool()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if veryStructured {
|
||||
query, err = createStmt(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
query, err = createQuery(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return query, nil
|
||||
}
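GetSQLString, documented above as the user-facing API of this file, pairs naturally with a fuzz target. A sketch under the assumption of a hypothetical parseSQL entry point standing in for whatever code is actually being fuzzed:

```go
// Sketch: driving a SQL parser with semi-structured queries from GetSQLString.
package mysql_test

import (
	"testing"

	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

// parseSQL is a placeholder for the real parser under test.
func parseSQL(query string) error { return nil }

func FuzzParseSQL(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		c := fuzz.NewConsumer(data)
		query, err := c.GetSQLString()
		if err != nil {
			return // not enough input to build a query
		}
		_ = parseSQL(query)
	})
}
```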
201 vendor/github.com/AdamKorcz/go-118-fuzz-build/LICENSE (generated, vendored, new file)
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
207
vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
fuzz "github.com/AdaLogics/go-fuzz-headers"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type F struct {
|
||||
Data []byte
|
||||
T *T
|
||||
FuzzFunc func(*T, any)
|
||||
}
|
||||
|
||||
func (f *F) CleanupTempDirs() {
|
||||
f.T.CleanupTempDirs()
|
||||
}
|
||||
|
||||
func (f *F) Add(args ...any) {}
|
||||
func (c *F) Cleanup(f func()) {}
|
||||
func (c *F) Error(args ...any) {}
|
||||
func (c *F) Errorf(format string, args ...any) {}
|
||||
func (f *F) Fail() {}
|
||||
func (c *F) FailNow() {}
|
||||
func (c *F) Failed() bool { return false }
|
||||
func (c *F) Fatal(args ...any) {}
|
||||
func (c *F) Fatalf(format string, args ...any) {}
|
||||
func (f *F) Fuzz(ff any) {
|
||||
// we are assuming that ff is a func.
|
||||
// TODO: Add a check for UX purposes
|
||||
|
||||
fn := reflect.ValueOf(ff)
|
||||
fnType := fn.Type()
|
||||
var types []reflect.Type
|
||||
for i := 1; i < fnType.NumIn(); i++ {
|
||||
t := fnType.In(i)
|
||||
|
||||
types = append(types, t)
|
||||
}
|
||||
args := []reflect.Value{reflect.ValueOf(f.T)}
|
||||
fuzzConsumer := fuzz.NewConsumer(f.Data)
|
||||
for _, v := range types {
|
||||
switch v.String() {
|
||||
case "[]uint8":
|
||||
b, err := fuzzConsumer.GetBytes()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newBytes := reflect.New(v)
|
||||
newBytes.Elem().SetBytes(b)
|
||||
args = append(args, newBytes.Elem())
|
||||
case "string":
|
||||
s, err := fuzzConsumer.GetString()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newString := reflect.New(v)
|
||||
newString.Elem().SetString(s)
|
||||
args = append(args, newString.Elem())
|
||||
case "int":
|
||||
randInt, err := fuzzConsumer.GetInt()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newInt := reflect.New(v)
|
||||
newInt.Elem().SetInt(int64(randInt))
|
||||
args = append(args, newInt.Elem())
|
||||
case "int8":
|
||||
randInt, err := fuzzConsumer.GetInt()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newInt := reflect.New(v)
|
||||
newInt.Elem().SetInt(int64(randInt))
|
||||
args = append(args, newInt.Elem())
|
||||
case "int16":
|
||||
randInt, err := fuzzConsumer.GetInt()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newInt := reflect.New(v)
|
||||
newInt.Elem().SetInt(int64(randInt))
|
||||
args = append(args, newInt.Elem())
|
||||
case "int32":
|
||||
randInt, err := fuzzConsumer.GetInt()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newInt := reflect.New(v)
|
||||
newInt.Elem().SetInt(int64(randInt))
|
||||
args = append(args, newInt.Elem())
|
||||
case "int64":
|
||||
randInt, err := fuzzConsumer.GetInt()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newInt := reflect.New(v)
|
||||
newInt.Elem().SetInt(int64(randInt))
|
||||
args = append(args, newInt.Elem())
|
||||
case "uint":
|
||||
randInt, err := fuzzConsumer.GetInt()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newUint := reflect.New(v)
|
||||
newUint.Elem().SetUint(uint64(randInt))
|
||||
args = append(args, newUint.Elem())
|
||||
case "uint8":
|
||||
randInt, err := fuzzConsumer.GetInt()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newUint := reflect.New(v)
|
||||
newUint.Elem().SetUint(uint64(randInt))
|
||||
args = append(args, newUint.Elem())
|
||||
case "uint16":
|
||||
randInt, err := fuzzConsumer.GetUint16()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newUint16 := reflect.New(v)
|
||||
newUint16.Elem().SetUint(uint64(randInt))
|
||||
args = append(args, newUint16.Elem())
|
||||
case "uint32":
|
||||
randInt, err := fuzzConsumer.GetUint32()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newUint32 := reflect.New(v)
|
||||
newUint32.Elem().SetUint(uint64(randInt))
|
||||
args = append(args, newUint32.Elem())
|
||||
case "uint64":
|
||||
randInt, err := fuzzConsumer.GetUint64()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newUint64 := reflect.New(v)
|
||||
newUint64.Elem().SetUint(uint64(randInt))
|
||||
args = append(args, newUint64.Elem())
|
||||
case "rune":
|
||||
randRune, err := fuzzConsumer.GetRune()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newRune := reflect.New(v)
|
||||
newRune.Elem().Set(reflect.ValueOf(randRune))
|
||||
args = append(args, newRune.Elem())
|
||||
case "float32":
|
||||
randFloat, err := fuzzConsumer.GetFloat32()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newFloat := reflect.New(v)
|
||||
newFloat.Elem().Set(reflect.ValueOf(randFloat))
|
||||
args = append(args, newFloat.Elem())
|
||||
case "float64":
|
||||
randFloat, err := fuzzConsumer.GetFloat64()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newFloat := reflect.New(v)
|
||||
newFloat.Elem().Set(reflect.ValueOf(randFloat))
|
||||
args = append(args, newFloat.Elem())
|
||||
case "bool":
|
||||
randBool, err := fuzzConsumer.GetBool()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newBool := reflect.New(v)
|
||||
newBool.Elem().Set(reflect.ValueOf(randBool))
|
||||
args = append(args, newBool.Elem())
|
||||
default:
|
||||
fmt.Println(v.String())
|
||||
}
|
||||
}
|
||||
fn.Call(args)
|
||||
}
|
||||
func (f *F) Helper() {}
|
||||
func (c *F) Log(args ...any) {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
func (c *F) Logf(format string, args ...any) {
|
||||
fmt.Println(format, args)
|
||||
}
|
||||
func (c *F) Name() string { return "libFuzzer" }
|
||||
func (c *F) Setenv(key, value string) {}
|
||||
func (c *F) Skip(args ...any) {
|
||||
panic("GO-FUZZ-BUILD-PANIC")
|
||||
}
|
||||
func (c *F) SkipNow() {
|
||||
panic("GO-FUZZ-BUILD-PANIC")
|
||||
}
|
||||
func (c *F) Skipf(format string, args ...any) {
|
||||
panic("GO-FUZZ-BUILD-PANIC")
|
||||
}
|
||||
func (f *F) Skipped() bool { return false }
|
||||
|
||||
func (f *F) TempDir() string {
|
||||
dir, err := os.MkdirTemp("", "fuzzdir-")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
f.T.TempDirs = append(f.T.TempDirs, dir)
|
||||
|
||||
return dir
|
||||
}
|
129
vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/t.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// T can be used to terminate the current fuzz iteration
|
||||
// without terminating the whole fuzz run. To do so, simply
|
||||
// panic with the text "GO-FUZZ-BUILD-PANIC" and the fuzzer
|
||||
// will recover.
|
||||
type T struct {
|
||||
TempDirs []string
|
||||
}
|
||||
|
||||
func NewT() *T {
|
||||
tempDirs := make([]string, 0)
|
||||
return &T{TempDirs: tempDirs}
|
||||
}
|
||||
|
||||
func unsupportedApi(name string) string {
|
||||
plsOpenIss := "Please open an issue https://github.com/AdamKorcz/go-118-fuzz-build if you need this feature."
|
||||
var b strings.Builder
|
||||
b.WriteString(fmt.Sprintf("%s is not supported when fuzzing in libFuzzer mode.\n", name))
|
||||
b.WriteString(plsOpenIss)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (t *T) Cleanup(f func()) {
|
||||
f()
|
||||
}
|
||||
|
||||
func (t *T) Deadline() (deadline time.Time, ok bool) {
|
||||
panic(unsupportedApi("t.Deadline()"))
|
||||
}
|
||||
|
||||
func (t *T) Error(args ...any) {
|
||||
fmt.Println(args...)
|
||||
panic("error")
|
||||
}
|
||||
|
||||
func (t *T) Errorf(format string, args ...any) {
|
||||
fmt.Printf(format+"\n", args...)
|
||||
panic("errorf")
|
||||
}
|
||||
|
||||
func (t *T) Fail() {
|
||||
panic("Called T.Fail()")
|
||||
}
|
||||
|
||||
func (t *T) FailNow() {
|
||||
panic("Called T.Fail()")
|
||||
panic(unsupportedApi("t.FailNow()"))
|
||||
}
|
||||
|
||||
func (t *T) Failed() bool {
|
||||
panic(unsupportedApi("t.Failed()"))
|
||||
}
|
||||
|
||||
func (t *T) Fatal(args ...any) {
|
||||
fmt.Println(args...)
|
||||
panic("fatal")
|
||||
}
|
||||
func (t *T) Fatalf(format string, args ...any) {
|
||||
fmt.Printf(format+"\n", args...)
|
||||
panic("fatal")
|
||||
}
|
||||
func (t *T) Helper() {
|
||||
// We can't support it, but it also just impacts how failures are reported, so we can ignore it
|
||||
}
|
||||
func (t *T) Log(args ...any) {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
|
||||
func (t *T) Logf(format string, args ...any) {
|
||||
fmt.Println(format)
|
||||
fmt.Println(args...)
|
||||
}
|
||||
|
||||
func (t *T) Name() string {
|
||||
return "libFuzzer"
|
||||
}
|
||||
|
||||
func (t *T) Parallel() {
|
||||
panic(unsupportedApi("t.Parallel()"))
|
||||
}
|
||||
func (t *T) Run(name string, f func(t *T)) bool {
|
||||
panic(unsupportedApi("t.Run()"))
|
||||
}
|
||||
|
||||
func (t *T) Setenv(key, value string) {
|
||||
|
||||
}
|
||||
|
||||
func (t *T) Skip(args ...any) {
|
||||
panic("GO-FUZZ-BUILD-PANIC")
|
||||
}
|
||||
func (t *T) SkipNow() {
|
||||
panic("GO-FUZZ-BUILD-PANIC")
|
||||
}
|
||||
|
||||
// Is not really supported. We just skip instead
|
||||
// of printing any message. A log message can be
|
||||
// added if need be.
|
||||
func (t *T) Skipf(format string, args ...any) {
|
||||
panic("GO-FUZZ-BUILD-PANIC")
|
||||
}
|
||||
func (t *T) Skipped() bool {
|
||||
panic(unsupportedApi("t.Skipped()"))
|
||||
}
|
||||
func (t *T) TempDir() string {
|
||||
dir, err := os.MkdirTemp("", "fuzzdir-")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
t.TempDirs = append(t.TempDirs, dir)
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func (t *T) CleanupTempDirs() {
|
||||
if len(t.TempDirs) > 0 {
|
||||
for _, tempDir := range t.TempDirs {
|
||||
os.RemoveAll(tempDir)
|
||||
}
|
||||
}
|
||||
}
|
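To illustrate the shim above: a hedged sketch of a target that uses this package's F and T. In practice go-118-fuzz-build generates the driver that fills in `f.Data` and `f.T`; the name `FuzzParse` and the magic prefix below are purely hypothetical.

```go
package fuzz

import (
	fuzzing "github.com/AdamKorcz/go-118-fuzz-build/testing"
)

// FuzzParse is a hypothetical target. F.Fuzz decodes one []byte argument from
// f.Data via the go-fuzz-headers consumer and calls the callback with f.T.
func FuzzParse(f *fuzzing.F) {
	f.Fuzz(func(t *fuzzing.T, data []byte) {
		if len(data) < 4 {
			// Skip panics with "GO-FUZZ-BUILD-PANIC"; the build tool's driver
			// recovers from it, so only this iteration ends, not the whole run.
			t.Skip()
		}
		if string(data[:4]) == "FAAS" { // illustrative check on the input
			t.Log("found magic prefix")
		}
	})
}
```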
42
vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func AllocsPerRun(runs int, f func()) (avg float64) {
|
||||
panic(unsupportedApi("testing.AllocsPerRun"))
|
||||
}
|
||||
func CoverMode() string {
|
||||
panic(unsupportedApi("testing.CoverMode"))
|
||||
}
|
||||
func Coverage() float64 {
|
||||
panic(unsupportedApi("testing.Coverage"))
|
||||
}
|
||||
func Init() {
|
||||
panic(unsupportedApi("testing.Init"))
|
||||
|
||||
}
|
||||
func RegisterCover(c testing.Cover) {
|
||||
panic(unsupportedApi("testing.RegisterCover"))
|
||||
}
|
||||
func RunExamples(matchString func(pat, str string) (bool, error), examples []testing.InternalExample) (ok bool) {
|
||||
panic(unsupportedApi("testing.RunExamples"))
|
||||
}
|
||||
|
||||
func RunTests(matchString func(pat, str string) (bool, error), tests []testing.InternalTest) (ok bool) {
|
||||
panic(unsupportedApi("testing.RunTests"))
|
||||
}
|
||||
|
||||
func Short() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func Verbose() bool {
|
||||
panic(unsupportedApi("testing.Verbose"))
|
||||
}
|
||||
|
||||
type M struct {}
|
||||
func (m *M) Run() (code int) {
|
||||
panic("testing.M is not support in libFuzzer Mode")
|
||||
}
|
1
vendor/github.com/Microsoft/go-winio/.gitattributes
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
* text=auto eol=lf
|
9
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
@ -1 +1,10 @@
|
||||
.vscode/
|
||||
|
||||
*.exe
|
||||
|
||||
# testing
|
||||
testdata
|
||||
|
||||
# go workspaces
|
||||
go.work
|
||||
go.work.sum
|
||||
|
147
vendor/github.com/Microsoft/go-winio/.golangci.yml
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
linters:
|
||||
enable:
|
||||
# style
|
||||
- containedctx # struct contains a context
|
||||
- dupl # duplicate code
|
||||
- errname # errors are named correctly
|
||||
- nolintlint # "//nolint" directives are properly explained
|
||||
- revive # golint replacement
|
||||
- unconvert # unnecessary conversions
|
||||
- wastedassign
|
||||
|
||||
# bugs, performance, unused, etc ...
|
||||
- contextcheck # function uses a non-inherited context
|
||||
- errorlint # errors not wrapped for 1.13
|
||||
- exhaustive # check exhaustiveness of enum switch statements
|
||||
- gofmt # files are gofmt'ed
|
||||
- gosec # security
|
||||
- nilerr # returns nil even with non-nil error
|
||||
- thelper # test helpers without t.Helper()
|
||||
- unparam # unused function params
|
||||
|
||||
issues:
|
||||
exclude-dirs:
|
||||
- pkg/etw/sample
|
||||
|
||||
exclude-rules:
|
||||
# err is very often shadowed in nested scopes
|
||||
- linters:
|
||||
- govet
|
||||
text: '^shadow: declaration of "err" shadows declaration'
|
||||
|
||||
# ignore long lines for skip autogen directives
|
||||
- linters:
|
||||
- revive
|
||||
text: "^line-length-limit: "
|
||||
source: "^//(go:generate|sys) "
|
||||
|
||||
#TODO: remove after upgrading to go1.18
|
||||
# ignore comment spacing for nolint and sys directives
|
||||
- linters:
|
||||
- revive
|
||||
text: "^comment-spacings: no space between comment delimiter and comment text"
|
||||
source: "//(cspell:|nolint:|sys |todo)"
|
||||
|
||||
# not on go 1.18 yet, so no any
|
||||
- linters:
|
||||
- revive
|
||||
text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
|
||||
|
||||
# allow unjustified ignores of error checks in defer statements
|
||||
- linters:
|
||||
- nolintlint
|
||||
text: "^directive `//nolint:errcheck` should provide explanation"
|
||||
source: '^\s*defer '
|
||||
|
||||
# allow unjustified ignores of error lints for io.EOF
|
||||
- linters:
|
||||
- nolintlint
|
||||
text: "^directive `//nolint:errorlint` should provide explanation"
|
||||
source: '[=|!]= io.EOF'
|
||||
|
||||
|
||||
linters-settings:
|
||||
exhaustive:
|
||||
default-signifies-exhaustive: true
|
||||
govet:
|
||||
enable-all: true
|
||||
disable:
|
||||
# struct order is often for Win32 compat
|
||||
# also, ignore pointer bytes/GC issues for now until performance becomes an issue
|
||||
- fieldalignment
|
||||
nolintlint:
|
||||
require-explanation: true
|
||||
require-specific: true
|
||||
revive:
|
||||
# revive is more configurable than staticcheck, so likely the preferred alternative to staticcheck
|
||||
# (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
|
||||
enable-all-rules:
|
||||
true
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
|
||||
rules:
|
||||
# rules with required arguments
|
||||
- name: argument-limit
|
||||
disabled: true
|
||||
- name: banned-characters
|
||||
disabled: true
|
||||
- name: cognitive-complexity
|
||||
disabled: true
|
||||
- name: cyclomatic
|
||||
disabled: true
|
||||
- name: file-header
|
||||
disabled: true
|
||||
- name: function-length
|
||||
disabled: true
|
||||
- name: function-result-limit
|
||||
disabled: true
|
||||
- name: max-public-structs
|
||||
disabled: true
|
||||
# generally annoying rules
|
||||
- name: add-constant # complains about any and all strings and integers
|
||||
disabled: true
|
||||
- name: confusing-naming # we frequently use "Foo()" and "foo()" together
|
||||
disabled: true
|
||||
- name: flag-parameter # excessive, and a common idiom we use
|
||||
disabled: true
|
||||
- name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
|
||||
disabled: true
|
||||
# general config
|
||||
- name: line-length-limit
|
||||
arguments:
|
||||
- 140
|
||||
- name: var-naming
|
||||
arguments:
|
||||
- []
|
||||
- - CID
|
||||
- CRI
|
||||
- CTRD
|
||||
- DACL
|
||||
- DLL
|
||||
- DOS
|
||||
- ETW
|
||||
- FSCTL
|
||||
- GCS
|
||||
- GMSA
|
||||
- HCS
|
||||
- HV
|
||||
- IO
|
||||
- LCOW
|
||||
- LDAP
|
||||
- LPAC
|
||||
- LTSC
|
||||
- MMIO
|
||||
- NT
|
||||
- OCI
|
||||
- PMEM
|
||||
- PWSH
|
||||
- RX
|
||||
- SACL
|
||||
- SID
|
||||
- SMB
|
||||
- TX
|
||||
- VHD
|
||||
- VHDX
|
||||
- VMID
|
||||
- VPCI
|
||||
- WCOW
|
||||
- WIM
|
1
vendor/github.com/Microsoft/go-winio/CODEOWNERS
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
* @microsoft/containerplat
|
85
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
@ -1,4 +1,4 @@
|
||||
# go-winio
|
||||
# go-winio [](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
|
||||
|
||||
This repository contains utilities for efficiently performing Win32 IO operations in
|
||||
Go. Currently, this is focused on accessing named pipes and other file handles, and
|
||||
@ -11,12 +11,79 @@ package.
|
||||
|
||||
Please see the LICENSE file for licensing information.
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of
|
||||
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
|
||||
see the [Code of Conduct
|
||||
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
|
||||
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
|
||||
questions or comments.
|
||||
## Contributing
|
||||
|
||||
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
|
||||
for another named pipe implementation.
|
||||
This project welcomes contributions and suggestions.
|
||||
Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
|
||||
you have the right to, and actually do, grant us the rights to use your contribution.
|
||||
For details, visit [Microsoft CLA](https://cla.microsoft.com).
|
||||
|
||||
When you submit a pull request, a CLA-bot will automatically determine whether you need to
|
||||
provide a CLA and decorate the PR appropriately (e.g., label, comment).
|
||||
Simply follow the instructions provided by the bot.
|
||||
You will only need to do this once across all repos using our CLA.
|
||||
|
||||
Additionally, the pull request pipeline requires the following steps to be performed before
|
||||
merging.
|
||||
|
||||
### Code Sign-Off
|
||||
|
||||
We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
|
||||
to certify they either authored the work themselves or otherwise have permission to use it in this project.
|
||||
|
||||
A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
|
||||
|
||||
Please see [the developer certificate](https://developercertificate.org) for more info,
|
||||
as well as to make sure that you can attest to the rules listed.
|
||||
Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
|
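For example, sign-off can be added to the commit being created, or retrofitted onto a range of commits, with the standard git flags referenced above (the commit message and range are placeholders):

```shell
# sign off the commit you are creating
git commit --signoff -m "backuptar: handle sparse files"

# retroactively sign off the last three commits
git rebase --signoff HEAD~3
```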
||||
|
||||
### Linting
|
||||
|
||||
Code must pass a linting stage, which uses [`golangci-lint`][lint].
|
||||
The linting settings are stored in [`.golangci.yml`](./.golangci.yml), and can be run
|
||||
automatically with VSCode by adding the following to your workspace or folder settings:
|
||||
|
||||
```json
|
||||
"go.lintTool": "golangci-lint",
|
||||
"go.lintOnSave": "package",
|
||||
```
|
||||
|
||||
Additional editor [integrations options are also available][lint-ide].
|
||||
|
||||
Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
|
||||
|
||||
```shell
|
||||
# use . or specify a path to only lint a package
|
||||
# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
|
||||
> golangci-lint run ./...
|
||||
```
|
||||
|
||||
### Go Generate
|
||||
|
||||
The pipeline checks that auto-generated code, via `go generate`, is up to date.
|
||||
|
||||
This can be done for the entire repo:
|
||||
|
||||
```shell
|
||||
> go generate ./...
|
||||
```
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
|
||||
## Special Thanks
|
||||
|
||||
Thanks to [natefinch][natefinch] for the inspiration for this library.
|
||||
See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
|
||||
|
||||
[lint]: https://golangci-lint.run/
|
||||
[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
|
||||
[lint-install]: https://golangci-lint.run/usage/install/#local-installation
|
||||
|
||||
[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
|
||||
[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
|
||||
|
||||
[natefinch]: https://github.com/natefinch
|
||||
|
41
vendor/github.com/Microsoft/go-winio/SECURITY.md
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
|
||||
|
||||
## Security
|
||||
|
||||
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
|
||||
|
||||
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
|
||||
|
||||
## Reporting Security Issues
|
||||
|
||||
**Please do not report security vulnerabilities through public GitHub issues.**
|
||||
|
||||
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
|
||||
|
||||
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
|
||||
|
||||
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
|
||||
|
||||
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
||||
|
||||
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
||||
* Full paths of source file(s) related to the manifestation of the issue
|
||||
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||
* Any special configuration required to reproduce the issue
|
||||
* Step-by-step instructions to reproduce the issue
|
||||
* Proof-of-concept or exploit code (if possible)
|
||||
* Impact of the issue, including how an attacker might exploit the issue
|
||||
|
||||
This information will help us triage your report more quickly.
|
||||
|
||||
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
|
||||
|
||||
## Preferred Languages
|
||||
|
||||
We prefer all communications to be in English.
|
||||
|
||||
## Policy
|
||||
|
||||
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
|
||||
|
||||
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
65
vendor/github.com/Microsoft/go-winio/backup.go
generated
vendored
@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
@ -7,15 +8,16 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
|
||||
"github.com/Microsoft/go-winio/internal/fs"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
|
||||
const (
|
||||
BackupData = uint32(iota + 1)
|
||||
@ -24,7 +26,7 @@ const (
|
||||
BackupAlternateData
|
||||
BackupLink
|
||||
BackupPropertyData
|
||||
BackupObjectId
|
||||
BackupObjectId //revive:disable-line:var-naming ID, not Id
|
||||
BackupReparseData
|
||||
BackupSparseBlock
|
||||
BackupTxfsData
|
||||
@ -34,14 +36,16 @@ const (
|
||||
StreamSparseAttributes = uint32(8)
|
||||
)
|
||||
|
||||
//nolint:revive // var-naming: ALL_CAPS
|
||||
const (
|
||||
WRITE_DAC = 0x40000
|
||||
WRITE_OWNER = 0x80000
|
||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
||||
WRITE_DAC = windows.WRITE_DAC
|
||||
WRITE_OWNER = windows.WRITE_OWNER
|
||||
ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
|
||||
)
|
||||
|
||||
// BackupHeader represents a backup stream of a file.
|
||||
type BackupHeader struct {
|
||||
//revive:disable-next-line:var-naming ID, not Id
|
||||
Id uint32 // The backup stream ID
|
||||
Attributes uint32 // Stream attributes
|
||||
Size int64 // The size of the stream in bytes
|
||||
@ -49,8 +53,8 @@ type BackupHeader struct {
|
||||
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
|
||||
}
|
||||
|
||||
type win32StreamId struct {
|
||||
StreamId uint32
|
||||
type win32StreamID struct {
|
||||
StreamID uint32
|
||||
Attributes uint32
|
||||
Size uint64
|
||||
NameSize uint32
|
||||
@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
|
||||
// it was not completely read.
|
||||
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if r.bytesLeft > 0 {
|
||||
if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
// Make sure Seek on io.SeekCurrent sometimes succeeds
|
||||
// before trying the actual seek.
|
||||
@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
r.bytesLeft = 0
|
||||
}
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, r); err != nil {
|
||||
if _, err := io.Copy(io.Discard, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var wsi win32StreamId
|
||||
var wsi win32StreamID
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := &BackupHeader{
|
||||
Id: wsi.StreamId,
|
||||
Id: wsi.StreamID,
|
||||
Attributes: wsi.Attributes,
|
||||
Size: int64(wsi.Size),
|
||||
}
|
||||
@ -100,9 +104,9 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Name = syscall.UTF16ToString(name)
|
||||
hdr.Name = windows.UTF16ToString(name)
|
||||
}
|
||||
if wsi.StreamId == BackupSparseBlock {
|
||||
if wsi.StreamID == BackupSparseBlock {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
|
||||
return fmt.Errorf("missing %d bytes", w.bytesLeft)
|
||||
}
|
||||
name := utf16.Encode([]rune(hdr.Name))
|
||||
wsi := win32StreamId{
|
||||
StreamId: hdr.Id,
|
||||
wsi := win32StreamID{
|
||||
StreamID: hdr.Id,
|
||||
Attributes: hdr.Attributes,
|
||||
Size: uint64(hdr.Size),
|
||||
NameSize: uint32(len(name) * 2),
|
||||
@ -201,9 +205,9 @@ func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
|
||||
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
|
||||
func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
|
||||
return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(r.f)
|
||||
if bytesRead == 0 {
|
||||
@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
// the underlying file.
|
||||
func (r *BackupFileReader) Close() error {
|
||||
if r.ctx != 0 {
|
||||
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
_ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
runtime.KeepAlive(r.f)
|
||||
r.ctx = 0
|
||||
}
|
||||
@ -240,9 +244,9 @@ func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
|
||||
// Write restores a portion of the file using the provided backup stream.
|
||||
func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
var bytesWritten uint32
|
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
|
||||
return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(w.f)
|
||||
if int(bytesWritten) != len(b) {
|
||||
@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
// close the underlying file.
|
||||
func (w *BackupFileWriter) Close() error {
|
||||
if w.ctx != 0 {
|
||||
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
_ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
runtime.KeepAlive(w.f)
|
||||
w.ctx = 0
|
||||
}
|
||||
@ -267,11 +271,14 @@ func (w *BackupFileWriter) Close() error {
|
||||
//
|
||||
// If the file opened was a directory, it cannot be used with Readdir().
|
||||
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
|
||||
winPath, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
||||
h, err := fs.CreateFile(path,
|
||||
fs.AccessMask(access),
|
||||
fs.FileShareMode(share),
|
||||
nil,
|
||||
fs.FileCreationDisposition(createmode),
|
||||
fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
|
||||
0,
|
||||
)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
|
3
vendor/github.com/Microsoft/go-winio/backuptar/doc.go
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
// This file only exists to allow go get on non-Windows platforms.
|
||||
|
||||
package backuptar
|
@ -1,34 +1,18 @@
|
||||
// +build windows
|
||||
//go:build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"archive/tar"
|
||||
)
|
||||
|
||||
// Forked from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
|
||||
// as archive/tar doesn't support CreationTime, but does handle PAX time parsing,
|
||||
// and there's no need to re-invent the wheel.
|
||||
// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
|
||||
// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually.
|
||||
// Idea taken from containerd which did the same thing.
|
||||
|
||||
// parsePAXTime takes a string of the form %d.%d as described in the PAX
|
||||
// specification. Note that this implementation allows for negative timestamps,
|
||||
@ -62,7 +46,25 @@ func parsePAXTime(s string) (time.Time, error) {
|
||||
}
|
||||
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
|
||||
if len(ss) > 0 && ss[0] == '-' {
|
||||
return time.Unix(secs, -nsecs), nil // Negative correction
|
||||
return time.Unix(secs, -1*nsecs), nil // Negative correction
|
||||
}
|
||||
return time.Unix(secs, nsecs), nil
|
||||
}
|
||||
|
||||
// formatPAXTime converts ts into a time of the form %d.%d as described in the
|
||||
// PAX specification. This function is capable of negative timestamps.
|
||||
func formatPAXTime(ts time.Time) (s string) {
|
||||
secs, nsecs := ts.Unix(), ts.Nanosecond()
|
||||
if nsecs == 0 {
|
||||
return strconv.FormatInt(secs, 10)
|
||||
}
|
||||
|
||||
// If seconds is negative, then perform correction.
|
||||
sign := ""
|
||||
if secs < 0 {
|
||||
sign = "-" // Remember sign
|
||||
secs = -(secs + 1) // Add a second to secs
|
||||
nsecs = -(nsecs - 1e9) // Take that second away from nsecs
|
||||
}
|
||||
return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
|
||||
}
|
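As a quick illustration of the `%d.%d` PAX time form that parsePAXTime and formatPAXTime handle, a standalone sketch (the trailing-zero trimming done by formatPAXTime is omitted; the timestamp is just an example value):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A PAX extended-header time value is "<seconds>.<fractional seconds>",
	// e.g. LIBARCHIVE.creationtime records a file's creation time this way.
	ts := time.Unix(1350244992, 23960108)
	fmt.Printf("%d.%09d\n", ts.Unix(), ts.Nanosecond()) // prints 1350244992.023960108
}
```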
508
vendor/github.com/Microsoft/go-winio/backuptar/tar.go
generated
vendored
Normal file
@ -0,0 +1,508 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//nolint:deadcode,varcheck // keep unused constants for potential future use
|
||||
const (
|
||||
cISUID = 0004000 // Set uid
|
||||
cISGID = 0002000 // Set gid
|
||||
cISVTX = 0001000 // Save text (sticky bit)
|
||||
cISDIR = 0040000 // Directory
|
||||
cISFIFO = 0010000 // FIFO
|
||||
cISREG = 0100000 // Regular file
|
||||
cISLNK = 0120000 // Symbolic link
|
||||
cISBLK = 0060000 // Block special file
|
||||
cISCHR = 0020000 // Character special file
|
||||
cISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
const (
|
||||
hdrFileAttributes = "MSWINDOWS.fileattr"
|
||||
hdrSecurityDescriptor = "MSWINDOWS.sd"
|
||||
hdrRawSecurityDescriptor = "MSWINDOWS.rawsd"
|
||||
hdrMountPoint = "MSWINDOWS.mountpoint"
|
||||
hdrEaPrefix = "MSWINDOWS.xattr."
|
||||
|
||||
hdrCreationTime = "LIBARCHIVE.creationtime"
|
||||
)
|
||||
|
||||
// zeroReader is an io.Reader that always returns 0s.
|
||||
type zeroReader struct{}
|
||||
|
||||
func (zeroReader) Read(b []byte) (int, error) {
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
|
||||
curOffset := int64(0)
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF { //nolint:errorlint
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id != winio.BackupSparseBlock {
|
||||
return fmt.Errorf("unexpected stream %d", bhdr.Id)
|
||||
}
|
||||
|
||||
// We can't seek backwards, since we have already written that data to the tar.Writer.
|
||||
if bhdr.Offset < curOffset {
|
||||
return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset)
|
||||
}
|
||||
// archive/tar does not support writing sparse files
|
||||
// so just write zeroes to catch up to the current offset.
|
||||
if _, err = io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
|
||||
return fmt.Errorf("seek to offset %d: %w", bhdr.Offset, err)
|
||||
}
|
||||
if bhdr.Size == 0 {
|
||||
// A sparse block with size = 0 is used to mark the end of the sparse blocks.
|
||||
break
|
||||
}
|
||||
n, err := io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != bhdr.Size {
|
||||
return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset)
|
||||
}
|
||||
curOffset = bhdr.Offset + n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BasicInfoHeader creates a tar header from basic file information.
|
||||
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
|
||||
hdr := &tar.Header{
|
||||
Format: tar.FormatPAX,
|
||||
Name: filepath.ToSlash(name),
|
||||
Size: size,
|
||||
Typeflag: tar.TypeReg,
|
||||
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
|
||||
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
|
||||
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
|
||||
PAXRecords: make(map[string]string),
|
||||
}
|
||||
hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
|
||||
hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
|
||||
|
||||
if (fileInfo.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
hdr.Mode |= cISDIR
|
||||
hdr.Size = 0
|
||||
hdr.Typeflag = tar.TypeDir
|
||||
}
|
||||
return hdr
|
||||
}
|
||||
|
||||
// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
|
||||
// from the tar header and returns the security descriptor into a byte slice.
|
||||
func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
|
||||
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
|
||||
sd, err := base64.StdEncoding.DecodeString(sdraw)
|
||||
if err != nil {
|
||||
// Not returning sd as-is in the error-case, as base64.DecodeString
|
||||
// may return partially decoded data (not nil or empty slice) in case
|
||||
// of a failure: https://github.com/golang/go/blob/go1.17.7/src/encoding/base64/base64.go#L382-L387
|
||||
return nil, err
|
||||
}
|
||||
return sd, nil
|
||||
}
|
||||
// Maintaining old SDDL-based behavior for backward compatibility. All new
|
||||
// tar headers written by this library will have raw binary for the security
|
||||
// descriptor.
|
||||
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
|
||||
return winio.SddlToSecurityDescriptor(sddl)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
|
||||
// current file from the tar header and returns it as a byte slice.
|
||||
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
|
||||
var eas []winio.ExtendedAttribute //nolint:prealloc // len(eas) <= len(hdr.PAXRecords); prealloc is wasteful
|
||||
for k, v := range hdr.PAXRecords {
|
||||
if !strings.HasPrefix(k, hdrEaPrefix) {
|
||||
continue
|
||||
}
|
||||
data, err := base64.StdEncoding.DecodeString(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eas = append(eas, winio.ExtendedAttribute{
|
||||
Name: k[len(hdrEaPrefix):],
|
||||
Value: data,
|
||||
})
|
||||
}
|
||||
var eaData []byte
|
||||
var err error
|
||||
if len(eas) != 0 {
|
||||
eaData, err = winio.EncodeExtendedAttributes(eas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return eaData, nil
|
||||
}
|
||||
|
||||
// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
|
||||
// and encodes it into a byte slice. The file for which this function is called must be a
|
||||
// symlink.
|
||||
func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
|
||||
_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
|
||||
rp := winio.ReparsePoint{
|
||||
Target: filepath.FromSlash(hdr.Linkname),
|
||||
IsMountPoint: isMountPoint,
|
||||
}
|
||||
return winio.EncodeReparsePoint(&rp)
|
||||
}
|
||||
|
||||
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
|
||||
//
|
||||
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
|
||||
//
|
||||
// The additional Win32 metadata is:
|
||||
//
|
||||
// - MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
|
||||
// - MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
|
||||
// - MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
|
||||
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
|
||||
name = filepath.ToSlash(name)
|
||||
hdr := BasicInfoHeader(name, size, fileInfo)
|
||||
|
||||
// If r can be seeked, then this function is two-pass: pass 1 collects the
|
||||
// tar header data, and pass 2 copies the data stream. If r cannot be
|
||||
// seeked, then some header data (in particular EAs) will be silently lost.
|
||||
var (
|
||||
restartPos int64
|
||||
err error
|
||||
)
|
||||
sr, readTwice := r.(io.Seeker)
|
||||
if readTwice {
|
||||
if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
|
||||
readTwice = false
|
||||
}
|
||||
}
|
||||
|
||||
br := winio.NewBackupStreamReader(r)
|
||||
var dataHdr *winio.BackupHeader
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF { //nolint:errorlint
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupData:
|
||||
hdr.Mode |= cISREG
|
||||
if !readTwice {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
case winio.BackupSecurity:
|
||||
sd, err := io.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
|
||||
|
||||
case winio.BackupReparseData:
|
||||
hdr.Mode |= cISLNK
|
||||
hdr.Typeflag = tar.TypeSymlink
|
||||
reparseBuffer, _ := io.ReadAll(br)
|
||||
rp, err := winio.DecodeReparsePoint(reparseBuffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rp.IsMountPoint {
|
||||
hdr.PAXRecords[hdrMountPoint] = "1"
|
||||
}
|
||||
hdr.Linkname = rp.Target
|
||||
|
||||
case winio.BackupEaData:
|
||||
eab, err := io.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eas, err := winio.DecodeExtendedAttributes(eab)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ea := range eas {
|
||||
// Use base64 encoding for the binary value. Note that there
|
||||
// is no way to encode the EA's flags, since their use doesn't
|
||||
// make any sense for persisted EAs.
|
||||
hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
|
||||
}
|
||||
|
||||
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
        return err
    }

    if readTwice {
        // Get back to the data stream.
        if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
            return err
        }
        for dataHdr == nil {
            bhdr, err := br.Next()
            if err == io.EOF { //nolint:errorlint
                break
            }
            if err != nil {
                return err
            }
            if bhdr.Id == winio.BackupData {
                dataHdr = bhdr
            }
        }
    }

    // The logic for copying file contents is fairly complicated due to the need for handling sparse files,
    // and the weird ways they are represented by BackupRead. A normal file will always either have a data stream
    // with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also
    // be represented using a series of sparse block streams following the data stream. Additionally, the way sparse
    // files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described
    // in the list at the bottom of this block comment.
    //
    // Sparse files can be represented in four different ways, based on the specifics of the file.
    // - Size = 0:
    //     Previously: BackupRead yields no data stream and no sparse block streams.
    //     Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
    // - Size > 0, no allocated ranges:
    //     BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
    //     size = 0 and offset = <file size>.
    // - Size > 0, one allocated range:
    //     BackupRead yields a data stream with size = <file size> containing the file contents. There are no
    //     sparse block streams. This is the case if you take a normal file with contents and simply set the
    //     sparse flag on it.
    // - Size > 0, multiple allocated ranges:
    //     BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
    //     range of the file containing the range contents. Finally there is a sparse block stream with
    //     size = 0 and offset = <file size>.

    if dataHdr != nil { //nolint:nestif // todo: reduce nesting complexity
        // A data stream was found. Copy the data.
        // We assume that we will either have a data stream size > 0 XOR have sparse block streams.
        if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
            if size != dataHdr.Size {
                return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
            }
            if _, err = io.Copy(t, br); err != nil {
                return fmt.Errorf("%s: copying contents from data stream: %w", name, err)
            }
        } else if size > 0 {
            // As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
            // These files have no sparse block streams, so skip the copySparse call if file size = 0.
            if err = copySparse(t, br); err != nil {
                return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err)
            }
        }
    }

    // Look for streams after the data stream. The only ones we handle are alternate data streams.
    // Other streams may have metadata that could be serialized, but the tar header has already
    // been written. In practice, this means that we don't get EA or TXF metadata.
    for {
        bhdr, err := br.Next()
        if err == io.EOF { //nolint:errorlint
            break
        }
        if err != nil {
            return err
        }
        switch bhdr.Id {
        case winio.BackupAlternateData:
            if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 {
                // Unsupported for now, since the size of the alternate stream is not present
                // in the backup stream until after the data has been read.
                return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
            }
            altName := strings.TrimSuffix(bhdr.Name, ":$DATA")
            hdr = &tar.Header{
                Format:     hdr.Format,
                Name:       name + altName,
                Mode:       hdr.Mode,
                Typeflag:   tar.TypeReg,
                Size:       bhdr.Size,
                ModTime:    hdr.ModTime,
                AccessTime: hdr.AccessTime,
                ChangeTime: hdr.ChangeTime,
            }
            err = t.WriteHeader(hdr)
            if err != nil {
                return err
            }
            _, err = io.Copy(t, br)
            if err != nil {
                return err
            }
        case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
            // ignore these streams
        default:
            return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
        }
    }
    return nil
}
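
Editorial note: the four sparse-file shapes described in the block comment above are easiest to see by walking the raw backup stream yourself. The following sketch is not part of the vendored file; it assumes a caller-supplied path (the one shown is made up) and uses winio.NewBackupFileReader plus winio.NewBackupStreamReader, which both exist in this package, to print which stream types BackupRead produced.

// Editorial sketch: list the streams BackupRead produces for one file.
package main

import (
    "fmt"
    "io"
    "os"

    "github.com/Microsoft/go-winio"
)

func main() {
    f, err := os.Open(`C:\temp\sparse.dat`) // hypothetical path
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // Wrap the handle with BackupRead; false = do not include security data.
    bfr := winio.NewBackupFileReader(f, false)
    defer bfr.Close()

    br := winio.NewBackupStreamReader(bfr)
    for {
        bhdr, err := br.Next() // Next skips any unread remainder of the previous stream.
        if err == io.EOF {
            return
        }
        if err != nil {
            panic(err)
        }
        switch bhdr.Id {
        case winio.BackupData:
            fmt.Printf("data stream: size=%d\n", bhdr.Size)
        case winio.BackupSparseBlock:
            fmt.Printf("sparse block: offset=%d size=%d\n", bhdr.Offset, bhdr.Size)
        default:
            fmt.Printf("other stream: id=%d size=%d\n", bhdr.Id, bhdr.Size)
        }
    }
}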

// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
    name = hdr.Name
    if hdr.Typeflag == tar.TypeReg {
        size = hdr.Size
    }
    fileInfo = &winio.FileBasicInfo{
        LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
        LastWriteTime:  windows.NsecToFiletime(hdr.ModTime.UnixNano()),
        ChangeTime:     windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
        // Default to ModTime, we'll pull hdrCreationTime below if present
        CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
    }
    if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
        attr, err := strconv.ParseUint(attrStr, 10, 32)
        if err != nil {
            return "", 0, nil, err
        }
        fileInfo.FileAttributes = uint32(attr)
    } else {
        if hdr.Typeflag == tar.TypeDir {
            fileInfo.FileAttributes |= windows.FILE_ATTRIBUTE_DIRECTORY
        }
    }
    if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
        creationTime, err := parsePAXTime(creationTimeStr)
        if err != nil {
            return "", 0, nil, err
        }
        fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
    }
    return name, size, fileInfo, err
}
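
Editorial note: a typical consumer pairs FileInfoFromHeader with winio.SetFileBasicInfo to put the recovered timestamps and attributes back onto an extracted file. This is a minimal sketch, not part of the vendored file; it assumes the file has already been written to disk, that ordinary read/write access is enough to set the attributes, and the helper name and path handling are illustrative only.

// Editorial sketch: reapply tar-header metadata to an extracted file.
package example

import (
    "archive/tar"
    "os"

    "github.com/Microsoft/go-winio"
    "github.com/Microsoft/go-winio/backuptar"
)

// restoreBasicInfo reapplies timestamps and attributes recovered from a tar
// header (as written by WriteTarFileFromBackupStream) onto an extracted file.
func restoreBasicInfo(hdr *tar.Header, extractedPath string) error {
    _, _, fileInfo, err := backuptar.FileInfoFromHeader(hdr)
    if err != nil {
        return err
    }
    f, err := os.OpenFile(extractedPath, os.O_RDWR, 0)
    if err != nil {
        return err
    }
    defer f.Close()
    // SetFileBasicInfo wraps SetFileInformationByHandle.
    return winio.SetFileBasicInfo(f, fileInfo)
}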

// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
    bw := winio.NewBackupStreamWriter(w)

    sd, err := SecurityDescriptorFromTarHeader(hdr)
    if err != nil {
        return nil, err
    }
    if len(sd) != 0 {
        bhdr := winio.BackupHeader{
            Id:   winio.BackupSecurity,
            Size: int64(len(sd)),
        }
        err := bw.WriteHeader(&bhdr)
        if err != nil {
            return nil, err
        }
        _, err = bw.Write(sd)
        if err != nil {
            return nil, err
        }
    }

    eadata, err := ExtendedAttributesFromTarHeader(hdr)
    if err != nil {
        return nil, err
    }
    if len(eadata) != 0 {
        bhdr := winio.BackupHeader{
            Id:   winio.BackupEaData,
            Size: int64(len(eadata)),
        }
        err = bw.WriteHeader(&bhdr)
        if err != nil {
            return nil, err
        }
        _, err = bw.Write(eadata)
        if err != nil {
            return nil, err
        }
    }

    if hdr.Typeflag == tar.TypeSymlink {
        reparse := EncodeReparsePointFromTarHeader(hdr)
        bhdr := winio.BackupHeader{
            Id:   winio.BackupReparseData,
            Size: int64(len(reparse)),
        }
        err := bw.WriteHeader(&bhdr)
        if err != nil {
            return nil, err
        }
        _, err = bw.Write(reparse)
        if err != nil {
            return nil, err
        }
    }

    if hdr.Typeflag == tar.TypeReg {
        bhdr := winio.BackupHeader{
            Id:   winio.BackupData,
            Size: hdr.Size,
        }
        err := bw.WriteHeader(&bhdr)
        if err != nil {
            return nil, err
        }
        _, err = io.Copy(bw, t)
        if err != nil {
            return nil, err
        }
    }
    // Copy all the alternate data streams and return the next non-ADS header.
    for {
        ahdr, err := t.Next()
        if err != nil {
            return nil, err
        }
        if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
            return ahdr, nil
        }
        bhdr := winio.BackupHeader{
            Id:   winio.BackupAlternateData,
            Size: ahdr.Size,
            Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
        }
        err = bw.WriteHeader(&bhdr)
        if err != nil {
            return nil, err
        }
        _, err = io.Copy(bw, t)
        if err != nil {
            return nil, err
        }
    }
}
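
Editorial note: a usage sketch for the function above, not part of the vendored file. It drives WriteBackupStreamFromTarFile in a loop and feeds its output into winio.NewBackupFileWriter, which wraps the BackupWrite API. Directory entries, reparse points, and access rights are deliberately glossed over, and includeSecurity is left false because restoring security descriptors needs extra privileges.

// Editorial sketch: extract each tar entry into a file via BackupWrite.
package example

import (
    "archive/tar"
    "io"
    "os"
    "path/filepath"

    "github.com/Microsoft/go-winio"
    "github.com/Microsoft/go-winio/backuptar"
)

func extractAll(t *tar.Reader, destDir string) error {
    hdr, err := t.Next()
    for err == nil {
        path := filepath.Join(destDir, filepath.FromSlash(hdr.Name))
        f, ferr := os.Create(path)
        if ferr != nil {
            return ferr
        }
        // false = do not attempt to restore security descriptors.
        bfw := winio.NewBackupFileWriter(f, false)
        // Consumes this entry plus any alternate-data-stream entries that
        // belong to it, and returns the next unrelated header (or io.EOF).
        hdr, err = backuptar.WriteBackupStreamFromTarFile(bfw, t, hdr)
        bfw.Close()
        f.Close()
    }
    if err == io.EOF {
        return nil
    }
    return err
}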

vendor/github.com/Microsoft/go-winio/doc.go (new file, 22 lines, generated, vendored)
@@ -0,0 +1,22 @@
// This package provides utilities for efficiently performing Win32 IO operations in Go.
// Currently, this package provides support for general IO and management of
//   - named pipes
//   - files
//   - [Hyper-V sockets]
//
// This code is similar to Go's [net] package, and uses IO completion ports to avoid
// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
//
// This limits support to Windows Vista and newer operating systems.
//
// Additionally, this package provides support for:
//   - creating and managing GUIDs
//   - writing to [ETW]
//   - opening and managing VHDs
//   - parsing [Windows Image files]
//   - auto-generating Win32 API code
//
// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
package winio
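
Editorial note: as a quick orientation for the named-pipe support mentioned in the package comment above, here is a minimal sketch that is not part of the vendored file. The pipe name is made up, a server must already be listening on it, and only the documented winio.DialPipe call is used.

// Editorial sketch: connect to an existing named pipe server and send one line.
package example

import (
    "fmt"
    "time"

    "github.com/Microsoft/go-winio"
)

func dialAndGreet() error {
    timeout := 2 * time.Second
    conn, err := winio.DialPipe(`\\.\pipe\example-winio`, &timeout) // hypothetical pipe name
    if err != nil {
        return err
    }
    defer conn.Close()
    _, err = fmt.Fprintln(conn, "hello over a named pipe")
    return err
}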

vendor/github.com/Microsoft/go-winio/ea.go (8 changed lines, generated, vendored)
@@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
     err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
     if err != nil {
         err = errInvalidEaBuffer
-        return
+        return ea, nb, err
     }

     nameOffset := fileFullEaInformationSize
@@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
     nextOffset := int(info.NextEntryOffset)
     if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
         err = errInvalidEaBuffer
-        return
+        return ea, nb, err
     }

     ea.Name = string(b[nameOffset : nameOffset+nameLen])
@@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
     if info.NextEntryOffset != 0 {
         nb = b[info.NextEntryOffset:]
     }
-    return
+    return ea, nb, err
 }

 // DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
@@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
         eas = append(eas, ea)
         b = nb
     }
-    return
+    return eas, err
 }

 func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {

vendor/github.com/Microsoft/go-winio/file.go (141 changed lines, generated, vendored)
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 package winio
@@ -10,31 +11,15 @@ import (
     "sync/atomic"
     "syscall"
     "time"
+
+    "golang.org/x/sys/windows"
 )

-//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
-//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
-//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
-//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
-//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
-
-type atomicBool int32
-
-func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
-func (b *atomicBool) setFalse()   { atomic.StoreInt32((*int32)(b), 0) }
-func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }
-func (b *atomicBool) swap(new bool) bool {
-    var newInt int32
-    if new {
-        newInt = 1
-    }
-    return atomic.SwapInt32((*int32)(b), newInt) == 1
-}
-
-const (
-    cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
-    cFILE_SKIP_SET_EVENT_ON_HANDLE        = 2
-)
+//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult

 var (
     ErrFileClosed = errors.New("file has already been closed")
@@ -43,29 +28,29 @@ var (

 type timeoutError struct{}

-func (e *timeoutError) Error() string   { return "i/o timeout" }
-func (e *timeoutError) Timeout() bool   { return true }
-func (e *timeoutError) Temporary() bool { return true }
+func (*timeoutError) Error() string   { return "i/o timeout" }
+func (*timeoutError) Timeout() bool   { return true }
+func (*timeoutError) Temporary() bool { return true }

 type timeoutChan chan struct{}

 var ioInitOnce sync.Once
-var ioCompletionPort syscall.Handle
+var ioCompletionPort windows.Handle

-// ioResult contains the result of an asynchronous IO operation
+// ioResult contains the result of an asynchronous IO operation.
 type ioResult struct {
     bytes uint32
     err   error
 }

-// ioOperation represents an outstanding asynchronous Win32 IO
+// ioOperation represents an outstanding asynchronous Win32 IO.
 type ioOperation struct {
-    o  syscall.Overlapped
+    o  windows.Overlapped
     ch chan ioResult
 }

-func initIo() {
-    h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
+func initIO() {
+    h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
     if err != nil {
         panic(err)
     }
@@ -76,10 +61,10 @@ func initIo() {
 // win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
 // It takes ownership of this handle and will close it if it is garbage collected.
 type win32File struct {
-    handle        syscall.Handle
+    handle        windows.Handle
     wg            sync.WaitGroup
     wgLock        sync.RWMutex
-    closing       atomicBool
+    closing       atomic.Bool
     socket        bool
     readDeadline  deadlineHandler
     writeDeadline deadlineHandler
@@ -90,18 +75,18 @@ type deadlineHandler struct {
     channel     timeoutChan
     channelLock sync.RWMutex
     timer       *time.Timer
-    timedout    atomicBool
+    timedout    atomic.Bool
 }

-// makeWin32File makes a new win32File from an existing file handle
-func makeWin32File(h syscall.Handle) (*win32File, error) {
+// makeWin32File makes a new win32File from an existing file handle.
+func makeWin32File(h windows.Handle) (*win32File, error) {
     f := &win32File{handle: h}
-    ioInitOnce.Do(initIo)
+    ioInitOnce.Do(initIO)
     _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
     if err != nil {
         return nil, err
     }
-    err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
+    err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
     if err != nil {
         return nil, err
     }
@@ -110,7 +95,12 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
     return f, nil
 }

+// Deprecated: use NewOpenFile instead.
 func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
+    return NewOpenFile(windows.Handle(h))
+}
+
+func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) {
     // If we return the result of makeWin32File directly, it can result in an
     // interface-wrapped nil, rather than a nil interface value.
     f, err := makeWin32File(h)
@@ -120,17 +110,17 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
     return f, nil
 }

-// closeHandle closes the resources associated with a Win32 handle
+// closeHandle closes the resources associated with a Win32 handle.
 func (f *win32File) closeHandle() {
     f.wgLock.Lock()
     // Atomically set that we are closing, releasing the resources only once.
-    if !f.closing.swap(true) {
+    if !f.closing.Swap(true) {
         f.wgLock.Unlock()
         // cancel all IO and wait for it to complete
-        cancelIoEx(f.handle, nil)
+        _ = cancelIoEx(f.handle, nil)
         f.wg.Wait()
         // at this point, no new IO can start
-        syscall.Close(f.handle)
+        windows.Close(f.handle)
         f.handle = 0
     } else {
         f.wgLock.Unlock()
@@ -143,11 +133,16 @@ func (f *win32File) Close() error {
     return nil
 }

-// prepareIo prepares for a new IO operation.
+// IsClosed checks if the file has been closed.
+func (f *win32File) IsClosed() bool {
+    return f.closing.Load()
+}
+
+// prepareIO prepares for a new IO operation.
 // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
-func (f *win32File) prepareIo() (*ioOperation, error) {
+func (f *win32File) prepareIO() (*ioOperation, error) {
     f.wgLock.RLock()
-    if f.closing.isSet() {
+    if f.closing.Load() {
         f.wgLock.RUnlock()
         return nil, ErrFileClosed
     }
@@ -158,13 +153,13 @@ func (f *win32File) prepareIo() (*ioOperation, error) {
     return c, nil
 }

-// ioCompletionProcessor processes completed async IOs forever
-func ioCompletionProcessor(h syscall.Handle) {
+// ioCompletionProcessor processes completed async IOs forever.
+func ioCompletionProcessor(h windows.Handle) {
     for {
         var bytes uint32
         var key uintptr
         var op *ioOperation
-        err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
+        err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE)
         if op == nil {
             panic(err)
         }
@@ -172,15 +167,17 @@ func ioCompletionProcessor(h syscall.Handle) {
     }
 }

-// asyncIo processes the return value from ReadFile or WriteFile, blocking until
+// todo: helsaawy - create an asyncIO version that takes a context
+
+// asyncIO processes the return value from ReadFile or WriteFile, blocking until
 // the operation has actually completed.
-func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
-    if err != syscall.ERROR_IO_PENDING {
+func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
+    if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
         return int(bytes), err
     }

-    if f.closing.isSet() {
-        cancelIoEx(f.handle, &c.o)
+    if f.closing.Load() {
+        _ = cancelIoEx(f.handle, &c.o)
     }

     var timeout timeoutChan
@@ -194,8 +191,8 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
     select {
     case r = <-c.ch:
         err = r.err
-        if err == syscall.ERROR_OPERATION_ABORTED {
-            if f.closing.isSet() {
+        if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+            if f.closing.Load() {
                 err = ErrFileClosed
             }
         } else if err != nil && f.socket {
@@ -204,10 +201,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
             err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
         }
     case <-timeout:
-        cancelIoEx(f.handle, &c.o)
+        _ = cancelIoEx(f.handle, &c.o)
         r = <-c.ch
         err = r.err
-        if err == syscall.ERROR_OPERATION_ABORTED {
+        if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
             err = ErrTimeout
         }
     }
@@ -215,52 +212,52 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
     // runtime.KeepAlive is needed, as c is passed via native
     // code to ioCompletionProcessor, c must remain alive
     // until the channel read is complete.
+    // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
     runtime.KeepAlive(c)
     return int(r.bytes), err
 }

 // Read reads from a file handle.
 func (f *win32File) Read(b []byte) (int, error) {
-    c, err := f.prepareIo()
+    c, err := f.prepareIO()
     if err != nil {
         return 0, err
     }
     defer f.wg.Done()

-    if f.readDeadline.timedout.isSet() {
+    if f.readDeadline.timedout.Load() {
         return 0, ErrTimeout
     }

     var bytes uint32
-    err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
-    n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
+    err = windows.ReadFile(f.handle, b, &bytes, &c.o)
+    n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
     runtime.KeepAlive(b)

     // Handle EOF conditions.
     if err == nil && n == 0 && len(b) != 0 {
         return 0, io.EOF
-    } else if err == syscall.ERROR_BROKEN_PIPE {
+    } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
         return 0, io.EOF
-    } else {
-        return n, err
     }
+    return n, err
 }

 // Write writes to a file handle.
 func (f *win32File) Write(b []byte) (int, error) {
-    c, err := f.prepareIo()
+    c, err := f.prepareIO()
     if err != nil {
         return 0, err
     }
     defer f.wg.Done()

-    if f.writeDeadline.timedout.isSet() {
+    if f.writeDeadline.timedout.Load() {
         return 0, ErrTimeout
     }

     var bytes uint32
-    err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
-    n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
+    err = windows.WriteFile(f.handle, b, &bytes, &c.o)
+    n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
     runtime.KeepAlive(b)
     return n, err
 }
@@ -274,7 +271,7 @@ func (f *win32File) SetWriteDeadline(deadline time.Time) error {
 }

 func (f *win32File) Flush() error {
-    return syscall.FlushFileBuffers(f.handle)
+    return windows.FlushFileBuffers(f.handle)
 }

 func (f *win32File) Fd() uintptr {
@@ -291,7 +288,7 @@ func (d *deadlineHandler) set(deadline time.Time) error {
         }
         d.timer = nil
     }
-    d.timedout.setFalse()
+    d.timedout.Store(false)

     select {
     case <-d.channel:
@@ -306,7 +303,7 @@ func (d *deadlineHandler) set(deadline time.Time) error {
     }

     timeoutIO := func() {
-        d.timedout.setTrue()
+        d.timedout.Store(true)
         close(d.channel)
     }


vendor/github.com/Microsoft/go-winio/fileinfo.go (75 changed lines, generated, vendored)
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 package winio
@@ -5,44 +6,83 @@ package winio
 import (
     "os"
     "runtime"
-    "syscall"
     "unsafe"
-)

-//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
-//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
-
-const (
-    fileBasicInfo = 0
-    fileIDInfo    = 0x12
+
+    "golang.org/x/sys/windows"
 )

 // FileBasicInfo contains file access time and file attributes information.
 type FileBasicInfo struct {
-    CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
+    CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
     FileAttributes                                          uint32
-    pad                                                     uint32 // padding
+    _                                                       uint32 // padding
 }

+// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing
+// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64
+// alignment is necessary to pass this as FILE_BASIC_INFO.
+type alignedFileBasicInfo struct {
+    CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
+    FileAttributes                                          uint32
+    _                                                       uint32 // padding
+}
+
 // GetFileBasicInfo retrieves times and attributes for a file.
 func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
-    bi := &FileBasicInfo{}
-    if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+    bi := &alignedFileBasicInfo{}
+    if err := windows.GetFileInformationByHandleEx(
+        windows.Handle(f.Fd()),
+        windows.FileBasicInfo,
+        (*byte)(unsafe.Pointer(bi)),
+        uint32(unsafe.Sizeof(*bi)),
+    ); err != nil {
         return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
     }
     runtime.KeepAlive(f)
-    return bi, nil
+    // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
+    // public API of this module. The data may be unnecessarily aligned.
+    return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
 }

 // SetFileBasicInfo sets times and attributes for a file.
 func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
-    if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+    // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
+    // suitable to pass to GetFileInformationByHandleEx.
+    biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
+    if err := windows.SetFileInformationByHandle(
+        windows.Handle(f.Fd()),
+        windows.FileBasicInfo,
+        (*byte)(unsafe.Pointer(&biAligned)),
+        uint32(unsafe.Sizeof(biAligned)),
+    ); err != nil {
         return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
     }
     runtime.KeepAlive(f)
     return nil
 }

+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+    AllocationSize, EndOfFile int64
+    NumberOfLinks             uint32
+    DeletePending, Directory  bool
+}
+
+// GetFileStandardInfo retrieves ended information for the file.
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
+    si := &FileStandardInfo{}
+    if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
+        windows.FileStandardInfo,
+        (*byte)(unsafe.Pointer(si)),
+        uint32(unsafe.Sizeof(*si))); err != nil {
+        return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+    }
+    runtime.KeepAlive(f)
+    return si, nil
+}
+
 // FileIDInfo contains the volume serial number and file ID for a file. This pair should be
 // unique on a system.
 type FileIDInfo struct {
@@ -53,7 +93,12 @@ type FileIDInfo struct {
 // GetFileID retrieves the unique (volume, file ID) pair for a file.
 func GetFileID(f *os.File) (*FileIDInfo, error) {
     fileID := &FileIDInfo{}
-    if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
+    if err := windows.GetFileInformationByHandleEx(
+        windows.Handle(f.Fd()),
+        windows.FileIdInfo,
+        (*byte)(unsafe.Pointer(fileID)),
+        uint32(unsafe.Sizeof(*fileID)),
+    ); err != nil {
         return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
     }
     runtime.KeepAlive(f)
Some files were not shown because too many files have changed in this diff.