Mirror of https://github.com/openfaas/faas.git (synced 2025-06-21 14:23:25 +00:00)

Compare commits: openfaaslt...streaming

65 Commits
SHA1
5aa8f5aec2
02205b8b19
9ba4a73d5d
479285caf6
4cf5fb8369
ed5bd7546e
4f4e3d288a
d0eec5fbbf
25e44f0b57
6a9ece3cc1
0036d6ac78
2a88b5d2f7
55776acc0d
c3800da6fa
472291b40b
128b450a88
5c851cdf31
9e6f814f6f
68ec0f59d6
c0d710c97f
373a79256f
06ade37420
910b8dae1b
00613347f8
e0144b0573
4315101191
b4b7e2d450
0972fa6093
e44448c5dc
bf63bbf88f
a128df471f
c26ec5221e
8e1c34e222
21a8f0cec1
8d38b4befe
1fc7bbce4e
231e3ed426
fbc0ebdf4a
4f9c61b5d2
a7d486eee6
b1ef4b49b7
f9245ebbb3
f3599f4699
3bafff7e09
e3171b49b0
e1c62f4875
b31419c8de
004bbddadb
88bedf78bd
9d0436e511
c07bebbbc9
208b1b2235
0255a9480b
f7f71f1497
03b6d6c01b
efffd83990
06433e11c0
806585b434
32b828b25e
bb163760ff
1a00a55c77
bc2eeff467
887c804254
9da2ec244f
8e711b3a0c
.github/ISSUE_TEMPLATE.md (vendored, 5 changes)

@@ -7,6 +7,9 @@

<!-- How is this affecting you? What task are you trying to accomplish? -->

## Why do you need this?

## Who is this for?

What company is this for? Are you listed in the [ADOPTERS.md](https://github.com/openfaas/faas/blob/master/ADOPTERS.md) file?

<!--- Provide a general summary of the issue in the Title above -->

## Expected Behaviour

@@ -20,7 +23,7 @@

## Are you a GitHub Sponsor (Yes/No?)

<!--- Given this request for help, how are you supporting the project? -->
<!-- Issues created by customers or monthly sponsors get priority -->

Check at: https://github.com/sponsors/openfaas

- [ ] Yes
.github/workflows/build.yml (vendored, 66 changes)

@@ -9,71 +9,37 @@ on:
    - '*'

jobs:

  build-gateway:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go-version: [1.18.x]
    steps:
      - uses: actions/checkout@master
        with:
          fetch-depth: 1
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Get TAG
        id: get_tag
        run: echo ::set-output name=TAG::latest-dev
        uses: docker/setup-buildx-action@v2

      - name: Get git commit
        id: get_git_commit
        run: echo "GIT_COMMIT=$(git rev-parse HEAD)" >> $GITHUB_ENV
      - name: Get version
        id: get_version
        run: echo "VERSION=$(git describe --tags --dirty)" >> $GITHUB_ENV
      - name: Get Repo Owner
        id: get_repo_owner
        run: echo ::set-output name=repo_owner::$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')
        run: echo "REPO_OWNER=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV

      - name: Build ${{ matrix.svc }}
        uses: docker/build-push-action@v2
        uses: docker/build-push-action@v3
        with:
          context: ./gateway
          file: ./gateway/Dockerfile
          outputs: "type=image,push=false"
          platforms: linux/amd64,linux/arm/v7,linux/arm64
          build-args: |
            VERSION=${{ steps.get_tag.outputs.TAG }}
            VERSION=${{ env.TAG }}
            GIT_COMMIT=${{ github.sha }}
          tags: |
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/gateway:${{ steps.get_tag.outputs.TAG }}
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/gateway:${{ github.sha }}
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/gateway:latest

  build-auth-plugins:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go-version: [1.17.x]
        svc: [
          basic-auth
        ]
    steps:
      - uses: actions/checkout@master
        with:
          fetch-depth: 1
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Get TAG
        id: get_tag
        run: echo ::set-output name=TAG::latest-dev
      - name: Get Repo Owner
        id: get_repo_owner
        run: echo ::set-output name=repo_owner::$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')
      - name: Build ${{ matrix.svc }}
        uses: docker/build-push-action@v2
        with:
          context: ./auth/${{ matrix.svc }}
          file: ./auth/${{ matrix.svc }}/Dockerfile
          outputs: "type=image,push=false"
          platforms: linux/amd64,linux/arm/v7,linux/arm64
          tags: |
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/${{ matrix.svc }}:${{ steps.get_tag.outputs.TAG }}
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/${{ matrix.svc }}:${{ github.sha }}
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/${{ matrix.svc }}:latest
            ghcr.io/${{ env.REPO_OWNER }}/gateway:${{ github.sha }}
            ghcr.io/${{ env.REPO_OWNER }}/gateway:latest
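The workflow above moves from the deprecated `::set-output` workflow command to writing `KEY=value` lines into the file named by `$GITHUB_ENV`, which later steps in the same job read back as `${{ env.KEY }}`. A minimal sketch of that mechanism, using the same variables as the workflow (runnable inside any job step):

```bash
# Append variables to the file GitHub Actions points at via $GITHUB_ENV.
# Appending with >> matters: the file accumulates variables across steps,
# so truncating it with > would discard anything set earlier in the job.
echo "GIT_COMMIT=$(git rev-parse HEAD)" >> "$GITHUB_ENV"
echo "REPO_OWNER=$(echo "$GITHUB_REPOSITORY_OWNER" | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_ENV"
# Later steps can reference ${{ env.GIT_COMMIT }} and ${{ env.REPO_OWNER }}.
```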
.github/workflows/publish.yml (vendored, 85 changes)

@@ -6,82 +6,57 @@ on:
    - '*'

jobs:

  publish-gateway:
  publish:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go-version: [1.18.x]
    permissions:
      actions: read
      checks: write
      issues: read
      packages: write
      pull-requests: read
      repository-projects: read
      statuses: read

    steps:
      - uses: actions/checkout@master
        with:
          fetch-depth: 1
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Registry
        uses: docker/login-action@v1
        uses: docker/login-action@v2
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.DOCKER_PASSWORD }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Get TAG
        id: get_tag
        run: echo ::set-output name=TAG::${GITHUB_REF#refs/tags/}
        run: echo TAG=${GITHUB_REF#refs/tags/} >> $GITHUB_ENV

      - name: Get git commit
        id: get_git_commit
        run: echo "GIT_COMMIT=$(git rev-parse HEAD)" >> $GITHUB_ENV
      - name: Get version
        id: get_version
        run: echo "VERSION=$(git describe --tags --dirty)" >> $GITHUB_ENV
      - name: Get Repo Owner
        id: get_repo_owner
        run: echo ::set-output name=repo_owner::$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')
        run: echo "REPO_OWNER=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV

      - name: Publish ${{ matrix.svc }}
        uses: docker/build-push-action@v2
        uses: docker/build-push-action@v3
        with:
          context: ./gateway
          file: ./gateway/Dockerfile
          outputs: "type=registry,push=true"
          platforms: linux/amd64,linux/arm/v7,linux/arm64
          build-args: |
            VERSION=${{ steps.get_tag.outputs.TAG }}
            VERSION=${{ env.TAG }}
            GIT_COMMIT=${{ github.sha }}
          tags: |
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/gateway:${{ steps.get_tag.outputs.TAG }}
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/gateway:${{ github.sha }}
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/gateway:latest

  publish-auth-plugins:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go-version: [1.17.x]
        svc: [
          basic-auth
        ]
    steps:
      - uses: actions/checkout@master
        with:
          fetch-depth: 1
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Get TAG
        id: get_tag
        run: echo ::set-output name=TAG::${GITHUB_REF#refs/tags/}
      - name: Login to Docker Registry
        uses: docker/login-action@v1
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.DOCKER_PASSWORD }}
          registry: ghcr.io
      - name: Get Repo Owner
        id: get_repo_owner
        run: echo ::set-output name=repo_owner::$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')
      - name: Publish ${{ matrix.svc }}
        uses: docker/build-push-action@v2
        with:
          context: ./auth/${{ matrix.svc }}
          file: ./auth/${{ matrix.svc }}/Dockerfile
          outputs: "type=registry,push=true"
          platforms: linux/amd64,linux/arm/v7,linux/arm64
          tags: |
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/${{ matrix.svc }}:${{ steps.get_tag.outputs.TAG }}
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/${{ matrix.svc }}:${{ github.sha }}
            ghcr.io/${{ steps.get_repo_owner.outputs.repo_owner }}/${{ matrix.svc }}:latest
            ghcr.io/${{ env.REPO_OWNER }}/gateway:${{ github.sha }}
            ghcr.io/${{ env.REPO_OWNER }}/gateway:${{ env.TAG }}
            ghcr.io/${{ env.REPO_OWNER }}/gateway:latest
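The Get TAG step relies on shell parameter expansion: for a tag build, `GITHUB_REF` has the form `refs/tags/<tag>`, and `${GITHUB_REF#refs/tags/}` strips that prefix. A quick sketch of the behaviour (the sample tag value is made up):

```bash
# ${VAR#pattern} removes the shortest match of pattern from the front of $VAR.
GITHUB_REF="refs/tags/0.27.0"                         # example value only
echo "${GITHUB_REF#refs/tags/}"                       # prints: 0.27.0
echo "TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"  # as in the Get TAG step
```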
ADOPTERS.md (126 changes)

@@ -1,16 +1,29 @@

# Adopters

This list shows adopters of OpenFaaS. If you're using OpenFaaS in some way, then please get in touch.
This list shows adopters of OpenFaaS. If you're using OpenFaaS in some way, then please add your team and use-case to this file.

## Further resources
## How else can you support this project?

Become a GitHub Sponsor - either as an individual practitioner using or introducing OpenFaaS, or as a company, or both.
### Individual or company sponsor

You can sponsor OpenFaaS on GitHub; most users do this using their personal accounts.

You'll show up as a sponsor on issues and PRs, which makes it more likely that you'll get a timely response and help from the community.

* [Sponsor OpenFaaS on GitHub](https://github.com/openfaas/sponsors)

Support & OpenFaaS PRO
### Help yourself, with the manual for OpenFaaS

* Buy [OpenFaaS PRO or Enterprise Subscription](https://openfaas.com/support) from OpenFaaS Ltd
Help yourself learn and grow, whilst supporting us.

* [Serverless For Everyone Else](https://openfaas.gumroad.com/l/serverless-for-everyone-else) is the official manual for OpenFaaS, brimming with examples written in Node.js
* [Everyday Go](https://openfaas.gumroad.com/l/everyday-golang) is the official reference material from the OpenFaaS founder Alex Ellis for learning Go, and writing functions in Go.

### Running OpenFaaS in production?

See how the Community Edition (CE), which you are using now, compares to OpenFaaS Pro, which is designed for commercial use:

[Overview and comparison of OpenFaaS Pro](https://docs.openfaas.com/openfaas-pro/introduction/)

Tell us more:

@@ -21,20 +34,38 @@ Tell us more:

* [3fs](https://3fs.si) - 3fs is using OpenFaaS for automating repetitive development tasks like automatic rebasing, vendoring of dependencies on merge requests and many other things that make our developers' lives easier

* [BT](https://www.bt.com) - BT are using OpenFaaS to enable collaboration between data-scientists and developers. The teams are going from 3 years to build and deliver a PoC, to 3 months. See: [KubeCon video](https://www.youtube.com/watch?v=y77HlN2Fa-w)
* [911 Security](https://www.911security.com/) - "We migrated our Python functions from AWS Lambda using automation, and now run them in airgapped environments for customers using OpenFaaS and arkade" - Scott Creager

* [Altair Engineering](https://altair.com/) - OpenFaaS powers the customer functions capability of the IoT SaaS platform, and separately each private on-prem installation of the product.

* [Axa France](https://www.axa.fr) - Axa uses OpenFaaS for inference and predictions at scale using ML models - Pierre-Henri Gache

* [Baidu](https://baidu.com) - A team within Baidu provides ML models to customers which are hosted on OpenFaaS - He Sun.

* [BCubed Engineering](https://bcubed-corp.com) - "We use OpenFaaS to provide a serverless platform for our customers to run their code on."

* [Black.ai](https://black.ai) - video encoding, transcoding - object detection for CCTV using AI.

* [Breu](https://breu.io) - Breu is using OpenFaaS to build an end user monitoring solution for hybrid cloud.

* [BT](https://www.bt.com) - BT are using OpenFaaS to enable collaboration between data-scientists and developers. The teams are going from 3 years to build and deliver a PoC, to 3 months. See: [KubeCon video](https://www.youtube.com/watch?v=y77HlN2Fa-w)

* [BulletProof](https://www.bulletproof.co.uk/) - Bulletproof are using OpenFaaS to build an on demand and scalable Vulnerability Scanning (VA) engine. Using OpenFaaS allows us to use compute resource efficiently yet maintain the ability to grow to meet customer scanning demands. We also like the ability to use pure docker containers to compose multiple scanning tools with different technologies into a single, coherent interface. This has reduced the time needed to add new tools to the platform.

* [CDATA](https://cdata.com) - Used for background jobs and tasks such as backing up and exchanging data between systems.

* [Citrix](https://www.citrix.com/en-gb/) - Citrix built out a closed-source multi-tenant functions platform and UI using OpenFaaS. It is used for testing hardware devices and for automated QA testing.

* [Civo](https://www.civo.com) - Civo Cloud provide a 1-click Kubernetes marketplace application for OpenFaaS

* [Cloud Initiatives](https://cloudinitiatives.com) - Used for customer installations for custom functionality, and for the main product providing educational course metrics.

* [Cognite](https://www.cognite.com) - Cognite targets heavy asset industries such as oil and gas, shipping and the energy sector. They provide data integration tools that help you extract, import, and transform data from siloed source systems, and OpenFaaS is used to provide a cloud function service for heavy tasks.

* [Contiamo](https://www.contiamo.com) - data-science platform hosting jupyter notebooks and functions for multiple tenants.

* [Corva.ai](https://corva.ai) - "Corva is an information-sharing, collaboration-driving, and productivity-powering solution for your Drilling, Completions, Geoscience, and Sustainability teams."

* [DB2 Limited](https://db2.io) - mobile and web development company in Ukraine. Our internal projects use OpenFaaS functions to run customers' code in a Kubernetes cluster.

* [DigitalOcean](https://www.digitalocean.com) - DigitalOcean provide a one-click droplet and a 1-click Kubernetes marketplace application for OpenFaaS

@@ -43,51 +74,87 @@ Tell us more:

* [Dragonchain](https://dragonchain.com/) - "At Dragonchain, we focus on creating a hybrid blockchain-as-a-service product, with integrations of OpenFaaS as our 'smart contract' platform, to be able to automatically run customer code based on interactions that occur on the blockchain. This allows us to be extremely flexible, as customers only have to create a docker container and give it to us in order to create a 'smart contract' which can have deep integrations with our blockchain itself.". Blog: [Dragonchain & OpenFaaS](https://dragonchain.com/blog/blockchain-as-a-service-at-scale-for-enterprise)

* [Edge Delta](https://www.edgedelta.com/) - "OpenFaaS powers parts of the "edge observability platform"
* [Edge Delta](https://www.edgedelta.com/) - "OpenFaaS powers parts of our "edge observability platform""

* [First Baptist Church Carrollton](https://www.fbcc.us) - "We use faasd as the backend for a Slack bot connected to our internal Slack workspace. The bot was initially created to facilitate remote question and answer sessions at our church by allowing viewers of our live stream to text or email questions in, have a staff member ask their question in the room, and then allow the staff member to send a response back to the sender. The texting is facilitated by Twilio while the email is done by interacting with a Gmail account via IMAP and SMTP."

* [Fonix Telematics](https://fonixtelematics.com/) - "We are using OpenFaaS to build our new generation of APIs."

* [FTI Consulting](https://www.fticonsulting.com/) - "We've built a cloud-based analytical framework using Netflix Conductor for the workflow engine and OpenFaaS for our serverless function implementation, where each function can be called from a workflow. We've currently deployed several dozen OpenFaaS functions to our on-premise Kubernetes clusters" - Jason Cullison

* [GalaxyCard](https://www.galaxycard.in/) - "GalaxyCard is a happy user of OpenFaaS"

* [GH Electronic GmbH](https://gselectronic.com/) - "We've been using OpenFaaS in production for over 5 years and have 30 C# functions which are used in our manufacturing process."

* [GMO Internet](https://www.gmo.jp/en/)

* [HPE](https://www.hpe.com/) - HPE Ezmeral is a purpose-built, hybrid cloud platform for data science and analytics workloads.
* [HelloSafe](https://hellosafe.ca/en/) - "HelloSafe is one of the leading websites for financial products comparison in Canada. We're using OpenFaaS on our production applications."

* [HM Planning Inspectorate](http://www.planninginspectorate.gov.uk) - HM Planning Inspectorate is the UK Government body responsible for dealing with planning appeals, national infrastructure planning applications, examinations of local plans and other specialist casework in England and Wales. OpenFaaS eased the communication between the new planning appeals website and the monolithic back-office application and allowed easy retries in the event of network failure.

* [Iconscout](https://iconscout.com) - e-commerce site for stock photography and icons. OpenFaaS is used to resize images and to bundle assets for customers.
* [HPE](https://www.hpe.com/) - HPE Ezmeral is a purpose-built, hybrid cloud platform for data science and analytics workloads.

* [Ingrooves](https://ingrooves.com) - Ingrooves is a global music distribution, tech & marketing company, and OpenFaaS is a key component in its finance system for report generation, event publishing, and data ingestion.
* [Iconscout](https://iconscout.com) - e-commerce site for stock photography and icons. OpenFaaS is used to resize images and to bundle assets for customers.

* [Infotechpartners](www.infotechpartners.be)

* [Ingrooves](https://ingrooves.com) - Ingrooves is a global music distribution, tech & marketing company, and OpenFaaS is a key component in its finance system for report generation, event publishing, and data ingestion.

* [Intel.com](https://intel.com) - OpenFaaS is used within a commercial service and within the Open Source group for AI model serving.

* [Intraffic](https://www.intraffic.nl/) - "Using OpenFaaS for integration and callable AI/ML models for asset management."

* [Klar MX](https://klar.mx) - "Cuenta con Klar" - Klar provides access to credit cards in Mexico for those who have issues with credit history.

* [Kubiya.ai](https://kubiya.ai) - ChatGPT-like DevOps Virtual Assistant that runs OpenFaaS functions for custom infrastructure automation and management.

* [LivePerson](https://www.liveperson.com/) - LivePerson extended their chat platform by allowing customers to write functions to execute in client chat flows. See [KubeCon video](https://www.youtube.com/watch?v=bt06Z28uzPA)

* [Live Time Value (LTV) Co.](https://www.ltvco.com) - "Data is at the heart of what we do" - the data-science team at LTV use OpenFaaS to provide a scalable and cost-effective way to run their models in production.

* [Mercedes Benz Tech Innovation](https://mercedes-benz.com) - "We are currently using OpenFaaS as a communicative API between the vehicle app and the backend. In the future we plan to offer a service in the company, with which every app developer can host an API on the OpenFaaS platform at short notice"

* [metaspan](https://metaspan.com) - "End-to-end blockchain solutions". metaspan ported all api endpoints from monolith express.js/sails.js to openfaas micro-functions.

* [MoneyLion](https://www.moneylion.com/)

* [Naamio](https://naamio.cloud/) - "Naamio are providing an event-based serverless API to developers to enable rapid development of decentralized applications on the cloud. By providing progressive enhancement within the developer tools, OpenFaaS has enabled Naamio to go from clustered Docker container deployments with REST APIs using Kubernetes, to load balanced deployable functions over an open event queue interface. It was key to enabling a standard multilingual development kit across cloud providers."

* [Nexylan](nexylan.com/) - "We are a French professional hoster that use OpenFaaS in dev and production inside our private extranet. We use OpenFaaS to split our historic monolith project and then simplify development/maintainability and speed up development times."
* [Neoskop](https://www.neoskop.de) - Neoskop is using OpenFaaS in production to provide our developers with a self-service platform for backend functionality, and thereby our customers with agile and rapid feature development.

* [Nexylan](nexylan.com/) - "We are a French professional host that use OpenFaaS in dev and production inside our private extranet. We use OpenFaaS to split our historic monolith project and then simplify development/maintainability and speed up development times."

* [NGC](https://www.ngcsoftware.com/)

* [Northwestern Mutual](https://www.northwesternmutual.com/) - "OpenFaaS is a great platform and Alex and team are a great resource. They will work very diligently with your team to help you get the most out of OpenFaaS, and he will always be able to provide valuable insight into issues that a team might face while developing software for the cloud." Kieran Gordon

* [Optiv](https://optiv.com) - Cyber Security Solutions

* [Outsystems](https://outsystems.com) - "In my team, we're using OpenFaaS to help the orchestration of our CD pipelines. From a high-level perspective, we have a NATS cluster and the OpenFaaS functions subscribing to NATS topics and reacting to them. Our functions are doing some work related to the pipeline, like saving data to the database, sending Slack messages, or just returning something from the database." (Marco Alves)

* [P. A. Media Group](https://pamediagroup.com/) - "We use OpenFaaS to orchestrate Terraform and Jenkins jobs for our internal infrastructure provisioning" - Rob Stonham

* [Patchworks Integration Limited](https://www.wearepatchworks.com) - Ecommerce integrations made easy - functions provide custom enrichment for data and integrations with third-party APIs. Customers can provide their own PHP code to execute in a sandboxed environment.

* [PathfinderZA](https://www.pathfinderza.com) - PathfinderZA is an IoT security firm selling underground sensors that transmit warnings to users if a person or vehicle goes past them. We're using OpenFaaS, with Dockerised functions written in Java (Quarkus) and Rust (Actix/Rocket-RS).

* [Pentium Network](https://www.pentium.network/)

* [PiperCI](https://piperci.dreamer-labs.net) - PiperCI is a task management framework that provides users with a standard library of CI/CD-centric tasks and the [OpenFaas](https://www.openfaas.com/) and [Kubernetes](https://kubernetes.io/) based infrastructure required to run them. PiperCI can be used in conjunction with existing CI/CD orchestrators like GitlabCI, Jenkins, TravisCI, or others to create a more scalable, robust, and functional CI/CD system.

* [Politics Rewired](https://www.politicsrewired.com/) - Politics Rewired uses OpenFaaS to enable organisation of political campaigns and sending of SMS messages at scale using functions.

* [Press Association](https://www.pressassociation.com/) - Press Association is using OpenFaaS in development and production as part of our deployment pipeline.

* [Pypestream](https://www.pypestream.com) - "We have just migrated 50 of our customers from Kubeless, which is now deprecated, to OpenFaaS" - Antoine Hamon

* [Rapid Circle](https://www.rapidcircle.com) is using OpenFaaS within an Azure Kubernetes cluster to host a large amount of micro-services aiming at automating core activities of their Microsoft 365 Cloud Managed Services offering. Robustness, speed, scalability and simplicity have been major reasons to favor OpenFaaS over Azure Functions.

* [Ratehub](https://www.ratehub.ca) - Ratehub is Canada's leading personal finance comparison site. We're breaking apart our monolithic PHP and Java codebases into Node, PHP and Java OpenFaaS functions; there's not much that we don't plan on moving to FaaS!

* [Rapid Circle](https://www.rapidcircle.com) is using OpenFaaS within an Azure Kubernetes cluster to host a large amount of micro-services aiming at automating core activities of their Microsoft 365 Cloud Managed Services offering. Robustness, speed, scalability and simplicity have been major reasons to favor OpenFaaS over Azure Functions.
* [skyslope.com](https://skyslope.com) - "We process millions of documents per day and moved from AWS Lambda to Kubernetes. We estimate that OpenFaaS has saved us 60,000 USD each year over the past three years that we've been running it in our business" - Derrick Martinez

* [smashHit](https://smashhit.eu) - smashHit is a project funded by the European Union's Horizon 2020 research and innovation programme under grant agreement No. 871477. The objective of smashHit is to assure trusted and secure sharing of data streams from both personal and industrial platforms, needed to build sectorial and cross-sectorial services, by establishing a Framework for processing of data owner consent and legal rules (GDPR) and effective contracting, as well as joint security and privacy-preserving mechanisms. We are utilising OpenFaaS to support the need for scalable processing through the use of functions.

* [Sprucee](https://spruce.casa) - We use [faasd](https://github.com/openfaas/faasd) as part of our base Encryption as a Service platform, which used to be manually managed Docker containers. As a NATS-based platform we were able to scale to any size we wanted, but deployment took a lot of labor time as we needed to deal with OS-level and customer limitations. Now we can use "faas install/up" to accomplish 80% of the deployment effort.

@@ -95,31 +162,54 @@ Tell us more:

* [SURFsara IoT Platform for Sensemakers](https://github.com/sensemakersamsterdam/sensemakers-iot-platform) - The SURFsara IoT Platform for Sensemakers is a platform for storing, monitoring, visualising and analyzing sensor data. It is a collaboration platform designed to host multiple projects carried by the Sensemakers community. In addition, there is a project dedicated to experimentation, available for everyone to use. All data within the platform is shared. OpenFaaS serverless functions give access to the platform through an HTTP entry point, take care of the metadata extraction and enable custom event-driven actions.

* [Surge](https://www.workwithsurge.com) - Lending Platform and Salesforce integrations

* [TeamViewer.com](https://teamviewer.com) - "TeamViewer uses OpenFaaS across several clusters"

* [T-Mobile](https://www.t-mobile.com/) - T-Mobile is a global mobile network that provides mobile data, voice and text services to consumers and businesses.

* [Transmute Industries](https://www.transmute.industries/) - "At Transmute we use OpenFaaS to develop identity and access integrations leveraging decentralized identities that integrate with legacy IAM systems. OpenFaaS helps Transmute and our customers avoid vendor lock in, encourages modularity, and helps us rapidly develop and release integrations for customers."

* [Traversals](https://traversals.com/) - At Traversals, we use OpenFaaS for processing of incoming data. We benefit from the various programming languages available in OpenFaaS.

* [T-Mobile](https://www.t-mobile.com/) - T-Mobile is a global mobile network that provides mobile data, voice and text services to consumers and businesses.

* [UStore](http://ustore.com.br/) - "We're using OpenFaaS in production"

* [Very Good Security](https://www.verygoodsecurity.com) - VGS uses OpenFaaS to build a solid foundation for the development, deployment, and execution of custom logic on customer payloads as part of their secure compute platform.

* [Vision Banco SAECA](https://www.visionbanco.com) - self-service home banking portal and asynchronous report/PDF generation. See: [KubeCon Video](https://www.youtube.com/watch?v=mPjI34qj5vU&t=1417s)

* [Virality](https://www.virality.de/)

* [Vision Banco SAECA](https://www.visionbanco.com) - self-service home banking portal and asynchronous report/PDF generation. See: [KubeCon Video](https://www.youtube.com/watch?v=mPjI34qj5vU&t=1417s)

* [VMware](https://vmware.com)
  * Used in "veba" VMware Event Broker Appliance to extend vSphere by adding event functionality. OpenFaaS functions and the vcenter-connector are used as an appliance.
  * CAS / vRA8 - The Cloud Automation Services product has an option to deploy "FaaS on-premises", this actually deploys OpenFaaS white-boxed / white-labelled. [CAS Write-up from Swisscom](https://ict.swisscom.ch/2019/08/cloud-automation-services-on-prem-faas-provider-for-vsphere/)
  * OpenFaaS is also repackaged as "Automation Extensibility" in the ["vRO" product](https://vnuggets.com/2019/08/16/cloud-assembly-extensibility-with-abx-faas-part1/). [See an example](https://vnuggets.com/2019/08/19/cloud-assembly-extensibility-with-abx-faas-part3/)

* [VNourdin](https://www.vnourdin.dev) - I am a French web developer working with the Jamstack, using 11ty and faasd. I started with faasd as I wanted to make it work on my small VPS, but as I do more and more projects relying on OpenFaaS, I'll probably switch to a K8s cluster to gain scalability.

* [Waylay](https://www.waylay.io) - We use OpenFaaS to deploy small snippets of code that can be combined in a low-code manner by our clients to do data orchestration and automation. Users of the platform are also able to deploy their own plugins (written in multiple languages), which also get deployed on OpenFaaS.

* [Wireline.io](https://wireline.io) - portable functions that can run on any hardware, indexed through blockchain.

* [WorldQuant](https://worldquant.com) - Using OpenFaaS as part of WorldQuant's solutions.

* [Yokogawa Electric](https://en.wikipedia.org/wiki/Yokogawa_Electric)

* [Ytel](https://www.ytel.com) - Ytel are a Google Cloud customer and deployed OpenFaaS vs. the vendor alternative due to its wide range of templates, Dockerfile support and easier access to services within the VPC. The Dockerfile template allowed for easy migration of existing code. The latency of transactions for customers during the purchase process was reduced by offloading synchronous code to NATS, which is built into OpenFaaS. OpenFaaS also allowed "hot path" code to be refactored from large services into multiple functions, to take advantage of horizontal scaling.

* [smashHit](https://smashhit.eu) - smashHit is a project funded by the European Union's Horizon 2020 research and innovation programme under grant agreement No. 871477. The objective of smashHit is to assure trusted and secure sharing of data streams from both personal and industrial platforms, needed to build sectorial and cross-sectorial services, by establishing a Framework for processing of data owner consent and legal rules (GDPR) and effective contracting, as well as joint security and privacy-preserving mechanisms. We are utilising OpenFaaS to support the need for scalable processing through the use of functions.

See the top of the file for how to participate.

## Appendix

### Sorting sections

The adopters list should be sorted after it's been updated; here's how you can do that with bash.

```bash
cat | sort --ignore-case
# Copy / paste the list of entries, then press Control + D to see the sorted output
```

Please note that for a few vendors, multiple use-cases are listed under the same entry, so the sorted output needs to be compared with the document's content before updating it.
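If pasting into `sort` by hand is error-prone, a rough alternative is to sort the bullet entries straight from the file. This sketch assumes every adopter entry starts with `* ` at the beginning of a line, and prints to stdout for review rather than editing the file in place:

```bash
# Pull out the top-level adopter bullets and sort them case-insensitively;
# compare the output against the document before pasting it back in.
grep '^\* ' ADOPTERS.md | sort --ignore-case
```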
CONTRIBUTING.md (105 changes)

@@ -1,12 +1,14 @@

# Contributing
# Contributing guidelines

## Guidelines
These are the guidelines for contributing to OpenFaaS Community Edition (CE) and faasd components.

Guidelines for contributing.
OpenFaaS Standard and OpenFaaS For Enterprises are commercial software, and maintained solely by employees of OpenFaaS Ltd.

### First impressions - introducing yourself and your use-case
Customers can provide feedback via the [openfaas/customers](https://github.com/openfaas/customers) repository

One of the best ways to participate within a new open source communities is to introduce yourself and your use-case. This builds goodwill, but also means the community can start to understand your needs and how best to help you.
## First impressions - introducing yourself and your use-case

One of the best ways to participate within a new open source community is to introduce yourself and your use-case. This builds goodwill, but also means the community can start to understand your needs and how best to help you.

Given that the community is made up of volunteers, making a good first impression is important to getting their ear and attention.

@@ -31,7 +33,7 @@ The primary ways to engage with the community are via GitHub Issues and [Enterpr

See also: [The no-excuses guide to introducing yourself to a new open source project](https://opensource.com/education/13/7/introduce-yourself-open-source-project)

### How can I get involved?
## How can I get involved?

There are a number of areas where contributions can be accepted:

@@ -49,17 +51,19 @@ There are a number of areas where contributions can be accepted:

This is just a short list of ideas; if you have other ideas for contributing, please make a suggestion.

### I want to contribute on GitHub
If you'd like help getting involved, [join our weekly community call on Zoom](https://docs.openfaas.com/community).

#### I've found a security issue
## I want to contribute on GitHub

Please follow [responsible disclosure practices](https://en.wikipedia.org/wiki/Responsible_disclosure) and send an email to support@openfaas.com. Bear in mind that instructions on how to reproduce the issue are key to proving an issue exists, and getting it resolved. Suggested solutions are also welcome.
### I've found a security issue

#### I've found a typo
Please follow [responsible disclosure practices](https://en.wikipedia.org/wiki/Responsible_disclosure) and send an email to support@openfaas.com. Bear in mind that instructions on how to reproduce the issue are key to proving an issue exists, and getting it resolved.

### I've found a typo

* A Pull Request is not necessary. Raise an [Issue](https://github.com/openfaas/faas/issues) and we'll fix it as soon as we can.

#### I have a (great) idea
### I have a (great) idea

The OpenFaaS maintainers would like to make OpenFaaS the best it can be and welcome new contributions that align with the project's goals. Our time is limited so we'd like to make sure we agree on the proposed work before you spend time doing it. Saying "no" is hard, which is why we'd rather say "yes" ahead of time. You need to raise a proposal.

@@ -84,7 +88,7 @@ If you are proposing a new tool or service please do due diligence. Does this to

Every effort will be made to work with contributors who do not follow the process. Your PR may be closed or marked as `invalid` if it is left inactive, or the proposal cannot move into a `design/approved` status.

#### Paperwork for Pull Requests
### Paperwork for Pull Requests

Please read this whole guide and make sure you agree to the Developer Certificate of Origin (DCO) agreement (included below):

@@ -95,7 +99,7 @@ Please read this whole guide and make sure you agree to the Developer Certificat

* Always give instructions for testing
* Provide us CLI commands and output or screenshots where you can

##### Commit messages
#### Commit messages

The first line of the commit message is the *subject*; this should be followed by a blank line and then a message describing the intent and purpose of the commit. These guidelines are based upon a [post by Chris Beams](https://chris.beams.io/posts/git-commit/).
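As an illustration, a commit following that convention could be created like so; the message text is invented for the example, and `-s` adds the DCO sign-off discussed in this guide:

```bash
# The first -m is the subject; the second -m becomes the body, separated
# by a blank line. Keep the subject short and explain intent in the body.
git commit -s \
  -m "Extend gateway timeout handling for slow functions" \
  -m "Describe why the change is needed and what it does, rather than
restating the diff. Wrap body lines at around 72 characters."
```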
@@ -178,47 +182,50 @@ defer goleak.VerifyNoLeaks(t)

at the very beginning of the test, and it will fail the test if it detects goroutines that were opened but never cleaned up at the end of the test.

#### I have a question, a suggestion or need help
#### I need to add a dependency

All projects use [Go modules](https://github.com/golang/go/wiki/Modules) and vendoring. The concept of `vendoring` is still broadly used in projects written in Go. This means that a copy of the source-code of dependencies is stored within each repository in the `vendor` folder. It allows for a repeatable build and isolates change.

Components must be licensed with an MIT, BSD, or Apache 2.0 license. We may ask you to write your own code when dependencies are trivial, or unmaintained by their authors.
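In practice, adding or updating a vendored dependency typically follows the standard Go modules flow, roughly as below (the module path is a placeholder, not a real requirement of this project):

```bash
go get github.com/example/somelib@v1.2.3  # add or upgrade the dependency
go mod tidy                               # prune and sync go.mod and go.sum
go mod vendor                             # refresh the vendor/ folder to match
```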
### I have a question, a suggestion or need help

If you have a deeply technical request or need help debugging your application then you should prepare a simple, public GitHub repository with the minimum amount of code required to reproduce the issue.

If you feel there is an issue with OpenFaaS or were unable to get the help you needed from GitHub, [then send us an email](https://openfaas.com/support/)

#### Setting expectations, support and SLAs

* What kind of support can I expect for free?
* What kind of support can I expect?

OpenFaaS is licensed in a way that enables you to use the source code in or with your project or product.
OpenFaaS Standard customers have self-service support, and can directly contact the OpenFaaS Ltd team via the [openfaas/customers](https://github.com/openfaas/customers) repository using Discussions.

If you are using one of the Open Source projects within the openfaas or openfaas-incubator repository, then help may be offered on a limited, good-will basis by volunteers, but if you are a commercial user, you will need to purchase support for timely help.

Please be respectful of any time given to you and your needs. The person you are requesting help from may not reside in your timezone and contacting them via direct message is inappropriate.
Support is only offered to free users to fix bugs and issues in the codebase, where the full Issue Template is filled out with sufficient instructions to reproduce the issue. We will not debug your application, or comment on your architecture on GitHub.

Enterprise support is the best place to ask questions, suggest features, and to get help. The GitHub issue tracker can be used for suspected issues with the codebase or deployment artifacts. The whole template must be filled out in detail.
* Can we talk to you in person?

There is a weekly Zoom call for any free user or customer to attend; topics are taken at the beginning of the call, and we will strive to give everyone time to talk.

* Doesn't Open Source mean that everything is free?

The OpenFaaS projects are licensed as MIT which means that you are free to use, modify and distribute the software within the terms of the license.
The OpenFaaS Community Edition (CE) projects are licensed as MIT which means that you are free to use, modify and distribute the software within the terms of the license.

Contributions, suggestions and feedback is welcomed in the appropriate channels as outlined in this guide. The MIT license does not cover support for PRs, Issues, Technical
Support questions, feature requests and technical support/professional services which you may require; the preceding are not free and have a cost to those providing the services. Where possible, this time may be volunteered for free, but it is not unlimited.
Contributions, suggestions and feedback is welcomed in the appropriate channels as outlined in this guide. The MIT license does not cover support for PRs, Issues, Technical Support questions, feature requests and technical support/professional services which you may require; the preceding are not free and have a cost to those providing the services.

* What is the SLA for my Issue?

Issues are examined, triaged and answered on a best-effort basis by volunteers and community contributors. This means that you may receive an initial response within any time period such as: 1 minute, 1 hour, 1 day, or 1 week. There is no implicit meaning to the time between you raising an issue and it being answered or resolved.

If you see an issue which does not have a response or does not have a resolution, it does not mean that it is not important, or that it is being ignored. It simply means it has not been worked on by a volunteer yet.
If you see an issue which does not have a response or does not have a resolution, it does not mean that it is not important, or that it is being ignored. It simply means it has not been worked on yet, or may have been missed.

Please take responsibility for following up on your Issues if you feel further action is required.

If you are a business using OpenFaaS and need timely and attentive responses, then you should purchase Enterprise Support from OpenFaaS Ltd.
If you're an OpenFaaS customer, then you will have a direct line of communication with the OpenFaaS Ltd team; feel free to reach out for an update.

* What is the SLA for my Pull Request?

In a similar way to Issues, Pull Requests are triaged, reviewed, and considered by a team of volunteers - the Core Team, Members Team and the Project Lead. There are dozens of components that make up the OpenFaaS project and a limited amount of people. Sometimes PRs may become blocked or require further action.

Please take responsibility for following up on your Pull Requests if you feel further action is required.

* Why may your PR be delayed?

* The contributing guide was not followed in some way

@@ -229,25 +236,19 @@ If you feel there is an issue with OpenFaaS or were unable to get the help you n

* Changes have been requested

More information, a use-case, or context may be required for the change to be accepted.
* The PR is low priority or low impact

In addition, more information, a use-case, or context may be required for the change to be accepted.

* What if I am a GitHub Sponsor?

If you [sponsor OpenFaaS on GitHub](https://github.com/sponsors/openfaas), then you will show up as a Sponsor on your issues and PRs which is one way to show your support for the community and project. Whilst the entry-level sponsorship is only 25 USD / mo, you will benefit from access to regular updates on project development via the [Treasure Trove portal](https://faasd.exit.openfaas.pro/function/trove/). Your company can also take up a GitHub Sponsorship using their GitHub organisation's existing billing relationship.
If you [sponsor OpenFaaS on GitHub](https://github.com/sponsors/openfaas), then you will show up as a Sponsor on your issues and PRs which is one way to show your support for the community and project. Thank you for your contribution.

Most sponsors are individuals, not corporations, but your organisation can also take up a GitHub Sponsorship using its existing GitHub billing relationship.

* What if I need more than that?
* What if I need more?

If you're a company using any of these projects, you can get the following through an [Enterprise Support agreement with OpenFaaS Ltd](https://openfaas.com/support/) so that the time and resources required to support your business are paid for.

A support agreement can be tailored to your needs; you may benefit from support if you need any of the following:

* security issues patched in a timely manner for all 40 +/- open source components
* priority responses to issues/PRs
* immediate help and access to experts

#### I need to add a dependency

All projects use [Go modules](https://github.com/golang/go/wiki/Modules) and vendoring. The concept of `vendoring` is still broadly used in projects written in Go. This means that a copy of the source-code of dependencies is stored within each repository in the `vendor` folder. It allows for a repeatable build and isolates change.
[Check out the options for self-service and enterprise support](https://openfaas.com/pricing/).

### How are releases made?

@@ -302,8 +303,10 @@ Core Team attend all project meetings and calls. Allowances will be made for tim

The Core Team includes:

- Alex Ellis (@alexellis) - Lead
- Lucas Roesler (@LucasRoesler) - SME for logs, provider model and secrets
- Alex Ellis (@alexellis) - Founder, OpenFaaS Ltd
- Han Verstraete (@welteki) - Junior Software Developer, OpenFaaS Ltd
- Lucas Roesler (@LucasRoesler) - SME for logs, provider model and secrets. Lead Developer @ Contiamo
- Nitishkumar Singh (@nitishkumar71) - Senior Engineer, CTO.ai

#### Members Team

@@ -385,17 +388,13 @@ The [community.md](https://github.com/openfaas/faas/blob/master/community.md) fi

### Roadmap

* See the [2019 Project Update](https://www.openfaas.com/blog/project-update/)

* Browse open issues in [openfaas/faas](https://github.com/openfaas/faas/issues)

* Join the [2020 Roadmap on Trello](https://trello.com/invite/b/5OpMyrBP/ade103a10ae1e38eb5d3eee7955260a9/2020-openfaas-roadmap)

For commercial users, please feel free to ask about support, backlog prioritisation and feature development. Email sales@openfaas.com.
See also: [OpenFaaS Pro](https://docs.openfaas.com/openfaas-pro/introduction/)

## License

This project is licensed under the MIT License.
OpenFaaS Community Edition (CE) is licensed under the MIT License.

OpenFaaS Standard and OpenFaaS for Enterprises are proprietary and binaries are licensed under the commercial [OpenFaaS Pro EULA](https://github.com/openfaas/faas/blob/master/pro/EULA.md).

### Copyright notice

@@ -404,7 +403,7 @@ It is important to state that you retain copyright for your contributions, but a

Please add a Copyright notice to new files you add where this is not already present.

```
// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved.
// Copyright (c) OpenFaaS Author(s) 2023. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
```
Makefile (6 changes)

@@ -5,6 +5,12 @@ NS?=openfaas

build-gateway:
	(cd gateway; docker buildx build --platform linux/amd64 -t ${NS}/gateway:latest-dev .)

# generate Go models from the OpenAPI spec using https://github.com/contiamo/openapi-generator-go
generate:
	rm gateway/models/model_*.go || true
	openapi-generator-go generate models -s api-docs/spec.openapi.yml -o gateway/models --package-name models

# .PHONY: test-ci
# test-ci:
#	./contrib/ci.sh
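With that target in place, regenerating the gateway's models after editing the spec is a single command from the repository root, assuming `openapi-generator-go` is installed and on `PATH`:

```bash
# Deletes the previously generated model_*.go files, then regenerates
# gateway/models from api-docs/spec.openapi.yml.
make generate
```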
README.md

@@ -20,7 +20,7 @@ OpenFaaS® makes it easy for developers to deploy event-driven functions and

* Portable: runs on existing hardware or public/private cloud by leveraging [Kubernetes](https://github.com/openfaas/faas-netes)
* [CLI](http://github.com/openfaas/faas-cli) available with YAML format for templating and defining functions
* Auto-scales as demand increases [including to zero](https://docs.openfaas.com/architecture/autoscaling/)
* [Commercially supported distribution by the team behind OpenFaaS](https://openfaas.com/support/)
* [Commercially supported Pro distribution by the team behind OpenFaaS](https://openfaas.com/pricing/)

**Want to dig deeper into OpenFaaS?**

@@ -153,9 +153,9 @@ Have you written a blog about OpenFaaS? Do you have a speaking event? Send a Pul

* [Read blogs/articles and find events about OpenFaaS](https://github.com/openfaas/faas/blob/master/community.md)

### Roadmap and contributing
### Contributing

OpenFaaS is written in Golang and is MIT licensed - contributions are welcomed whether that means providing feedback, testing existing and new features or hacking on the source.
OpenFaaS Community Edition is written in Golang and is MIT licensed. Various types of contributions are welcomed, whether that means providing feedback, testing existing and new features or hacking on the source code.

#### How do I become a contributor?
ROADMAP.md (34 changes, file deleted)

@@ -1,34 +0,0 @@

# Roadmap

## GitHub projects / source code

You can find a detailed breakdown of the [openfaas](https://github.com/openfaas/) and [openfaas-incubator](https://github.com/openfaas-incubator/) organisations and projects [in the docs](https://docs.openfaas.com/contributing/get-started/).

## Feature overview

For an overview see [the docs](https://docs.openfaas.com/) or see a [feature comparison between OpenFaaS and OpenFaaS Cloud](https://docs.openfaas.com/openfaas-cloud/intro/).

### OpenFaaS

OpenFaaS is a platform for building Serverless Functions and/or deploying existing microservices. Any programming language or binary is supported with a range of [templates](https://github.com/openfaas/templates) available to help you get started.

The core services which make up OpenFaaS need to run on a Linux master, but Windows worker nodes can be added to your cluster to run Windows binaries and functions.

Platforms: the x86_64 platform has first-class support, with 32-bit arm and 64-bit arm provided on a best-effort basis.

Orchestrators: there is official support for Kubernetes & faasd (containerd) with the community providing support for AWS Fargate, Hashicorp Nomad and others.

### OpenFaaS Cloud

OpenFaaS Cloud is a multi-user distribution of OpenFaaS with a built-in CI/CD pipeline, OAuth delegation, a dashboard and a git-based workflow with public/private GitHub and self-hosted GitLab.

## What is coming next?

Proposals and feature requests are tracked [on the 2020 Roadmap on Trello](https://trello.com/invite/b/5OpMyrBP/ade103a10ae1e38eb5d3eee7955260a9/2020-openfaas-roadmap) and through the GitHub issue tracker of each project in the two organisations.

* [openfaas](https://github.com/openfaas/)
* [openfaas-incubator](https://github.com/openfaas-incubator/)

## Contributing

Please see [CONTRIBUTING.md](https://github.com/openfaas/faas/blob/master/CONTRIBUTING.md).
api-docs/spec.openapi.yml (new file, 996 lines)

@@ -0,0 +1,996 @@

openapi: 3.0.1
info:
  title: OpenFaaS API Gateway
  description: OpenFaaS API documentation
  license:
    name: MIT
  version: 0.8.12
  contact:
    name: OpenFaaS Ltd
    url: https://www.openfaas.com/support/
servers:
  - url: "http://localhost:8080"
    description: Local server
tags:
  - name: internal
    description: Internal use only
  - name: system
    description: System endpoints for managing functions and related objects
  - name: function
    description: Endpoints for invoking functions
paths:
  "/healthz":
    get:
      summary: Healthcheck
      operationId: healthcheck
      description: Healthcheck for the gateway, indicates if the gateway is running and available
      tags:
        - internal
      responses:
        '200':
          description: Healthy
        '500':
          description: Not healthy
  "/metrics":
    get:
      summary: Prometheus metrics
      operationId: metrics
      description: Prometheus metrics for the gateway
      tags:
        - internal
      responses:
        '200':
          description: Prometheus metrics in text format
  "/system/info":
    get:
      operationId: GetSystemInfo
      description: Get system provider information
      summary: Get info such as provider version number and provider orchestrator
      tags:
        - system
      responses:
        '200':
          description: Info result
          content:
            application/json:
              schema:
                "$ref": "#/components/schemas/GatewayInfo"
        '500':
          description: Internal Server Error
  "/system/alert":
    post:
      operationId: ScaleAlert
      description: Scale a function based on an alert
      summary: |
        Event-sink for AlertManager, for auto-scaling

        Internal use for AlertManager, requires valid AlertManager alert
        JSON
      tags:
        - internal
      requestBody:
        description: Incoming alert
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/PrometheusAlert'
        required: false
      responses:
        '200':
          description: Alert handled successfully
        '500':
          description: Internal error with swarm or request JSON invalid
  "/system/functions":
    get:
      operationId: GetFunctions
      description: Get a list of deployed functions
      summary: 'Get a list of deployed functions with: stats and image digest'
      tags:
        - system
      responses:
        '200':
          description: List of deployed functions.
          content:
            application/json:
              schema:
                type: array
                items:
                  "$ref": "#/components/schemas/FunctionStatus"
    put:
      operationId: UpdateFunction
      description: update a function spec
      summary: Update a function.
      tags:
        - system
      requestBody:
        description: Function to update
        content:
          application/json:
            schema:
              "$ref": "#/components/schemas/FunctionDeployment"
        required: true
      responses:
        '200':
          description: Accepted
        '400':
          description: Bad Request
        '404':
          description: Not Found
        '500':
          description: Internal Server Error
    post:
      operationId: DeployFunction
      description: Deploy a new function.
      summary: Deploy a new function.
      tags:
        - system
      requestBody:
        description: Function to deploy
        content:
          application/json:
            schema:
              "$ref": "#/components/schemas/FunctionDeployment"
        required: true
      responses:
        '202':
          description: Accepted
        '400':
          description: Bad Request
        '500':
          description: Internal Server Error
    delete:
      operationId: DeleteFunction
      description: Remove a deployed function.
      summary: Remove a deployed function.
      tags:
        - system
      requestBody:
        description: Function to delete
        content:
          application/json:
            schema:
              "$ref": "#/components/schemas/DeleteFunctionRequest"
        required: true
      responses:
        '200':
          description: OK
        '400':
          description: Bad Request
        '404':
          description: Not Found
        '500':
          description: Internal Server Error
  "/system/scale-function/{functionName}":
    post:
      operationId: ScaleFunction
      description: Scale a function
      summary: Scale a function to a specific replica count
      tags:
        - system
      parameters:
        - name: functionName
          in: path
          description: Function name
          required: true
          schema:
            type: string
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/ScaleServiceRequest'
      responses:
        '200':
          description: Scaling OK
        '202':
          description: Scaling OK
        '404':
          description: Function not found
        '500':
          description: Error scaling function

  "/system/function/{functionName}":
    get:
      operationId: GetFunctionStatus
      description: Get the status of a function by name
      tags:
        - system
      parameters:
        - name: functionName
          in: path
          description: Function name
          required: true
          schema:
            type: string
        - name: namespace
          in: query
          description: Namespace of the function
          required: false
          schema:
            type: string
      responses:
        '200':
          description: Function Summary
          content:
            "*/*":
              schema:
                "$ref": "#/components/schemas/FunctionStatus"
        '404':
          description: Not Found
        '500':
          description: Internal Server Error
  "/system/secrets":
    get:
      operationId: ListSecrets
      description: Get a list of secret names and metadata from the provider
      summary: Get a list of secret names and metadata from the provider
      tags:
        - system
      responses:
        '200':
          description: List of submitted secrets.
          content:
            application/json:
              schema:
                "$ref": "#/components/schemas/SecretDescription"
    put:
      operationId: UpdateSecret
      description: Update a secret.
      summary: Update a secret, the value is replaced.
      tags:
- system
|
||||
requestBody:
|
||||
description: Secret to update
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/Secret"
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
description: Ok
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'405':
|
||||
description: Method Not Allowed. Secret update is not allowed in faas-swarm.
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
post:
|
||||
operationId: CreateSecret
|
||||
description: Create a new secret.
|
||||
tags:
|
||||
- system
|
||||
requestBody:
|
||||
description: A new secret to create
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/Secret"
|
||||
required: true
|
||||
responses:
|
||||
'201':
|
||||
description: Created
|
||||
'400':
|
||||
description: Bad Request
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
delete:
|
||||
operationId: DeleteSecret
|
||||
description: Remove a secret.
|
||||
tags:
|
||||
- system
|
||||
requestBody:
|
||||
description: Secret to delete
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/SecretDescription"
|
||||
required: true
|
||||
responses:
|
||||
'204':
|
||||
description: OK
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
"/system/logs":
|
||||
get:
|
||||
operationId: GetFunctionLogs
|
||||
description: Get a stream of the logs for a specific function
|
||||
tags:
|
||||
- system
|
||||
parameters:
|
||||
- name: name
|
||||
in: query
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: namespace
|
||||
in: query
|
||||
description: Namespace of the function
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
- name: instance
|
||||
in: query
|
||||
description: Instance of the function
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
- name: tail
|
||||
in: query
|
||||
description: Sets the maximum number of log messages to return, <=0 means
|
||||
unlimited
|
||||
schema:
|
||||
type: integer
|
||||
- name: follow
|
||||
in: query
|
||||
description: When true, the request will stream logs until the request timeout
|
||||
schema:
|
||||
type: boolean
|
||||
- name: since
|
||||
in: query
|
||||
description: Only return logs after a specific date (RFC3339)
|
||||
schema:
|
||||
type: string
|
||||
format: date-time
|
||||
responses:
|
||||
'200':
|
||||
description: Newline delimited stream of log messages
|
||||
content:
|
||||
application/x-ndjson:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/LogEntry"
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
|
||||
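
Because `/system/logs` returns `application/x-ndjson`, each line of the response body is a separate `LogEntry` object rather than one JSON document, so clients have to read the stream line by line. The Go sketch below is a hedged illustration of consuming that stream; the gateway address, credentials and function name are assumptions.

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// LogEntry mirrors the fields of the LogEntry schema referenced by this endpoint.
type LogEntry struct {
	Name      string `json:"name"`
	Namespace string `json:"namespace"`
	Instance  string `json:"instance"`
	Timestamp string `json:"timestamp"`
	Text      string `json:"text"`
}

func main() {
	// Assumption: local gateway; name, follow and tail are the query parameters documented above.
	url := "http://127.0.0.1:8080/system/logs?name=nodeinfo&follow=true&tail=100"

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("admin", "password") // assumption: gateway protected by basic auth

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	// Each line of the ndjson stream is one LogEntry document.
	scanner := bufio.NewScanner(res.Body)
	for scanner.Scan() {
		var entry LogEntry
		if err := json.Unmarshal(scanner.Bytes(), &entry); err != nil {
			log.Printf("skipping malformed line: %s", err)
			continue
		}
		fmt.Printf("%s [%s/%s] %s\n", entry.Timestamp, entry.Namespace, entry.Name, entry.Text)
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
```
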
"/system/namespaces":
|
||||
get:
|
||||
operationId: ListNamespaces
|
||||
description: Get a list of namespaces
|
||||
tags:
|
||||
- system
|
||||
responses:
|
||||
'200':
|
||||
description: List of namespaces
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ListNamespaceResponse'
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
|
||||
"/async-function/{functionName}":
|
||||
post:
|
||||
operationId: InvokeAsync
|
||||
description: Invoke a function asynchronously
|
||||
summary: |
|
||||
Invoke a function asynchronously in the default OpenFaaS namespace
|
||||
|
||||
Any additional path segments and query parameters will be passed to the function as is.
|
||||
|
||||
See https://docs.openfaas.com/reference/async/.
|
||||
tags:
|
||||
- function
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
description: "(Optional) data to pass to function"
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example: '{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'202':
|
||||
description: Request accepted and queued
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
|
||||
"/async-function/{functionName}.{namespace}":
|
||||
post:
|
||||
operationId: InvokeAsyncNamespaced
|
||||
description: Invoke a function asynchronously in an OpenFaaS namespace.
|
||||
summary: |
|
||||
Invoke a function asynchronously in an OpenFaaS namespace.
|
||||
|
||||
Any additional path segments and query parameters will be passed to the function as is.
|
||||
|
||||
See https://docs.openfaas.com/reference/async/.
|
||||
tags:
|
||||
- function
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: namespace
|
||||
in: path
|
||||
description: Namespace of the function
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
description: "(Optional) data to pass to function"
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example: '{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'202':
|
||||
description: Request accepted and queued
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
|
||||
"/function/{functionName}":
|
||||
post:
|
||||
operationId: InvokeFunction
|
||||
description: Invoke a function in the default OpenFaaS namespace.
|
||||
summary: |
|
||||
Synchronously invoke a function defined in the default OpenFaaS namespace.
|
||||
|
||||
Any additional path segments and query parameters will be passed to the function as is.
|
||||
tags:
|
||||
- function
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
description: "(Optional) data to pass to function"
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example: '{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Value returned from function
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal server error
|
||||
|
||||
"/function/{functionName}.{namespace}":
|
||||
post:
|
||||
operationId: InvokeFunctionNamespaced
|
||||
description: Invoke a function in an OpenFaaS namespace.
|
||||
summary: |
|
||||
Synchronously invoke a function defined in the specified namespace.
|
||||
|
||||
Any additional path segments and query parameters will be passed to the function as is.
|
||||
tags:
|
||||
- function
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: namespace
|
||||
in: path
|
||||
description: Namespace of the function
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
description: "(Optional) data to pass to function"
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example: '{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Value returned from function
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal server error
|
||||
components:
|
||||
securitySchemes:
|
||||
basicAuth:
|
||||
type: http
|
||||
scheme: basic
|
||||
|
||||
schemas:
|
||||
GatewayInfo:
|
||||
required:
|
||||
- provider
|
||||
- version
|
||||
- arch
|
||||
type: object
|
||||
properties:
|
||||
provider:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/ProviderInfo"
|
||||
version:
|
||||
nullable: true
|
||||
description: version of the gateway
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/VersionInfo"
|
||||
arch:
|
||||
type: string
|
||||
description: Platform architecture
|
||||
example: x86_64
|
||||
VersionInfo:
|
||||
type: object
|
||||
required:
|
||||
- sha
|
||||
- release
|
||||
properties:
|
||||
commit_message:
|
||||
type: string
|
||||
example: Sample Message
|
||||
sha:
|
||||
type: string
|
||||
example: 7108418d9dd6b329ddff40e7393b3166f8160a88
|
||||
release:
|
||||
type: string
|
||||
format: semver
|
||||
example: 0.8.9
|
||||
ProviderInfo:
|
||||
type: object
|
||||
required:
|
||||
- provider
|
||||
- orchestration
|
||||
- version
|
||||
properties:
|
||||
provider:
|
||||
type: string
|
||||
description: The orchestration provider / implementation
|
||||
example: faas-netes
|
||||
orchestration:
|
||||
type: string
|
||||
example: kubernetes
|
||||
version:
|
||||
description: The version of the provider
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/VersionInfo"
|
||||
|
||||
PrometheusAlert:
|
||||
type: object
|
||||
description: Prometheus alert produced by AlertManager. This is only a subset of the full alert payload.
|
||||
required:
|
||||
- status
|
||||
- receiver
|
||||
- alerts
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
description: The status of the alert
|
||||
example: resolved
|
||||
receiver:
|
||||
type: string
|
||||
description: The name of the receiver
|
||||
example: webhook
|
||||
alerts:
|
||||
type: array
|
||||
description: The list of alerts
|
||||
items:
|
||||
$ref: "#/components/schemas/PrometheusInnerAlert"
|
||||
example:
|
||||
{
|
||||
"receiver": "scale-up",
|
||||
"status": "firing",
|
||||
"alerts": [{
|
||||
"status": "firing",
|
||||
"labels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"code": "200",
|
||||
"function_name": "func_nodeinfo",
|
||||
"instance": "gateway:8080",
|
||||
"job": "gateway",
|
||||
"monitor": "faas-monitor",
|
||||
"service": "gateway",
|
||||
"severity": "major",
|
||||
"value": "8.998200359928017"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "High invocation total on gateway:8080",
|
||||
"summary": "High invocation total on gateway:8080"
|
||||
},
|
||||
"startsAt": "2017-03-15T15:52:57.805Z",
|
||||
"endsAt": "0001-01-01T00:00:00Z",
|
||||
"generatorURL": "http://4156cb797423:9090/graph?g0.expr=rate%28gateway_function_invocation_total%5B10s%5D%29+%3E+5\u0026g0.tab=0"
|
||||
}],
|
||||
"groupLabels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"service": "gateway"
|
||||
},
|
||||
"commonLabels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"code": "200",
|
||||
"function_name": "func_nodeinfo",
|
||||
"instance": "gateway:8080",
|
||||
"job": "gateway",
|
||||
"monitor": "faas-monitor",
|
||||
"service": "gateway",
|
||||
"severity": "major",
|
||||
"value": "8.998200359928017"
|
||||
},
|
||||
"commonAnnotations": {
|
||||
"description": "High invocation total on gateway:8080",
|
||||
"summary": "High invocation total on gateway:8080"
|
||||
},
|
||||
"externalURL": "http://f054879d97db:9093",
|
||||
"version": "3",
|
||||
"groupKey": 18195285354214864953
|
||||
}
|
||||
|
||||
PrometheusInnerAlert:
|
||||
type: object
|
||||
description: A single alert produced by Prometheus
|
||||
required:
|
||||
- status
|
||||
- labels
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
description: The status of the alert
|
||||
example: resolved
|
||||
labels:
|
||||
$ref: "#/components/schemas/PrometheusInnerAlertLabel"
|
||||
|
||||
PrometheusInnerAlertLabel:
|
||||
type: object
|
||||
description: A single label of a Prometheus alert
|
||||
required:
|
||||
- alertname
|
||||
- function_name
|
||||
properties:
|
||||
alertname:
|
||||
type: string
|
||||
description: The name of the alert
|
||||
function_name:
|
||||
type: string
|
||||
description: The name of the function
|
||||
example: nodeinfo
|
||||
|
||||
FunctionDeployment:
|
||||
required:
|
||||
- service
|
||||
- image
|
||||
type: object
|
||||
properties:
|
||||
service:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
image:
|
||||
type: string
|
||||
description: Docker image in accessible registry
|
||||
example: functions/nodeinfo:latest
|
||||
namespace:
|
||||
type: string
|
||||
description: Namespace to deploy function to. When omitted, the default namespace
|
||||
is used, typically this is `openfaas-fn` but is configured by the provider.
|
||||
example: openfaas-fn
|
||||
envProcess:
|
||||
type: string
|
||||
description: |
|
||||
Process for watchdog to fork, i.e. the command to start the function process.
|
||||
|
||||
This value configures the `fprocess` env variable.
|
||||
example: node main.js
|
||||
constraints:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Constraints are specific to OpenFaaS Provider
|
||||
example: node.platform.os == linux
|
||||
envVars:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Overrides to environmental variables
|
||||
secrets:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: An array of names of secrets that are required to be loaded
|
||||
from the Docker Swarm.
|
||||
example: secret-name-1
|
||||
labels:
|
||||
type: object
|
||||
nullable: true
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: A map of labels for making scheduling or routing decisions
|
||||
example:
|
||||
foo: bar
|
||||
annotations:
|
||||
type: object
|
||||
nullable: true
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: A map of annotations for management, orchestration, events
|
||||
and build tasks
|
||||
example:
|
||||
topics: awesome-kafka-topic
|
||||
foo: bar
|
||||
limits:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionResources"
|
||||
requests:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionResources"
|
||||
readOnlyRootFilesystem:
|
||||
type: boolean
|
||||
description: Make the root filesystem of the function read-only
|
||||
|
||||
# DEPRECATED FIELDS, these fields are ignored in all current providers
|
||||
registryAuth:
|
||||
type: string
|
||||
description: |
|
||||
Deprecated: Private registry base64-encoded basic auth (as present in ~/.docker/config.json)
|
||||
|
||||
Use a Kubernetes Secret with registry-auth secret type to provide this value instead.
|
||||
|
||||
This value is completely ignored.
|
||||
example: dXNlcjpwYXNzd29yZA==
|
||||
deprecated: true
|
||||
network:
|
||||
type: string
|
||||
description: |
|
||||
Deprecated: Network, usually func_functions for Swarm.
|
||||
|
||||
This value is completely ignored.
|
||||
deprecated: true
|
||||
example: func_functions
|
||||
|
||||
FunctionStatus:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
- image
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: The name of the function
|
||||
example: nodeinfo
|
||||
image:
|
||||
type: string
|
||||
description: The fully qualified docker image name of the function
|
||||
example: functions/nodeinfo:latest
|
||||
namespace:
|
||||
type: string
|
||||
description: The namespace of the function
|
||||
example: openfaas-fn
|
||||
envProcess:
|
||||
type: string
|
||||
description: Process for watchdog to fork
|
||||
example: node main.js
|
||||
envVars:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: environment variables for the function runtime
|
||||
constraints:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Constraints are specific to OpenFaaS Provider
|
||||
example: node.platform.os == linux
|
||||
secrets:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: An array of names of secrets that are made available to the function
|
||||
labels:
|
||||
type: object
|
||||
nullable: true
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: A map of labels for making scheduling or routing decisions
|
||||
example:
|
||||
foo: bar
|
||||
annotations:
|
||||
type: object
|
||||
nullable: true
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: A map of annotations for management, orchestration, events
|
||||
and build tasks
|
||||
example:
|
||||
topics: awesome-kafka-topic
|
||||
foo: bar
|
||||
limits:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionResources"
|
||||
requests:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionResources"
|
||||
readOnlyRootFilesystem:
|
||||
type: boolean
|
||||
description: removes write-access from the root filesystem mount-point.
|
||||
invocationCount:
|
||||
type: number
|
||||
description: The amount of invocations for the specified function
|
||||
format: integer
|
||||
example: 1337
|
||||
replicas:
|
||||
type: number
|
||||
description: The current minimal amount of replicas
|
||||
format: integer
|
||||
example: 2
|
||||
availableReplicas:
|
||||
type: number
|
||||
description: The current available amount of replicas
|
||||
format: integer
|
||||
example: 2
|
||||
createdAt:
|
||||
type: string
|
||||
description: |
|
||||
is the time read back from the faas backend's
|
||||
data store for when the function or its container was created.
|
||||
format: date-time
|
||||
usage:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionUsage"
|
||||
|
||||
FunctionResources:
|
||||
type: object
|
||||
properties:
|
||||
memory:
|
||||
type: string
|
||||
description: The amount of memory that is allocated for the function
|
||||
example: 128M
|
||||
cpu:
|
||||
type: string
|
||||
description: The amount of cpu that is allocated for the function
|
||||
example: '0.01'
|
||||
|
||||
FunctionUsage:
|
||||
type: object
|
||||
properties:
|
||||
cpu:
|
||||
type: number
|
||||
description: |
|
||||
is the increase in CPU usage since the last measurement
|
||||
equivalent to Kubernetes' concept of millicores.
|
||||
format: double
|
||||
example: 0.01
|
||||
totalMemoryBytes:
|
||||
type: number
|
||||
description: is the total memory usage in bytes.
|
||||
format: double
|
||||
example: 1337
|
||||
|
||||
DeleteFunctionRequest:
|
||||
required:
|
||||
- functionName
|
||||
type: object
|
||||
properties:
|
||||
functionName:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
|
||||
ScaleServiceRequest:
|
||||
required:
|
||||
- serviceName
|
||||
- namespace
|
||||
- replicas
|
||||
type: object
|
||||
properties:
|
||||
serviceName:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
namespace:
|
||||
type: string
|
||||
description: Namespace the function is deployed to.
|
||||
example: openfaas-fn
|
||||
replicas:
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
description: Number of replicas to scale to
|
||||
example: 2
|
||||
|
||||
SecretDescription:
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of secret
|
||||
example: aws-key
|
||||
namespace:
|
||||
type: string
|
||||
description: Namespace of secret
|
||||
example: openfaas-fn
|
||||
SecretValues:
|
||||
type: object
|
||||
properties:
|
||||
value:
|
||||
type: string
|
||||
description: Value of secret in plain-text
|
||||
example: changeme
|
||||
rawValue:
|
||||
type: string
|
||||
format: byte
|
||||
description: |
|
||||
Value of secret in base64.
|
||||
|
||||
This can be used to provide raw binary data when the `value` field is omitted.
|
||||
example: Y2hhbmdlbWU=
|
||||
|
||||
Secret:
|
||||
type: object
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/SecretDescription"
|
||||
- $ref: "#/components/schemas/SecretValues"
|
||||
|
||||
LogEntry:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
- namespace
|
||||
- instance
|
||||
- timestamp
|
||||
- text
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: the function name
|
||||
namespace:
|
||||
type: string
|
||||
description: the namespace of the function
|
||||
instance:
|
||||
type: string
|
||||
description: the name/id of the specific function instance
|
||||
timestamp:
|
||||
type: string
|
||||
description: the timestamp of when the log message was recorded
|
||||
format: date-time
|
||||
text:
|
||||
type: string
|
||||
description: raw log message content
|
||||
|
||||
ListNamespaceResponse:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Namespace name
|
||||
example: openfaas-fn
|
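
Taken together, the spec above covers the whole gateway surface. As a rough orientation, the sketch below shows how two of the documented endpoints — `POST /system/functions` with a `FunctionDeployment` body and `POST /function/{functionName}` — might be exercised from Go. This is a hedged illustration, not part of the gateway: the gateway address and basic-auth credentials are assumptions, while the `nodeinfo` service and image follow the spec's own examples.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

const gateway = "http://127.0.0.1:8080" // assumption: local gateway

// FunctionDeployment carries the two required fields of the FunctionDeployment schema.
type FunctionDeployment struct {
	Service string `json:"service"`
	Image   string `json:"image"`
}

func main() {
	// Deploy a function: POST /system/functions, expecting 202 Accepted.
	payload, err := json.Marshal(FunctionDeployment{
		Service: "nodeinfo",
		Image:   "functions/nodeinfo:latest",
	})
	if err != nil {
		log.Fatal(err)
	}

	req, err := http.NewRequest(http.MethodPost, gateway+"/system/functions", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("admin", "password") // assumption: gateway protected by basic auth

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	io.Copy(io.Discard, res.Body)
	res.Body.Close()
	fmt.Println("deploy status:", res.Status)

	// Invoke the function synchronously: POST /function/{functionName}.
	res, err = http.Post(gateway+"/function/nodeinfo", "text/plain", bytes.NewBufferString(""))
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("invoke status:", res.Status)
	fmt.Println(string(body))
}
```

A successful deployment should answer with `202 Accepted`; the synchronous invocation then returns the function's output with a `200`, as documented above.
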
@ -1,628 +0,0 @@
|
||||
swagger: '2.0'
|
||||
info:
|
||||
description: OpenFaaS API documentation
|
||||
version: 0.8.12
|
||||
title: OpenFaaS API Gateway
|
||||
license:
|
||||
name: MIT
|
||||
basePath: /
|
||||
schemes:
|
||||
- http
|
||||
paths:
|
||||
'/system/functions':
|
||||
get:
|
||||
summary: 'Get a list of deployed functions with: stats and image digest'
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
'200':
|
||||
description: List of deployed functions.
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/FunctionListEntry'
|
||||
post:
|
||||
summary: Deploy a new function.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Function to deploy
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/FunctionDefinition'
|
||||
responses:
|
||||
'202':
|
||||
description: Accepted
|
||||
'400':
|
||||
description: Bad Request
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
put:
|
||||
summary: Update a function.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Function to update
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/FunctionDefinition'
|
||||
responses:
|
||||
'200':
|
||||
description: Accepted
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
delete:
|
||||
summary: Remove a deployed function.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Function to delete
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/DeleteFunctionRequest'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/system/alert':
|
||||
post:
|
||||
summary: 'Event-sink for AlertManager, for auto-scaling'
|
||||
description: 'Internal use for AlertManager, requires valid AlertManager alert JSON'
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Incoming alert
|
||||
schema:
|
||||
type: object
|
||||
example: |-
|
||||
{"receiver": "scale-up",
|
||||
"status": "firing",
|
||||
"alerts": [{
|
||||
"status": "firing",
|
||||
"labels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"code": "200",
|
||||
"function_name": "func_nodeinfo",
|
||||
"instance": "gateway:8080",
|
||||
"job": "gateway",
|
||||
"monitor": "faas-monitor",
|
||||
"service": "gateway",
|
||||
"severity": "major",
|
||||
"value": "8.998200359928017"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "High invocation total on gateway:8080",
|
||||
"summary": "High invocation total on gateway:8080"
|
||||
},
|
||||
"startsAt": "2017-03-15T15:52:57.805Z",
|
||||
"endsAt": "0001-01-01T00:00:00Z",
|
||||
"generatorURL": "http://4156cb797423:9090/graph?g0.expr=rate%28gateway_function_invocation_total%5B10s%5D%29+%3E+5\u0026g0.tab=0"
|
||||
}],
|
||||
"groupLabels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"service": "gateway"
|
||||
},
|
||||
"commonLabels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"code": "200",
|
||||
"function_name": "func_nodeinfo",
|
||||
"instance": "gateway:8080",
|
||||
"job": "gateway",
|
||||
"monitor": "faas-monitor",
|
||||
"service": "gateway",
|
||||
"severity": "major",
|
||||
"value": "8.998200359928017"
|
||||
},
|
||||
"commonAnnotations": {
|
||||
"description": "High invocation total on gateway:8080",
|
||||
"summary": "High invocation total on gateway:8080"
|
||||
},
|
||||
"externalURL": "http://f054879d97db:9093",
|
||||
"version": "3",
|
||||
"groupKey": 18195285354214864953
|
||||
}
|
||||
responses:
|
||||
'200':
|
||||
description: Alert handled successfully
|
||||
'500':
|
||||
description: Internal error with swarm or request JSON invalid
|
||||
'/async-function/{functionName}':
|
||||
post:
|
||||
summary: 'Invoke a function asynchronously in OpenFaaS'
|
||||
description: >-
|
||||
See https://docs.openfaas.com/reference/async/.
|
||||
parameters:
|
||||
- in: path
|
||||
name: functionName
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
- in: body
|
||||
name: input
|
||||
description: (Optional) data to pass to function
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example:
|
||||
'{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'202':
|
||||
description: Request accepted and queued
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/function/{functionName}':
|
||||
post:
|
||||
summary: Invoke a function defined in OpenFaaS
|
||||
parameters:
|
||||
- in: path
|
||||
name: functionName
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
- in: body
|
||||
name: input
|
||||
description: (Optional) data to pass to function
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example:
|
||||
'{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Value returned from function
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal server error
|
||||
'/system/scale-function/{functionName}':
|
||||
post:
|
||||
summary: Scale a function
|
||||
parameters:
|
||||
- in: path
|
||||
name: functionName
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
- in: body
|
||||
name: input
|
||||
description: Function to scale plus replica count
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example:
|
||||
'{"service": "hello-world", "replicas": 10}'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Scaling OK
|
||||
'202':
|
||||
description: Scaling OK
|
||||
'404':
|
||||
description: Function not found
|
||||
'500':
|
||||
description: Error scaling function
|
||||
'/system/function/{functionName}':
|
||||
get:
|
||||
summary: Get a summary of an OpenFaaS function
|
||||
parameters:
|
||||
- in: path
|
||||
name: functionName
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
description: Function Summary
|
||||
schema:
|
||||
$ref: '#/definitions/FunctionListEntry'
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/system/secrets':
|
||||
get:
|
||||
summary: 'Get a list of secret names and metadata from the provider'
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
'200':
|
||||
description: List of submitted secrets.
|
||||
schema:
|
||||
$ref: '#/definitions/SecretName'
|
||||
post:
|
||||
summary: Create a new secret.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: A new secret to create
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/Secret'
|
||||
responses:
|
||||
'201':
|
||||
description: Created
|
||||
'400':
|
||||
description: Bad Request
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
put:
|
||||
summary: Update a secret.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Secret to update
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/Secret'
|
||||
responses:
|
||||
'200':
|
||||
description: Ok
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'405':
|
||||
description: Method Not Allowed. Secret update is not allowed in faas-swarm.
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
delete:
|
||||
summary: Remove a secret.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Secret to delete
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/SecretName'
|
||||
responses:
|
||||
'204':
|
||||
description: OK
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/system/logs':
|
||||
get:
|
||||
summary: Get a stream of the logs for a specific function
|
||||
produces:
|
||||
- application/x-ndjson
|
||||
parameters:
|
||||
- in: query
|
||||
name: name
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
- in: query
|
||||
name: since
|
||||
description: Only return logs after a specific date (RFC3339)
|
||||
type: string
|
||||
required: false
|
||||
- in: query
|
||||
name: tail
|
||||
description: Sets the maximum number of log messages to return, <=0 means unlimited
|
||||
type: integer
|
||||
required: false
|
||||
- in: query
|
||||
name: follow
|
||||
description: When true, the request will stream logs until the request timeout
|
||||
type: boolean
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Newline delimited stream of log messages
|
||||
schema:
|
||||
$ref: '#/definitions/LogEntry'
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/system/info':
|
||||
get:
|
||||
summary: Get info such as provider version number and provider orchestrator
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
'200':
|
||||
description: Info result
|
||||
schema:
|
||||
$ref: '#/definitions/Info'
|
||||
'404':
|
||||
description: Provider does not support info endpoint
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/healthz':
|
||||
get:
|
||||
summary: Healthcheck
|
||||
responses:
|
||||
'200':
|
||||
description: Healthy
|
||||
'500':
|
||||
description: Not healthy
|
||||
securityDefinitions:
|
||||
basicAuth:
|
||||
type: basic
|
||||
definitions:
|
||||
Info:
|
||||
type: object
|
||||
properties:
|
||||
provider:
|
||||
type: object
|
||||
description: The OpenFaaS Provider
|
||||
properties:
|
||||
provider:
|
||||
type: string
|
||||
example: faas-netes
|
||||
orchestration:
|
||||
type: string
|
||||
example: kubernetes
|
||||
version:
|
||||
type: object
|
||||
description: Version of the OpenFaaS Provider
|
||||
properties:
|
||||
commit_message:
|
||||
type: string
|
||||
example: Sample Message
|
||||
sha:
|
||||
type: string
|
||||
example: 7108418d9dd6b329ddff40e7393b3166f8160a88
|
||||
release:
|
||||
type: string
|
||||
format: semver
|
||||
example: 0.2.6
|
||||
version:
|
||||
type: object
|
||||
description: Version of the Gateway
|
||||
properties:
|
||||
commit_message:
|
||||
type: string
|
||||
example: Sample Message
|
||||
sha:
|
||||
type: string
|
||||
example: 7108418d9dd6b329ddff40e7393b3166f8160a88
|
||||
release:
|
||||
type: string
|
||||
format: semver
|
||||
example: 0.8.9
|
||||
arch:
|
||||
type: string
|
||||
description: "Platform architecture"
|
||||
example: "x86_64"
|
||||
required:
|
||||
- provider
|
||||
- version
|
||||
DeleteFunctionRequest:
|
||||
type: object
|
||||
properties:
|
||||
functionName:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
required:
|
||||
- functionName
|
||||
FunctionDefinition:
|
||||
type: object
|
||||
properties:
|
||||
service:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
network:
|
||||
type: string
|
||||
description: Network, usually func_functions for Swarm (deprecated)
|
||||
example: func_functions
|
||||
image:
|
||||
type: string
|
||||
description: Docker image in accessible registry
|
||||
example: functions/nodeinfo:latest
|
||||
envProcess:
|
||||
type: string
|
||||
description: Process for watchdog to fork
|
||||
example: node main.js
|
||||
envVars:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Overrides to environmental variables
|
||||
constraints:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Constraints are specific to OpenFaaS Provider
|
||||
example: "node.platform.os == linux"
|
||||
labels:
|
||||
description: A map of labels for making scheduling or routing decisions
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
foo: bar
|
||||
annotations:
|
||||
description: A map of annotations for management, orchestration, events and build tasks
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
topics: awesome-kafka-topic
|
||||
foo: bar
|
||||
secrets:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: An array of names of secrets that are required to be loaded from the Docker Swarm.
|
||||
example: "secret-name-1"
|
||||
registryAuth:
|
||||
type: string
|
||||
description: >-
|
||||
Private registry base64-encoded basic auth (as present in
|
||||
~/.docker/config.json)
|
||||
example: dXNlcjpwYXNzd29yZA==
|
||||
limits:
|
||||
type: object
|
||||
properties:
|
||||
memory:
|
||||
type: string
|
||||
example: "128M"
|
||||
cpu:
|
||||
type: string
|
||||
example: "0.01"
|
||||
requests:
|
||||
type: object
|
||||
properties:
|
||||
memory:
|
||||
type: string
|
||||
example: "128M"
|
||||
cpu:
|
||||
type: string
|
||||
example: "0.01"
|
||||
readOnlyRootFilesystem:
|
||||
type: boolean
|
||||
description: Make the root filesystem of the function read-only
|
||||
required:
|
||||
- service
|
||||
- image
|
||||
- envProcess
|
||||
FunctionListEntry:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
description: The name of the function
|
||||
type: string
|
||||
example: nodeinfo
|
||||
image:
|
||||
description: The fully qualified docker image name of the function
|
||||
type: string
|
||||
example: functions/nodeinfo:latest
|
||||
invocationCount:
|
||||
description: The amount of invocations for the specified function
|
||||
type: number
|
||||
format: integer
|
||||
example: 1337
|
||||
replicas:
|
||||
description: The current minimal amount of replicas
|
||||
type: number
|
||||
format: integer
|
||||
example: 2
|
||||
availableReplicas:
|
||||
description: The current available amount of replicas
|
||||
type: number
|
||||
format: integer
|
||||
example: 2
|
||||
envProcess:
|
||||
description: Process for watchdog to fork
|
||||
type: string
|
||||
example: node main.js
|
||||
labels:
|
||||
description: A map of labels for making scheduling or routing decisions
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
foo: bar
|
||||
annotations:
|
||||
description: A map of annotations for management, orchestration, events and build tasks
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
topics: awesome-kafka-topic
|
||||
foo: bar
|
||||
required:
|
||||
- name
|
||||
- image
|
||||
- invocationCount
|
||||
- replicas
|
||||
- availableReplicas
|
||||
- envProcess
|
||||
- labels
|
||||
Secret:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of secret
|
||||
example: aws-key
|
||||
value:
|
||||
type: string
|
||||
description: Value of secret in plain-text
|
||||
example: changeme
|
||||
required:
|
||||
- name
|
||||
LogEntry:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: the function name
|
||||
instance:
|
||||
type: string
|
||||
description: the name/id of the specific function instance
|
||||
timestamp:
|
||||
type: string
|
||||
format: date-time
|
||||
description: the timestamp of when the log message was recorded
|
||||
text:
|
||||
type: string
|
||||
description: raw log message content
|
||||
SecretName:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of secret
|
||||
example: aws-key
|
||||
externalDocs:
|
||||
description: More documentation available on Github
|
||||
url: 'https://github.com/openfaas/faas'
|
@ -1,13 +0,0 @@
|
||||
auth plugins
============

Auth plugins must implement request checking on an HTTP port and path such as `:8080/validate`.

* Valid requests: return 2xx
* Invalid requests: return non 2xx

It is up to the developer to decide whether a request body is required for validation. For strategies such as [Basic Authentication](https://en.wikipedia.org/wiki/Basic_access_authentication), the headers alone are sufficient (a minimal sketch of such a plugin follows this listing).

Plugins available:

* [basic-auth](./basic-auth/)
|
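
For orientation, a plugin honouring the contract above can be very small. The Go sketch below is an illustration only — it is not the basic-auth implementation that lived in this folder, and the hard-coded credentials are an assumption:

```go
package main

import (
	"crypto/subtle"
	"log"
	"net/http"
)

// validate answers the gateway's auth checks: 2xx for valid requests, non-2xx otherwise.
func validate(w http.ResponseWriter, r *http.Request) {
	user, pass, ok := r.BasicAuth()

	if ok &&
		subtle.ConstantTimeCompare([]byte(user), []byte("admin")) == 1 &&
		subtle.ConstantTimeCompare([]byte(pass), []byte("changeme")) == 1 { // assumption: static credentials
		w.WriteHeader(http.StatusOK)
		return
	}

	w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
	w.WriteHeader(http.StatusUnauthorized)
}

func main() {
	// Serve the validation path on an HTTP port, as required by the plugin contract.
	http.HandleFunc("/validate", validate)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
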
1
auth/basic-auth/.gitignore
vendored
@ -1 +0,0 @@
|
||||
basic-auth
|
@ -1,45 +0,0 @@
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} ghcr.io/openfaas/license-check:0.4.0 as license-check
|
||||
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.18 as build
|
||||
|
||||
ENV GO111MODULE=off
|
||||
ENV CGO_ENABLED=0
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
|
||||
COPY --from=license-check /license-check /usr/bin/
|
||||
|
||||
WORKDIR /go/src/handler
|
||||
COPY . .
|
||||
|
||||
# Run a gofmt and exclude all vendored code.
|
||||
|
||||
RUN license-check -path ./ --verbose=false "OpenFaaS Authors" "OpenFaaS Author(s)"
|
||||
|
||||
RUN test -z "$(gofmt -l $(find . -type f -name '*.go' -not -path "./vendor/*"))"
|
||||
|
||||
RUN CGO_ENABLED=${CGO_ENABLED} GOOS=${TARGETOS} GOARCH=${TARGETARCH} go test -v ./...
|
||||
|
||||
RUN CGO_ENABLED=${CGO_ENABLED} GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build \
|
||||
--ldflags "-s -w" -a -installsuffix cgo -o handler .
|
||||
|
||||
FROM --platform=${TARGETPLATFORM:-linux/amd64} alpine:3.16.2 as ship
|
||||
# Add non-root user
|
||||
RUN addgroup -S app && adduser -S -g app app \
|
||||
&& mkdir -p /home/app \
|
||||
&& chown app /home/app
|
||||
|
||||
WORKDIR /home/app
|
||||
|
||||
COPY --from=build /go/src/handler/handler .
|
||||
|
||||
RUN chown -R app /home/app
|
||||
|
||||
USER app
|
||||
|
||||
WORKDIR /home/app
|
||||
|
||||
CMD ["./handler"]
|
@ -1,13 +0,0 @@
|
||||
basic-auth
============

This component implements [Basic Authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) as an OpenFaaS authentication plug-in.

To run this plugin you will need to create and bind two secrets named `basic-auth-user` and `basic-auth-password`.

| Option | Usage |
|---------------------------------|--------------|
| `port` | Set the HTTP port |
| `secret_mount_path` | It is recommended that this is set to `/var/openfaas/secrets` |
| `user_filename` | File to read from disk for the username, default empty |
| `pass_filename` | File to read from disk for the password, default empty |
|
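
To make the options above concrete, here is a hedged sketch of how the two secrets could be read from `secret_mount_path`; the file names simply reuse the secret names mentioned above, and this is not the plugin's actual configuration code:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// loadSecret reads a single secret file from the mount path and trims trailing whitespace.
func loadSecret(mountPath, filename string) (string, error) {
	data, err := os.ReadFile(filepath.Join(mountPath, filename))
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(data)), nil
}

func main() {
	mountPath := "/var/openfaas/secrets" // recommended value for secret_mount_path (see table above)

	user, err := loadSecret(mountPath, "basic-auth-user")
	if err != nil {
		log.Fatalf("unable to read user secret: %s", err)
	}

	pass, err := loadSecret(mountPath, "basic-auth-password")
	if err != nil {
		log.Fatalf("unable to read password secret: %s", err)
	}

	fmt.Printf("loaded credentials for %q (password length %d)\n", user, len(pass))
}
```
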
@ -1,8 +0,0 @@
|
||||
module github.com/openfaas/faas/auth/basic-auth
|
||||
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/openfaas/faas-provider v0.19.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
)
|
@ -1,465 +0,0 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/openfaas/faas-provider v0.19.0 h1:1dv4HDkWa9/yVkUll23/06y9lf8+tISOxYoHwBXZaJI=
|
||||
github.com/openfaas/faas-provider v0.19.0/go.mod h1:Farrp+9Med8LeK3aoYpqplMP8f5ebTILbCSLg2LPLZk=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
@ -1,81 +0,0 @@
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"strconv"
	"time"

	"github.com/openfaas/faas-provider/auth"
	"github.com/pkg/errors"
)

func main() {
	port := 8080

	// The listen port can be overridden through the "port" environment variable.
	if val, ok := os.LookupEnv("port"); ok {
		intOut, err := strconv.Atoi(val)
		if err != nil {
			panic(errors.Wrap(err, fmt.Sprintf("value of `port`: %s, not a valid port", val)))
		}
		port = intOut
	}

	s := &http.Server{
		Addr:           fmt.Sprintf(":%d", port),
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   5 * time.Second,
		MaxHeaderBytes: 1 << 20, // Max header of 1MB
	}

	// Basic-auth credentials are read from files mounted as secrets.
	credentialsReader := auth.ReadBasicAuthFromDisk{
		SecretMountPath:  os.Getenv("secret_mount_path"),
		UserFilename:     os.Getenv("user_filename"),
		PasswordFilename: os.Getenv("pass_filename"),
	}

	credentials, err := credentialsReader.Read()
	if err != nil {
		panic(errors.Wrap(err, "unable to read basic auth credentials, check `secret_mount_path`"))
	}

	// Requests that pass basic auth receive an empty 200 response.
	authHandler := auth.DecorateWithBasicAuth(func(w http.ResponseWriter, r *http.Request) {
	}, credentials)
	http.HandleFunc("/validate", makeLogger(authHandler))

	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	log.Printf("Listening on: %d\n", port)
	log.Fatal(s.ListenAndServe())
}

// makeLogger replays the wrapped handler's response while logging the resulting status code.
func makeLogger(next http.Handler) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {

		rr := httptest.NewRecorder()

		next.ServeHTTP(rr, r)
		log.Printf("Validated request %d.\n", rr.Code)

		resHeader := rr.Header()
		copyHeaders(w.Header(), &resHeader)

		w.WriteHeader(rr.Code)
		if rr.Body != nil {
			w.Write(rr.Body.Bytes())
		}
	}
}

// copyHeaders clones each header value from source into destination.
func copyHeaders(destination http.Header, source *http.Header) {
	for k, v := range *source {
		vClone := make([]string, len(v))
		copy(vClone, v)
		(destination)[k] = vClone
	}
}
@ -1,29 +0,0 @@
package main

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// Test_makeLogger_CopiesResponseHeaders ensures that headers written by the wrapped
// handler are copied onto the real response by makeLogger.
func Test_makeLogger_CopiesResponseHeaders(t *testing.T) {
	handler := http.HandlerFunc(makeLogger(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("X-Unit-Test", "true")
		})))

	s := httptest.NewServer(handler)
	defer s.Close()

	req := httptest.NewRequest(http.MethodGet, s.URL, nil)
	rr := httptest.NewRecorder()

	handler.ServeHTTP(rr, req)

	got := rr.Header().Get("X-Unit-Test")
	want := "true"
	if want != got {
		t.Errorf("Header X-Unit-Test, want: %s, got %s", want, got)
	}
}
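For context, a minimal client-side sketch (not part of this repository) of how the validator above could be exercised once it is running; the port matches the default in the plugin's main function, while the credential values are placeholders rather than real secrets:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Assumes the basic-auth validator is running locally on its default port (8080).
	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/validate", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder credentials; in practice these come from the mounted secret files.
	req.SetBasicAuth("admin", "example-password")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	// 200 means the credentials matched; 401 means they were rejected.
	fmt.Println("validator returned:", res.StatusCode)
}
```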
21 auth/basic-auth/vendor/github.com/openfaas/faas-provider/LICENSE generated vendored
@ -1,21 +0,0 @@
MIT License

Copyright (c) 2017 Alex Ellis

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
30 auth/basic-auth/vendor/github.com/openfaas/faas-provider/auth/basic_auth.go generated vendored
@ -1,30 +0,0 @@
// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package auth

import (
	"crypto/subtle"
	"net/http"
)

// DecorateWithBasicAuth enforces basic auth as a middleware with given credentials
func DecorateWithBasicAuth(next http.HandlerFunc, credentials *BasicAuthCredentials) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {

		user, password, ok := r.BasicAuth()

		const noMatch = 0
		if !ok ||
			user != credentials.User ||
			subtle.ConstantTimeCompare([]byte(credentials.Password), []byte(password)) == noMatch {

			w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
			w.WriteHeader(http.StatusUnauthorized)
			w.Write([]byte("invalid credentials"))
			return
		}

		next.ServeHTTP(w, r)
	}
}
66 auth/basic-auth/vendor/github.com/openfaas/faas-provider/auth/credentials.go generated vendored
@ -1,66 +0,0 @@
// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package auth

import (
	"fmt"
	"io/ioutil"
	"path"
	"strings"
)

// BasicAuthCredentials for credentials
type BasicAuthCredentials struct {
	User     string
	Password string
}

type ReadBasicAuth interface {
	Read() (*BasicAuthCredentials, error)
}

type ReadBasicAuthFromDisk struct {
	SecretMountPath string

	UserFilename string

	PasswordFilename string
}

func (r *ReadBasicAuthFromDisk) Read() (*BasicAuthCredentials, error) {
	var credentials *BasicAuthCredentials

	if len(r.SecretMountPath) == 0 {
		return nil, fmt.Errorf("invalid SecretMountPath specified for reading secrets")
	}

	userKey := "basic-auth-user"
	if len(r.UserFilename) > 0 {
		userKey = r.UserFilename
	}

	passwordKey := "basic-auth-password"
	if len(r.PasswordFilename) > 0 {
		passwordKey = r.PasswordFilename
	}

	userPath := path.Join(r.SecretMountPath, userKey)
	user, userErr := ioutil.ReadFile(userPath)
	if userErr != nil {
		return nil, fmt.Errorf("unable to load %s", userPath)
	}

	userPassword := path.Join(r.SecretMountPath, passwordKey)
	password, passErr := ioutil.ReadFile(userPassword)
	if passErr != nil {
		return nil, fmt.Errorf("Unable to load %s", userPassword)
	}

	credentials = &BasicAuthCredentials{
		User:     strings.TrimSpace(string(user)),
		Password: strings.TrimSpace(string(password)),
	}

	return credentials, nil
}
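Taken together, the two vendored files above form a small middleware kit: read credentials from files mounted as secrets, then wrap any handler with basic-auth enforcement. A minimal sketch of that composition; the mount path, port, and handler body are illustrative and not taken from the repository:

```go
package main

import (
	"log"
	"net/http"

	"github.com/openfaas/faas-provider/auth"
)

func main() {
	// Illustrative mount path; in OpenFaaS this is typically a secret mount.
	reader := auth.ReadBasicAuthFromDisk{SecretMountPath: "/var/secrets"}

	// Reads basic-auth-user and basic-auth-password by default.
	credentials, err := reader.Read()
	if err != nil {
		log.Fatal(err)
	}

	// Wrap an arbitrary handler so it can only be reached with valid credentials.
	protected := auth.DecorateWithBasicAuth(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("only reachable with valid credentials\n"))
	}, credentials)

	http.HandleFunc("/", protected)
	log.Fatal(http.ListenAndServe(":8081", nil))
}
```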
24 auth/basic-auth/vendor/github.com/pkg/errors/.gitignore generated vendored
@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
10 auth/basic-auth/vendor/github.com/pkg/errors/.travis.yml generated vendored
@ -1,10 +0,0 @@
language: go
go_import_path: github.com/pkg/errors
go:
  - 1.11.x
  - 1.12.x
  - 1.13.x
  - tip

script:
  - make check
23 auth/basic-auth/vendor/github.com/pkg/errors/LICENSE generated vendored
@ -1,23 +0,0 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44 auth/basic-auth/vendor/github.com/pkg/errors/Makefile generated vendored
@ -1,44 +0,0 @@
PKGS := github.com/pkg/errors
SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
GO := go

check: test vet gofmt misspell unconvert staticcheck ineffassign unparam

test:
	$(GO) test $(PKGS)

vet: | test
	$(GO) vet $(PKGS)

staticcheck:
	$(GO) get honnef.co/go/tools/cmd/staticcheck
	staticcheck -checks all $(PKGS)

misspell:
	$(GO) get github.com/client9/misspell/cmd/misspell
	misspell \
		-locale GB \
		-error \
		*.md *.go

unconvert:
	$(GO) get github.com/mdempsky/unconvert
	unconvert -v $(PKGS)

ineffassign:
	$(GO) get github.com/gordonklaus/ineffassign
	find $(SRCDIRS) -name '*.go' | xargs ineffassign

pedantic: check errcheck

unparam:
	$(GO) get mvdan.cc/unparam
	unparam ./...

errcheck:
	$(GO) get github.com/kisielk/errcheck
	errcheck $(PKGS)

gofmt:
	@echo Checking code is gofmted
	@test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
59 auth/basic-auth/vendor/github.com/pkg/errors/README.md generated vendored
@ -1,59 +0,0 @@
# errors [Travis CI](https://travis-ci.org/pkg/errors) [AppVeyor](https://ci.appveyor.com/project/davecheney/errors/branch/master) [GoDoc](http://godoc.org/github.com/pkg/errors) [Go Report Card](https://goreportcard.com/report/github.com/pkg/errors) [Sourcegraph](https://sourcegraph.com/github.com/pkg/errors?badge)

Package errors provides simple error handling primitives.

`go get github.com/pkg/errors`

The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
        return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.

## Adding context to an error

The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
        return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error

Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
        Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
        // handle specifically
default:
        // unknown error
}
```

[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).

## Roadmap

With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:

- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
- 1.0. Final release.

## Contributing

Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.

Before sending a PR, please discuss your change by raising an issue.

## License

BSD-2-Clause
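The Wrap and Cause snippets in the README above are fragments; a self-contained sketch of the same flow, where the `readConfig` helper and the file path are invented purely for illustration, might look like this:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

// readConfig is an invented helper that annotates the underlying failure with context.
func readConfig(path string) ([]byte, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "read failed")
	}
	return b, nil
}

func main() {
	_, err := readConfig("/does/not/exist.yaml")
	if err != nil {
		fmt.Println(err)               // wrapped: "read failed: open /does/not/exist.yaml: ..."
		fmt.Println(errors.Cause(err)) // the original *os.PathError, recovered via Cause
		fmt.Printf("%+v\n", err)       // extended format also prints the recorded stack trace
	}
}
```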
32 auth/basic-auth/vendor/github.com/pkg/errors/appveyor.yml generated vendored
@ -1,32 +0,0 @@
version: build-{build}.{branch}

clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed

environment:
  GOPATH: C:\gopath

platform:
  - x64

# http://www.appveyor.com/docs/installed-software
install:
  # some helpful output for debugging builds
  - go version
  - go env
  # pre-installed MinGW at C:\MinGW is 32bit only
  # but MSYS2 at C:\msys64 has mingw64
  - set PATH=C:\msys64\mingw64\bin;%PATH%
  - gcc --version
  - g++ --version

build_script:
  - go install -v ./...

test_script:
  - set PATH=C:\gopath\bin;%PATH%
  - go test -v ./...

#artifacts:
#  - path: '%GOPATH%\bin\*.exe'
deploy: off
288
auth/basic-auth/vendor/github.com/pkg/errors/errors.go
generated
vendored
288
auth/basic-auth/vendor/github.com/pkg/errors/errors.go
generated
vendored
@ -1,288 +0,0 @@
|
||||
// Package errors provides simple error handling primitives.
|
||||
//
|
||||
// The traditional error handling idiom in Go is roughly akin to
|
||||
//
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// which when applied recursively up the call stack results in error reports
|
||||
// without context or debugging information. The errors package allows
|
||||
// programmers to add context to the failure path in their code in a way
|
||||
// that does not destroy the original value of the error.
|
||||
//
|
||||
// Adding context to an error
|
||||
//
|
||||
// The errors.Wrap function returns a new error that adds context to the
|
||||
// original error by recording a stack trace at the point Wrap is called,
// together with the supplied message. For example
//
//     _, err := ioutil.ReadAll(r)
//     if err != nil {
//             return errors.Wrap(err, "read failed")
//     }
//
// If additional control is required, the errors.WithStack and
// errors.WithMessage functions destructure errors.Wrap into its component
// operations: annotating an error with a stack trace and with a message,
// respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
//     type causer interface {
//             Cause() error
//     }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error that does not implement causer, which is assumed to be
// the original cause. For example:
//
//     switch err := errors.Cause(err).(type) {
//     case *MyError:
//             // handle specifically
//     default:
//             // unknown error
//     }
//
// Although the causer interface is not exported by this package, it is
// considered a part of its stable public interface.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported:
//
//     %s    print the error. If the error has a Cause it will be
//           printed recursively.
//     %v    see %s
//     %+v   extended format. Each Frame of the error's StackTrace will
//           be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface:
//
//     type stackTracer interface {
//             StackTrace() errors.StackTrace
//     }
//
// The returned errors.StackTrace type is defined as
//
//     type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
//     if err, ok := err.(stackTracer); ok {
//             for _, f := range err.StackTrace() {
//                     fmt.Printf("%+s:%d\n", f, f)
//             }
//     }
//
// Although the stackTracer interface is not exported by this package, it is
// considered a part of its stable public interface.
//
// See the documentation for Frame.Format for more details.
package errors

import (
	"fmt"
	"io"
)

// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
	return &fundamental{
		msg:   message,
		stack: callers(),
	}
}

// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
	return &fundamental{
		msg:   fmt.Sprintf(format, args...),
		stack: callers(),
	}
}

// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
	msg string
	*stack
}

func (f *fundamental) Error() string { return f.msg }

func (f *fundamental) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			io.WriteString(s, f.msg)
			f.stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, f.msg)
	case 'q':
		fmt.Fprintf(s, "%q", f.msg)
	}
}

// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
	if err == nil {
		return nil
	}
	return &withStack{
		err,
		callers(),
	}
}

type withStack struct {
	error
	*stack
}

func (w *withStack) Cause() error { return w.error }

// Unwrap provides compatibility for Go 1.13 error chains.
func (w *withStack) Unwrap() error { return w.error }

func (w *withStack) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v", w.Cause())
			w.stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, w.Error())
	case 'q':
		fmt.Fprintf(s, "%q", w.Error())
	}
}

// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
	if err == nil {
		return nil
	}
	err = &withMessage{
		cause: err,
		msg:   message,
	}
	return &withStack{
		err,
		callers(),
	}
}

// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	err = &withMessage{
		cause: err,
		msg:   fmt.Sprintf(format, args...),
	}
	return &withStack{
		err,
		callers(),
	}
}

// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause: err,
		msg:   message,
	}
}

// WithMessagef annotates err with the format specifier.
// If err is nil, WithMessagef returns nil.
func WithMessagef(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause: err,
		msg:   fmt.Sprintf(format, args...),
	}
}

type withMessage struct {
	cause error
	msg   string
}

func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error  { return w.cause }

// Unwrap provides compatibility for Go 1.13 error chains.
func (w *withMessage) Unwrap() error { return w.cause }

func (w *withMessage) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v\n", w.Cause())
			io.WriteString(s, w.msg)
			return
		}
		fallthrough
	case 's', 'q':
		io.WriteString(s, w.Error())
	}
}

// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
//     type causer interface {
//             Cause() error
//     }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
	type causer interface {
		Cause() error
	}

	for err != nil {
		cause, ok := err.(causer)
		if !ok {
			break
		}
		err = cause.Cause()
	}
	return err
}
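The package documentation above walks through wrapping an error, retrieving its cause, and printing it with a stack trace. A minimal sketch of how those pieces fit together, assuming the vendored github.com/pkg/errors shown above is on the module path; the readConfig helper and the file path are invented for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// readConfig is a hypothetical helper: it wraps any failure with context
// and a stack trace captured at the point Wrap is called.
func readConfig(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "read config failed")
	}
	return data, nil
}

func main() {
	_, err := readConfig("/etc/openfaas/missing.yml")
	if err != nil {
		// errors.Cause unwinds the causer chain back to the *os.PathError.
		switch cause := errors.Cause(err).(type) {
		case *os.PathError:
			fmt.Println("path error for:", cause.Path)
		default:
			fmt.Println("unknown error:", cause)
		}
		// %+v prints the message chain plus the recorded stack trace.
		fmt.Printf("%+v\n", err)
	}
}
```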
38
auth/basic-auth/vendor/github.com/pkg/errors/go113.go
generated
vendored
@ -1,38 +0,0 @@
// +build go1.13

package errors

import (
	stderrors "errors"
)

// Is reports whether any error in err's chain matches target.
//
// The chain consists of err itself followed by the sequence of errors obtained by
// repeatedly calling Unwrap.
//
// An error is considered to match a target if it is equal to that target or if
// it implements a method Is(error) bool such that Is(target) returns true.
func Is(err, target error) bool { return stderrors.Is(err, target) }

// As finds the first error in err's chain that matches target, and if so, sets
// target to that error value and returns true.
//
// The chain consists of err itself followed by the sequence of errors obtained by
// repeatedly calling Unwrap.
//
// An error matches target if the error's concrete value is assignable to the value
// pointed to by target, or if the error has a method As(interface{}) bool such that
// As(target) returns true. In the latter case, the As method is responsible for
// setting target.
//
// As will panic if target is not a non-nil pointer to either a type that implements
// error, or to any interface type. As returns false if err is nil.
func As(err error, target interface{}) bool { return stderrors.As(err, target) }

// Unwrap returns the result of calling the Unwrap method on err, if err's
// type contains an Unwrap method returning error.
// Otherwise, Unwrap returns nil.
func Unwrap(err error) error {
	return stderrors.Unwrap(err)
}
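Because go113.go simply forwards to the standard library, and withStack/withMessage expose Unwrap methods, errors wrapped with this package also work with Go 1.13 error matching. A small sketch, with a made-up sentinel error and lookup function for illustration:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// ErrNotFound is a hypothetical sentinel used only for this sketch.
var ErrNotFound = errors.New("not found")

func lookup(key string) error {
	// Wrapf adds context and a stack trace, but keeps ErrNotFound reachable
	// through the Unwrap chain, so the matching helpers still see it.
	return errors.Wrapf(ErrNotFound, "lookup %q", key)
}

func main() {
	err := lookup("figlet")

	// errors.Is walks the Unwrap chain provided by withStack/withMessage.
	if errors.Is(err, ErrNotFound) {
		fmt.Println("treat as a 404:", err)
	}
}
```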
177
auth/basic-auth/vendor/github.com/pkg/errors/stack.go
generated
vendored
@ -1,177 +0,0 @@
package errors

import (
	"fmt"
	"io"
	"path"
	"runtime"
	"strconv"
	"strings"
)

// Frame represents a program counter inside a stack frame.
// For historical reasons if Frame is interpreted as a uintptr
// its value represents the program counter + 1.
type Frame uintptr

// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }

// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return "unknown"
	}
	file, _ := fn.FileLine(f.pc())
	return file
}

// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return 0
	}
	_, line := fn.FileLine(f.pc())
	return line
}

// name returns the name of this function, if known.
func (f Frame) name() string {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return "unknown"
	}
	return fn.Name()
}

// Format formats the frame according to the fmt.Formatter interface.
//
//     %s    source file
//     %d    source line
//     %n    function name
//     %v    equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//     %+s   function name and path of source file relative to the compile time
//           GOPATH separated by \n\t (<funcname>\n\t<path>)
//     %+v   equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
	switch verb {
	case 's':
		switch {
		case s.Flag('+'):
			io.WriteString(s, f.name())
			io.WriteString(s, "\n\t")
			io.WriteString(s, f.file())
		default:
			io.WriteString(s, path.Base(f.file()))
		}
	case 'd':
		io.WriteString(s, strconv.Itoa(f.line()))
	case 'n':
		io.WriteString(s, funcname(f.name()))
	case 'v':
		f.Format(s, 's')
		io.WriteString(s, ":")
		f.Format(s, 'd')
	}
}

// MarshalText formats a stacktrace Frame as a text string. The output is the
// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
func (f Frame) MarshalText() ([]byte, error) {
	name := f.name()
	if name == "unknown" {
		return []byte(name), nil
	}
	return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
}

// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame

// Format formats the stack of Frames according to the fmt.Formatter interface.
//
//     %s	lists source files for each Frame in the stack
//     %v	lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//     %+v   Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case s.Flag('+'):
			for _, f := range st {
				io.WriteString(s, "\n")
				f.Format(s, verb)
			}
		case s.Flag('#'):
			fmt.Fprintf(s, "%#v", []Frame(st))
		default:
			st.formatSlice(s, verb)
		}
	case 's':
		st.formatSlice(s, verb)
	}
}

// formatSlice will format this StackTrace into the given buffer as a slice of
// Frame, only valid when called with '%s' or '%v'.
func (st StackTrace) formatSlice(s fmt.State, verb rune) {
	io.WriteString(s, "[")
	for i, f := range st {
		if i > 0 {
			io.WriteString(s, " ")
		}
		f.Format(s, verb)
	}
	io.WriteString(s, "]")
}

// stack represents a stack of program counters.
type stack []uintptr

func (s *stack) Format(st fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case st.Flag('+'):
			for _, pc := range *s {
				f := Frame(pc)
				fmt.Fprintf(st, "\n%+v", f)
			}
		}
	}
}

func (s *stack) StackTrace() StackTrace {
	f := make([]Frame, len(*s))
	for i := 0; i < len(f); i++ {
		f[i] = Frame((*s)[i])
	}
	return f
}

func callers() *stack {
	const depth = 32
	var pcs [depth]uintptr
	n := runtime.Callers(3, pcs[:])
	var st stack = pcs[0:n]
	return &st
}

// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
	i := strings.LastIndex(name, "/")
	name = name[i+1:]
	i = strings.Index(name, ".")
	return name[i+1:]
}
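The Frame.Format documentation above lists the verbs supported when printing individual stack frames. A short sketch that pulls the frames out of an error and prints them, mirroring the doc comment's own `%+s:%d` example; the stackTracer interface is re-declared locally because, as noted above, the package keeps it unexported:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the unexported interface documented in errors.go; any
// error created by New, Errorf, Wrap or Wrapf satisfies it.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("boom")

	if st, ok := err.(stackTracer); ok {
		for _, f := range st.StackTrace() {
			// %+s prints the function name and source path, %d the line,
			// per the Frame.Format verbs listed in stack.go.
			fmt.Printf("%+s:%d\n", f, f)
		}
	}
}
```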
6
auth/basic-auth/vendor/modules.txt
vendored
@ -1,6 +0,0 @@
# github.com/openfaas/faas-provider v0.19.0
## explicit; go 1.17
github.com/openfaas/faas-provider/auth
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
49
community.md
@ -84,20 +84,55 @@ It would be great to hear from you especially if you have any of the above and w
|
||||
| [Digital Transformation of Vision Banco Paraguay with Serverless Functions @ KubeCon](https://kccna18.sched.com/event/GraO/digital-transformation-of-vision-banco-paraguay-with-serverless-functions-alex-ellis-vmware-patricio-diaz-vision-banco-saeca) | Alex Ellis & Patricio Diaz | 13-Dec-2018 |
|
||||
| [Introducing "faas" - Cool Hacks Keynote at Dockercon 2017](https://blog.docker.com/2017/04/dockercon-2017-mobys-cool-hack-sessions/) | Alex Ellis | 04-April-2017 |
|
||||
|
||||
### 2022
|
||||
### 2023
|
||||
|
||||
#### Blog posts and write-ups 2022
|
||||
#### Blog posts, write-ups and videos 2023
|
||||
|
||||
[Back to top](#openfaas-community)
|
||||
|
||||
| Blog/repo name and description | Author | Site | Date |
|
||||
| Blog/video/repo name and description | Author | Site | Date |
|
||||
|-------------------------------------------------------------------------|--------------|----------|-------------|
|
||||
| [The Event-Driven Edge with OpenFaaS](https://www.openfaas.com/blog/eventdriven-edge/) | Han Verstraete | openfaas.com | 01-June-2022 |
|
||||
| [Building OpenFaaS Serverless function to detect weather using OpenWeatherMap and Python](https://www.faizanbashir.me/building-openfaas-serverless-function-to-detect-weather-using-openweathermap-and-python) | Faizan Bashir | faizanbashir.me | 02-Apr-2023 |
|
||||
| [Fine-tuning the cold-start in OpenFaaS ](https://www.openfaas.com/blog/fine-tuning-the-cold-start/) | Alex Ellis | openfaas.com | 28-Mar-2023 |
|
||||
| [How do changes to the Docker Hub affect OpenFaaS?](https://www.openfaas.com/blog/how-does-docker-hub-affect-openfaas/) | Alex Ellis | openfaas.com | 20-Mar-2023 |
|
||||
| [Cluster auto-scaling with DigitalOcean Kubernetes and OpenFaaS](https://www.openfaas.com/blog/cluster-autoscaling-with-digitalocean/) | Alex Ellis | openfaas.com | 16-Mar-2023 |
|
||||
| [Import leads from Google Forms into your CRM with functions](https://www.openfaas.com/blog/import-leads-from-google-forms-to-crm/) | Alex Ellis | openfaas.com | 02-Mar-2023 |
|
||||
| [Using OpenFaaS on AKS](https://learn.microsoft.com/en-us/azure/aks/openfaas) | Various | learn.microsoft.com | 27-Feb-2023 |
|
||||
| [How to integrate OpenFaaS functions with managed AWS services](https://www.openfaas.com/blog/integrate-openfaas-with-managed-aws-services/) | Han Verstraete | openfaas.com | 19-Jan-2023 |
|
||||
|
||||
### 2022
|
||||
|
||||
#### Blog posts, write-ups and videos 2022
|
||||
|
||||
[Back to top](#openfaas-community)
|
||||
|
||||
| Blog/video/repo name and description | Author | Site | Date |
|
||||
|-------------------------------------------------------------------------|--------------|----------|-------------|
|
||||
| [Trigger OpenFaaS functions from PostgreSQL with AWS Aurora](https://www.openfaas.com/blog/trigger-functions-from-postgres/) | Han Verstraete | openfaas.com | 16-Dec-2022 |
|
||||
| [Introducing our new Python template for production](https://www.openfaas.com/blog/openfaas-pro-python-template/) | Han Verstraete | openfaas.com | 06-Dec-2022 |
|
||||
| [Deploy Serverless Function on k3s/Kubernetes with OpenFaaS (x86/Arm, Linux VM, Go)](https://www.youtube.com/watch?v=-8MrDWg6K6s) | David Hwang | youtube.com | 09-Nov-2022 |
|
||||
| [Rethinking Auto-scaling for OpenFaaS](https://www.openfaas.com/blog/autoscaling-functions/) | Han Verstraete | openfaas.com | 05-Nov-2022 |
|
||||
| [Custom health and readiness checks for your OpenFaaS Functions](https://www.openfaas.com/blog/health-and-readiness-for-functions/) | Alex Ellis | openfaas.com | 26-Oct-2022 |
|
||||
| [Generate PDFs at scale on Kubernetes using OpenFaaS and Puppeteer](https://www.openfaas.com/blog/pdf-generation-at-scale-on-kubernetes/) | Han Verstraete | openfaas.com | 06-Oct-2022 |
|
||||
| [Eliminate vendor locking of Serverless workloads with OpenFaaS](https://awstip.com/eliminate-vendor-lock-in-of-serverless-workloads-with-openfaas-474807383ce1) | Meher Chaitanya | medium.com | 06-Oct-2022 |
|
||||
| [Use the Serverless Function Method to Build a Machine Learning Microservice System](https://blog.infuseai.io/use-serverless-function-method-to-build-a-ml-microservice-system-a108f3f2c1c) | SimonLiu | blog.infuseai.io | 30-Aug-2022 |
|
||||
| [Go Functions as a Service With Kubernetes and OpenFaaS](https://dominikbraun.io/blog/go-functions-as-a-service-with-kubernetes-and-openfaas/) | Dominik Braun | dominikbraun.io | 24-Aug-2022 |
|
||||
| [Exploring the Fan out and Fan in pattern with OpenFaaS](https://www.openfaas.com/blog/fan-out-and-back-in-using-functions/) | Han Verstraete | openfaas.com | 22-Aug-2022 |
|
||||
| [Finding Raspberry Pis with Raspberry Pis](https://www.openfaas.com/blog/searching-for-raspberrypi/) | Alex Ellis | openfaas.com | 08-Aug-2022 |
|
||||
| [The Next Generation of Queuing: JetStream for OpenFaaS](https://www.openfaas.com/blog/jetstream-for-openfaas/) | Han Verstraete | openfaas.com | 21-Jul-2022 |
|
||||
| [How to update your OpenFaaS functions automatically with the Argo CD Image Updater](https://www.openfaas.com/blog/argocd-image-updater-for-functions/) | Han Verstraete | openfaas.com | 04-Jul-2022 |
|
||||
| [How to build functions from source code with the Function Builder API](https://www.openfaas.com/blog/how-to-build-via-api/) | Han Verstraete | openfaas.com | 23-Jun-2022 |
|
||||
| [OpenFaaS First Function](https://rpi4cluster.com/k3s/k3s-openfaas-function/) | Vlado Portos | rpi4cluster.com | 22-Jun-2022 |
|
||||
| [OpenFaaS](https://rpi4cluster.com/k3s/k3s-openfaas/) | Vlado Portos | rpi4cluster.com | 22-Jun-2022 |
|
||||
| [How to package OpenFaaS functions with Helm](https://www.openfaas.com/blog/howto-package-functions-with-helm/) | Han Verstraete | openfaas.com | 09-Jun-2022 |
|
||||
| [The Event-Driven Edge with OpenFaaS](https://www.openfaas.com/blog/eventdriven-edge/) | Han Verstraete | openfaas.com | 01-Jun-2022 |
|
||||
| [Running faasd on Azure Arm-based Virtual Machines](https://blog.ediri.io/running-faasd-on-azure-arm-based-virtual-machines) | Engin Diri | blog.ediri.io | 27-May-2022 |
|
||||
| [WebAssembly functions in OpenFaaS using Sat (Part1)](https://www.wasm.builders/suborbital/webassembly-functions-in-openfaas-using-sat-part-1-2omk) | Connor Hicks | wasm.builders | 04-May-2022 |
|
||||
| [Building a RESTful API with functions](https://simonemms.com/blog/2022/04/24/building-a-restful-api-with-serverless-functions/) | Simon Emms | simonemms.com | 24-April-2022 |
|
||||
| [Building a RESTful API with functions](https://simonemms.com/blog/2022/04/24/building-a-restful-api-with-serverless-functions/) | Simon Emms | simonemms.com | 24-Apr-2022 |
|
||||
| [How to process your data the resilient way with back pressure](https://www.openfaas.com/blog/limits-and-backpressure/) | Alex Ellis | openfaas.com | 12-May-2022 |
|
||||
| [A Deep Dive into Golang for OpenFaaS Functions](https://www.openfaas.com/blog/golang-deep-dive/) | Alex Ellis | openfaas.com | 13-April-2022 |
|
||||
| [Open-Faas on Centos 7](https://medium.com/geekculture/open-faas-on-centos-7-c4dc629f28fe) | Heshani Samarasekara | medium.com | 07-May-2022 |
|
||||
| [A Deep Dive into Golang for OpenFaaS Functions](https://www.openfaas.com/blog/golang-deep-dive/) | Alex Ellis | openfaas.com | 13-Apr-2022 |
|
||||
| [Serverless Architecture with OpenFaaS and Java](https://www.xenonstack.com/blog/serverless-open-faas-java) | Navdeep Singh Gill | xenonstack.com | 13-Mar-2022 |
|
||||
| [My Journey Contributing To OpenFaaS So Far](https://www.openfaas.com/blog/my-journey-contributing-to-openfaas/) | Nitishkumar Singh | openfaas.com | 02-Mar-2022 |
|
||||
| [Your pocket-sized cloud with a Raspberry Pi](https://blog.alexellis.io/your-pocket-sized-cloud/) | Alex Ellis | openfaas.com | 23-Mar-2022 |
|
||||
| [Hosting a React App with OpenFaaS](https://www.openfaas.com/blog/react-app/) | Alex Ellis | openfaas.com | 01-Mar-2022 |
|
||||
@ -126,6 +161,7 @@ Mainly virtual due to pandemic.
|
||||
|-------------------------------------------------------------------------|--------------|----------|-------------|
|
||||
| [Configure your OpenFaaS functions for staging and production](https://www.openfaas.com/blog/custom-environments/) | Alex Ellis | openfaas.com | 09-Dec-2021 |
|
||||
| [OpenFaaS - Run Containerized Functions On Your Own Terms](https://iximiuz.com/en/posts/openfaas-case-study/) | Ivan Velichko | iximiuz.com | 02-Dec-2021 |
|
||||
| [Making a Docker Dev Environment for OpenFaaS](https://www.felipecruz.es/making-a-docker-dev-environment-for-openfaas/) | Felipe Cruz | felipecruz.es | 30-Nov-2021 |
|
||||
| [Build at the Edge with OpenFaaS and GitHub Actions](https://www.openfaas.com/blog/edge-actions/) | Alex Ellis | openfaas.com | 29-Nov-2021 |
|
||||
| [Improving long-running jobs for OpenFaaS users](https://www.openfaas.com/blog/long-running-jobs/) | Alex Ellis | openfaas.com | 05-Nov-2021 |
|
||||
| [Derek says goodbye to Docker Swarm](https://www.openfaas.com/blog/migrating-derek-from-docker-swarm/) | Alex Ellis | openfaas.com | 05-Oct-2021 |
|
||||
@ -147,6 +183,7 @@ Mainly virtual due to pandemic.
|
||||
| [How to integrate with GitHub the right way with GitHub Apps](https://www.openfaas.com/blog/integrate-with-github-apps-and-faasd/) | Batuhan Apaydın | openfaas.com | 26-Jan-2021 |
|
||||
| [Serverless with OpenFaaS and .NET](https://goncalo-a-oliveira.medium.com/serverless-with-openfaas-and-net-6a66b5c30a5f) | Batuhan Apaydın | medium.com | 20-Jan-2021 |
|
||||
| [How I discovered faas and what it changed for me](https://releasecandidate.dev/posts/2021/discovery-faasd-and-openfaas/) | Peter Thaleikis | releasecandidate.dev | 05-Jan-2021 |
|
||||
| [Installing OpenFaaS On k3s (Single Node)](https://midnightprogrammer.net/post/installing-openfaas-on-k3s-single-node/) | Pashant Khandelwal | midnightprogrammer.net | 04-Jan-2021 |
|
||||
|
||||
#### Events in 2021
|
||||
|
||||
|
@ -1,6 +1,6 @@
FROM --platform=${BUILDPLATFORM:-linux/amd64} ghcr.io/openfaas/license-check:0.4.0 as license-check
FROM --platform=${BUILDPLATFORM:-linux/amd64} ghcr.io/openfaas/license-check:0.4.1 as license-check

FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.18 as build
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.21 as build

ENV GO111MODULE=on
ENV CGO_ENABLED=0
@ -29,7 +29,6 @@ COPY types types
COPY plugin plugin
COPY version version
COPY scaling scaling
COPY probing probing
COPY pkg pkg
COPY main.go .

@ -43,9 +42,9 @@ RUN CGO_ENABLED=${CGO_ENABLED} GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build --
    -X \"github.com/openfaas/faas/gateway/version.GitCommitSHA=${GIT_COMMIT}\" \
    -X \"github.com/openfaas/faas/gateway/version.Version=${VERSION}\" \
    -X github.com/openfaas/faas/gateway/types.Arch=${TARGETARCH}" \
    -a -installsuffix cgo -o gateway .
    -o gateway .

FROM --platform=${TARGETPLATFORM:-linux/amd64} alpine:3.16.1 as ship
FROM --platform=${TARGETPLATFORM:-linux/amd64} alpine:3.19.0 as ship

LABEL org.label-schema.license="MIT" \
    org.label-schema.vcs-url="https://github.com/openfaas/faas" \
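The RUN go build step in the gateway Dockerfile above injects version metadata through `-ldflags "-X ..."`. A hedged sketch of what the receiving side of those flags typically looks like; the real gateway version package is not part of this diff, so the fallback logic here is illustrative and only the variable paths (version.Version, version.GitCommitSHA) are taken from the build flags shown:

```go
// Package version is a sketch of the receiving end of the -ldflags "-X ..."
// flags in the gateway Dockerfile. The actual file is not shown in this diff.
package version

var (
	// Version is overridden at build time via
	// -X "github.com/openfaas/faas/gateway/version.Version=${VERSION}".
	Version string

	// GitCommitSHA is overridden via
	// -X "github.com/openfaas/faas/gateway/version.GitCommitSHA=${GIT_COMMIT}".
	GitCommitSHA string
)

// BuildVersion falls back to a dev marker when the binary was built without
// the -X flags, for example with a plain `go build`.
func BuildVersion() string {
	if len(Version) == 0 {
		return "dev"
	}
	return Version
}
```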
@ -1,14 +1,14 @@
export DOCKER_CLI_EXPERIMENTAL=enabled

PLATFORM := "linux/amd64,linux/arm/v7,linux/arm64"
PLATFORM?="linux/amd64,linux/arm/v7,linux/arm64"

TAG?=latest
SERVER?=docker.io
OWNER?=alexellis2
TAG?=dev
SERVER?=ttl.sh
OWNER?=openfaas
NAME=gateway

.PHONY: local-docker
build-local:
.PHONY: buildx-local
buildx-local:
	@echo $(SERVER)/$(OWNER)/$(NAME):$(TAG) \
	&& docker buildx create --use --name=multiarch --node multiarch \
	&& docker buildx build \
@ -17,8 +17,18 @@ build-local:
	--output "type=docker,push=false" \
	--tag $(SERVER)/$(OWNER)/$(NAME):$(TAG) .

.PHONY: push-docker
push-docker:
.PHONY: buildx-push
buildx-push:
	@echo $(SERVER)/$(OWNER)/$(NAME):$(TAG) \
	&& docker buildx create --use --name=multiarch --node multiarch \
	&& docker buildx build \
	--progress=plain \
	--platform linux/amd64 \
	--output "type=image,push=true" \
	--tag $(SERVER)/$(OWNER)/$(NAME):$(TAG) .

.PHONY: buildx-push-all
buildx-push-all:
	@echo $(SERVER)/$(OWNER)/$(NAME):$(TAG) \
	&& docker buildx create --use --name=multiarch --node multiarch \
	&& docker buildx build \
@ -1,31 +1,35 @@
module github.com/openfaas/faas/gateway

go 1.17
go 1.21

require (
	github.com/docker/distribution v2.8.1+incompatible
	github.com/docker/distribution v2.8.3+incompatible
	github.com/gorilla/mux v1.8.0
	github.com/openfaas/faas-provider v0.19.1
	github.com/openfaas/nats-queue-worker v0.0.0-20220805080536-d1d72d857b1c
	github.com/prometheus/client_golang v1.13.0
	github.com/prometheus/client_model v0.2.0
	go.uber.org/goleak v1.1.12
	golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde
	github.com/openfaas/faas-provider v0.25.2
	github.com/openfaas/nats-queue-worker v0.0.0-20231023101743-fa54e89c9db2
	github.com/prometheus/client_golang v1.17.0
	github.com/prometheus/client_model v0.5.0
	go.uber.org/goleak v1.2.1
	golang.org/x/sync v0.4.0
)

// replace github.com/openfaas/faas-provider => ../../faas-provider

require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
	github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 // indirect
	github.com/nats-io/nkeys v0.3.0 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/klauspost/compress v1.17.2 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/nats-io/nats.go v1.31.0 // indirect
	github.com/nats-io/nkeys v0.4.5 // indirect
	github.com/nats-io/nuid v1.0.1 // indirect
	github.com/nats-io/stan.go v0.9.0 // indirect
	github.com/prometheus/common v0.37.0 // indirect
	github.com/prometheus/procfs v0.8.0 // indirect
	golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 // indirect
	golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24 // indirect
	google.golang.org/protobuf v1.28.1 // indirect
	github.com/nats-io/stan.go v0.10.4 // indirect
	github.com/prometheus/common v0.44.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	github.com/stretchr/testify v1.8.2 // indirect
	golang.org/x/crypto v0.14.0 // indirect
	golang.org/x/sys v0.13.0 // indirect
	google.golang.org/protobuf v1.31.0 // indirect
)
201
gateway/go.sum
@ -33,52 +33,45 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
|
||||
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
@ -109,8 +102,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@ -122,8 +116,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
@ -139,22 +132,14 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o=
|
||||
github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
|
||||
github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
|
||||
github.com/hashicorp/go-msgpack/v2 v2.1.0 h1:J2g2hMyjSefUPTnkLRU2MnsLLsPRB1n4Z/wJRN07GuA=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/raft v1.3.1 h1:zDT8ke8y2aP4wf9zPTB2uSIeavJ3Hx/ceY4jxI2JxuY=
|
||||
github.com/hashicorp/raft v1.3.1/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
|
||||
github.com/hashicorp/raft v1.5.0 h1:uNs9EfJ4FwiArZRxxfd/dQ5d33nV31/CdCHArH89hT8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
@ -167,24 +152,20 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk=
|
||||
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
|
||||
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
|
||||
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
|
||||
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
@ -192,105 +173,107 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU=
|
||||
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
|
||||
github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI=
|
||||
github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
|
||||
github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI=
|
||||
github.com/nats-io/nats-server/v2 v2.3.2 h1:SGJLWrjBHsl0DsdY8PeTR3YKEfiUEYVVq2STw9d8MSY=
|
||||
github.com/nats-io/nats-server/v2 v2.3.2/go.mod h1:dUf7Cm5z5LbciFVwWx54owyCKm8x4/hL6p7rrljhLFY=
|
||||
github.com/nats-io/nats-streaming-server v0.22.0 h1:2egnq86o9roTqUfELlqykf7ZZkNvRsXjVf4EbaLysHo=
|
||||
github.com/nats-io/nats-streaming-server v0.22.0/go.mod h1:Jyu3eUQaUAjwd5TiBuLagKdQRofPrHoIXt1kL0U/e5o=
|
||||
github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
|
||||
github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 h1:9GqilBhZaR3xYis0JgMlJjNw933WIobdjKhilXm+Vls=
|
||||
github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
|
||||
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
|
||||
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
|
||||
github.com/nats-io/jwt/v2 v2.5.2 h1:DhGH+nKt+wIkDxM6qnVSKjokq5t59AZV5HRcFW0zJwU=
|
||||
github.com/nats-io/nats-server/v2 v2.10.3 h1:nk2QVLpJUh3/AhZCJlQdTfj2oeLDvWnn1Z6XzGlNFm0=
|
||||
github.com/nats-io/nats-server/v2 v2.10.3/go.mod h1:lzrskZ/4gyMAh+/66cCd+q74c6v7muBypzfWhP/MAaM=
|
||||
github.com/nats-io/nats-streaming-server v0.25.5 h1:DX6xaPhKvVLhdpNsuEmmD+O9LfWSnw8cvxQU/H9LRy8=
|
||||
github.com/nats-io/nats-streaming-server v0.25.5/go.mod h1:dSBVdHGsT/tV91lT4MWFfE6+yjRCNhRIYJpBaTHFdAo=
|
||||
github.com/nats-io/nats.go v1.22.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA=
|
||||
github.com/nats-io/nats.go v1.31.0 h1:/WFBHEc/dOKBF6qf1TZhrdEfTmOZ5JzdJ+Y3m6Y/p7E=
|
||||
github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8=
|
||||
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
|
||||
github.com/nats-io/nkeys v0.4.5 h1:Zdz2BUlFm4fJlierwvGK+yl20IAKUm7eV6AAZXEhkPk=
|
||||
github.com/nats-io/nkeys v0.4.5/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nats-io/stan.go v0.9.0 h1:TB73Y31au++0sU0VmnBy2pYkSrwH0zUFNRB9YePHqC4=
|
||||
github.com/nats-io/stan.go v0.9.0/go.mod h1:0jEuBXKauB1HHJswHM/lx05K48TJ1Yxj6VIfM4k+aB4=
|
||||
github.com/openfaas/faas-provider v0.18.6/go.mod h1:fq1JL0mX4rNvVVvRLaLRJ3H6o667sHuyP5p/7SZEe98=
|
||||
github.com/nats-io/stan.go v0.10.4 h1:19GS/eD1SeQJaVkeM9EkvEYattnvnWrZ3wkSWSw4uXw=
|
||||
github.com/nats-io/stan.go v0.10.4/go.mod h1:3XJXH8GagrGqajoO/9+HgPyKV5MWsv7S5ccdda+pc6k=
|
||||
github.com/openfaas/faas-provider v0.19.1 h1:xH8lTWabfDZwzIvC0u1AO48ghD3BNw6Vo231DLqTeI0=
|
||||
github.com/openfaas/faas-provider v0.19.1/go.mod h1:Farrp+9Med8LeK3aoYpqplMP8f5ebTILbCSLg2LPLZk=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20220805080536-d1d72d857b1c h1:ZVpAJIpDdHkX5NGdz49kCzyvG+H+S/KvMAVVfZAN8EI=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20220805080536-d1d72d857b1c/go.mod h1:ajlN2z+D8JPBq3kWNv4WLT6mtKPqlgeE3dYEx39d1tk=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/openfaas/faas-provider v0.24.4 h1:Zzbkabgd0PoQmnRjy53NbMXjhLaIyoIiwP3qaLkm9rE=
|
||||
github.com/openfaas/faas-provider v0.24.4/go.mod h1:NsETIfEndZn4mn/w/XnBTcDTwKqULCziphLp7KgeRcA=
|
||||
github.com/openfaas/faas-provider v0.25.2 h1:sAyL96CzAk/YnuXQZiRJcHo7UrcYMaf7RDvKxsQb/2o=
|
||||
github.com/openfaas/faas-provider v0.25.2/go.mod h1:NsETIfEndZn4mn/w/XnBTcDTwKqULCziphLp7KgeRcA=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20230303171817-9dfe6fa61387 h1:D4xbdy309Wdyhlm6PgJqUV/aR77VQQG8UTF+q0ay71c=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20230303171817-9dfe6fa61387/go.mod h1:s86POyW6C8S4CALFRhO8ax5sR2uaQUJQ0HaQGvbTpTc=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20231023101743-fa54e89c9db2 h1:I8U2kq2h7Wl6pkd4hjRK6P0/o3AcCNdfmNJS5gdgxKU=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20231023101743-fa54e89c9db2/go.mod h1:Ckz9JKcyzKtzLd9Obc5wVAXJhq05lKL3Ck+5r3MnKE0=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
||||
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
|
||||
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/goleak v1.1.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 h1:GIAS/yBem/gq2MUqgNIzUHW7cJMmx3TGZOrnyYaNQ6c=
|
||||
golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@ -312,7 +295,6 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
@ -322,11 +304,10 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
@ -354,18 +335,15 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -376,16 +354,13 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde h1:ejfdSekXMDxDLbRrJMwUk6KnSLZ2McaUCVcIKM+N6jc=
|
||||
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
|
||||
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -395,7 +370,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -417,19 +391,18 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24 h1:TyKJRhyo17yWxOMCTHKWrc5rddHORMlnZ/j57umaUd8=
|
||||
golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@ -437,18 +410,17 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
@ -458,7 +430,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@ -486,8 +457,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@ -567,8 +537,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@ -580,8 +550,9 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
@ -6,7 +6,7 @@ package handlers
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
@ -27,7 +27,7 @@ func MakeAlertHandler(service scaling.ServiceQuery, defaultNamespace string) htt
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
w.Write([]byte("Unable to read alert."))
|
||||
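
The switch from ioutil.ReadAll to io.ReadAll above follows the deprecation of the io/ioutil package since Go 1.16; the two functions behave identically. A minimal sketch of the replacement pattern, using an in-memory reader rather than a real request body:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader(`{"status":"firing"}`)

	// io.ReadAll replaces the deprecated ioutil.ReadAll (Go 1.16+).
	body, err := io.ReadAll(r)
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(body))
}
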
@ -44,7 +44,7 @@ func MakeAlertHandler(service scaling.ServiceQuery, defaultNamespace string) htt
|
||||
return
|
||||
}
|
||||
|
||||
errors := handleAlerts(&req, service, defaultNamespace)
|
||||
errors := handleAlerts(req, service, defaultNamespace)
|
||||
if len(errors) > 0 {
|
||||
log.Println(errors)
|
||||
var errorOutput string
|
||||
@ -60,7 +60,7 @@ func MakeAlertHandler(service scaling.ServiceQuery, defaultNamespace string) htt
|
||||
}
|
||||
}
|
||||
|
||||
func handleAlerts(req *requests.PrometheusAlert, service scaling.ServiceQuery, defaultNamespace string) []error {
|
||||
func handleAlerts(req requests.PrometheusAlert, service scaling.ServiceQuery, defaultNamespace string) []error {
|
||||
var errors []error
|
||||
for _, alert := range req.Alerts {
|
||||
if err := scaleService(alert, service, defaultNamespace); err != nil {
|
||||
@ -102,6 +102,7 @@ func scaleService(alert requests.PrometheusInnerAlert, service scaling.ServiceQu
|
||||
func CalculateReplicas(status string, currentReplicas uint64, maxReplicas uint64, minReplicas uint64, scalingFactor uint64) uint64 {
|
||||
var newReplicas uint64
maxReplicas = uint64(math.Min(float64(maxReplicas), float64(scaling.DefaultMaxReplicas)))
|
||||
step := uint64(math.Ceil(float64(maxReplicas) / 100 * float64(scalingFactor)))
if status == "firing" && step > 0 {
|
||||
|
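
The visible part of CalculateReplicas clamps maxReplicas to scaling.DefaultMaxReplicas and derives a step of ceil(maxReplicas / 100 * scalingFactor). A standalone sketch of that arithmetic; the value 20 for the default ceiling is an assumption taken from the tests that follow:

package main

import (
	"fmt"
	"math"
)

// defaultMaxReplicas stands in for scaling.DefaultMaxReplicas (assumed to be 20).
const defaultMaxReplicas = 20

// step mirrors the step calculation shown in CalculateReplicas above.
func step(maxReplicas, scalingFactor uint64) uint64 {
	maxReplicas = uint64(math.Min(float64(maxReplicas), float64(defaultMaxReplicas)))
	return uint64(math.Ceil(float64(maxReplicas) / 100 * float64(scalingFactor)))
}

func main() {
	fmt.Println(step(20, 10)) // ceil(20 * 10 / 100) = 2 replicas per firing alert
	fmt.Println(step(20, 25)) // ceil(20 * 25 / 100) = 5
	fmt.Println(step(40, 10)) // the clamp reduces 40 to 20 first, so still 2
}
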
@ -1,4 +1,4 @@
|
||||
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package handlers
|
||||
@ -12,9 +12,9 @@ import (
|
||||
func TestDisabledScale(t *testing.T) {
|
||||
minReplicas := uint64(1)
|
||||
scalingFactor := uint64(0)
|
||||
newReplicas := CalculateReplicas("firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != minReplicas {
|
||||
t.Logf("Expected not to scale, but replicas were: %d", newReplicas)
|
||||
got := CalculateReplicas("firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if got != minReplicas {
|
||||
t.Logf("Expected not to scale, but replicas were: %d", got)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
@ -22,20 +22,23 @@ func TestDisabledScale(t *testing.T) {
|
||||
func TestParameterEdge(t *testing.T) {
|
||||
minReplicas := uint64(0)
|
||||
scalingFactor := uint64(0)
|
||||
newReplicas := CalculateReplicas("firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != 0 {
|
||||
got := CalculateReplicas("firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if got != 0 {
|
||||
t.Log("Expected not to scale")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestScalingWithSameUpperLowerLimit(t *testing.T) {
|
||||
minReplicas := uint64(1)
|
||||
scalingFactor := uint64(20)
|
||||
// status string, currentReplicas uint64, maxReplicas uint64, minReplicas uint64, scalingFactor uint64)
|
||||
newReplicas := CalculateReplicas("firing", minReplicas, minReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != 1 {
|
||||
t.Logf("Replicas - want: %d, got: %d", minReplicas, newReplicas)
|
||||
func TestScaling_SameUpperLowerLimit(t *testing.T) {
|
||||
minReplicas := uint64(5)
|
||||
maxReplicas := uint64(5)
|
||||
scalingFactor := uint64(10)
|
||||
|
||||
got := CalculateReplicas("firing", minReplicas, minReplicas, maxReplicas, scalingFactor)
|
||||
|
||||
want := minReplicas
|
||||
if want != got {
|
||||
t.Logf("Replicas - want: %d, got: %d", want, got)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
@ -43,58 +46,62 @@ func TestScalingWithSameUpperLowerLimit(t *testing.T) {
|
||||
func TestMaxScale(t *testing.T) {
|
||||
minReplicas := uint64(1)
|
||||
scalingFactor := uint64(100)
|
||||
newReplicas := CalculateReplicas("firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != 20 {
|
||||
t.Log("Expected ceiling of 20 replicas")
|
||||
t.Fail()
|
||||
got := CalculateReplicas("firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas*2, minReplicas, scalingFactor)
|
||||
if got != scaling.DefaultMaxReplicas {
|
||||
t.Fatalf("want ceiling: %d, but got: %d", scaling.DefaultMaxReplicas, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitialScale(t *testing.T) {
|
||||
func TestInitialScale_From1_Factor10(t *testing.T) {
|
||||
minReplicas := uint64(1)
|
||||
scalingFactor := uint64(20)
|
||||
newReplicas := CalculateReplicas("firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != 5 {
|
||||
t.Log("Expected the increment to equal 5")
|
||||
t.Fail()
|
||||
scalingFactor := uint64(10)
|
||||
got := CalculateReplicas("firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
want := uint64(2)
|
||||
|
||||
if got != want {
|
||||
t.Fatalf("want: %d, but got: %d", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScale(t *testing.T) {
|
||||
func TestScale_midrange_factor25(t *testing.T) {
|
||||
minReplicas := uint64(1)
|
||||
scalingFactor := uint64(20)
|
||||
newReplicas := CalculateReplicas("firing", 4, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != 8 {
|
||||
t.Log("Expected newReplicas to equal 8")
|
||||
t.Fail()
|
||||
scalingFactor := uint64(25)
|
||||
current := uint64(4)
|
||||
maxReplicas := uint64(scaling.DefaultMaxReplicas)
|
||||
|
||||
got := CalculateReplicas("firing", current, maxReplicas, minReplicas, scalingFactor)
|
||||
want := uint64(5)
|
||||
if want != got {
|
||||
t.Fatalf("want: %d, but got: %d", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScaleCeiling(t *testing.T) {
|
||||
func TestScale_Ceiling_IsDefaultMaxReplicas(t *testing.T) {
|
||||
minReplicas := uint64(1)
|
||||
scalingFactor := uint64(20)
|
||||
newReplicas := CalculateReplicas("firing", 20, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != 20 {
|
||||
t.Log("Expected ceiling of 20 replicas")
|
||||
t.Fail()
|
||||
scalingFactor := uint64(10)
|
||||
current := uint64(scaling.DefaultMaxReplicas)
|
||||
|
||||
got := CalculateReplicas("firing", current, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if got != scaling.DefaultMaxReplicas {
|
||||
t.Fatalf("want: %d, but got: %d", scaling.DefaultMaxReplicas, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScaleCeilingEdge(t *testing.T) {
|
||||
func TestScaleCeilingReplicasOver(t *testing.T) {
|
||||
minReplicas := uint64(1)
|
||||
scalingFactor := uint64(20)
|
||||
newReplicas := CalculateReplicas("firing", 19, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != 20 {
|
||||
t.Log("Expected ceiling of 20 replicas")
|
||||
t.Fail()
|
||||
scalingFactor := uint64(10)
|
||||
got := CalculateReplicas("firing", 19, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
|
||||
if got != scaling.DefaultMaxReplicas {
|
||||
t.Fatalf("want: %d, but got: %d", scaling.DefaultMaxReplicas, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackingOff(t *testing.T) {
|
||||
minReplicas := uint64(1)
|
||||
scalingFactor := uint64(20)
|
||||
newReplicas := CalculateReplicas("resolved", 8, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if newReplicas != 1 {
|
||||
scalingFactor := uint64(10)
|
||||
got := CalculateReplicas("resolved", 8, scaling.DefaultMaxReplicas, minReplicas, scalingFactor)
|
||||
if got != 1 {
|
||||
t.Log("Expected backing off to 1 replica")
|
||||
t.Fail()
|
||||
}
|
||||
@ -104,9 +111,9 @@ func TestScaledUpFrom1(t *testing.T) {
|
||||
currentReplicas := uint64(1)
|
||||
maxReplicas := uint64(5)
|
||||
scalingFactor := uint64(30)
|
||||
newReplicas := CalculateReplicas("firing", currentReplicas, maxReplicas, scaling.DefaultMinReplicas, scalingFactor)
|
||||
if newReplicas <= currentReplicas {
|
||||
t.Log("Expected newReplicas > currentReplica")
|
||||
got := CalculateReplicas("firing", currentReplicas, maxReplicas, scaling.DefaultMinReplicas, scalingFactor)
|
||||
if got <= currentReplicas {
|
||||
t.Log("Expected got > currentReplica")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
@ -115,9 +122,9 @@ func TestScaledUpWithSmallParam(t *testing.T) {
|
||||
currentReplicas := uint64(1)
|
||||
maxReplicas := uint64(4)
|
||||
scalingFactor := uint64(1)
|
||||
newReplicas := CalculateReplicas("firing", currentReplicas, maxReplicas, scaling.DefaultMinReplicas, scalingFactor)
|
||||
if newReplicas <= currentReplicas {
|
||||
t.Log("Expected newReplicas > currentReplica")
|
||||
got := CalculateReplicas("firing", currentReplicas, maxReplicas, scaling.DefaultMinReplicas, scalingFactor)
|
||||
if got <= currentReplicas {
|
||||
t.Log("Expected got > currentReplica")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
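
The individual tests above could also be written as a single table-driven test, which is the idiomatic Go way to cover many parameter combinations. The sketch below reuses only input/expected pairs that appear in the diff, and assumes DefaultMinReplicas and DefaultMaxReplicas keep the values implied by those tests:

package handlers

import (
	"testing"

	"github.com/openfaas/faas/gateway/scaling"
)

// TestCalculateReplicas_Table consolidates cases shown above into one table.
func TestCalculateReplicas_Table(t *testing.T) {
	cases := []struct {
		name    string
		status  string
		current uint64
		max     uint64
		min     uint64
		factor  uint64
		want    uint64
	}{
		{"initial from 1, factor 10", "firing", scaling.DefaultMinReplicas, scaling.DefaultMaxReplicas, 1, 10, 2},
		{"midrange, factor 25", "firing", 4, scaling.DefaultMaxReplicas, 1, 25, 5},
		{"resolved backs off", "resolved", 8, scaling.DefaultMaxReplicas, 1, 10, 1},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := CalculateReplicas(tc.status, tc.current, tc.max, tc.min, tc.factor)
			if got != tc.want {
				t.Fatalf("want: %d, but got: %d", tc.want, got)
			}
		})
	}
}
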
@ -9,10 +9,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/openfaas/faas/gateway/version"
|
||||
)
|
||||
|
||||
// MakeCallIDMiddleware middleware tags a request with a uid
|
||||
func MakeCallIDMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
|
||||
version := version.Version
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
if len(r.Header.Get("X-Call-Id")) == 0 {
|
||||
@ -24,6 +28,8 @@ func MakeCallIDMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
r.Header.Add("X-Start-Time", fmt.Sprintf("%d", start.UTC().UnixNano()))
|
||||
w.Header().Add("X-Start-Time", fmt.Sprintf("%d", start.UTC().UnixNano()))
|
||||
|
||||
w.Header().Add("X-Served-By", fmt.Sprintf("openfaas-community/%s", version))
|
||||
|
||||
next(w, r)
|
||||
}
|
||||
}
|
||||
|
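
MakeCallIDMiddleware now adds an X-Served-By header carrying the gateway version, alongside the existing X-Call-Id and X-Start-Time tagging. A wiring sketch under the assumption that the handlers package import path is github.com/openfaas/faas/gateway/handlers; the echo handler and port are illustrative:

package main

import (
	"fmt"
	"net/http"

	"github.com/openfaas/faas/gateway/handlers"
)

func main() {
	echo := func(w http.ResponseWriter, r *http.Request) {
		// By the time this runs, the middleware has tagged the request.
		fmt.Fprintf(w, "call id: %s\n", r.Header.Get("X-Call-Id"))
	}

	// Every request gets X-Call-Id and X-Start-Time, every response X-Served-By.
	http.HandleFunc("/echo", handlers.MakeCallIDMiddleware(echo))
	http.ListenAndServe(":8080", nil)
}
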
@ -1,47 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MakeExternalAuthHandler make an authentication proxy handler
|
||||
func MakeExternalAuthHandler(next http.HandlerFunc, upstreamTimeout time.Duration, upstreamURL string, passBody bool) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
req, _ := http.NewRequest(http.MethodGet, upstreamURL, nil)
|
||||
|
||||
copyHeaders(req.Header, &r.Header)
|
||||
|
||||
deadlineContext, cancel := context.WithTimeout(
|
||||
context.Background(),
|
||||
upstreamTimeout)
|
||||
|
||||
defer cancel()
|
||||
|
||||
res, err := http.DefaultClient.Do(req.WithContext(deadlineContext))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
log.Printf("ExternalAuthHandler: %s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if res.Body != nil {
|
||||
defer res.Body.Close()
|
||||
}
|
||||
|
||||
if res.StatusCode == http.StatusOK {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
copyHeaders(w.Header(), &res.Header)
|
||||
w.WriteHeader(res.StatusCode)
|
||||
|
||||
if res.Body != nil {
|
||||
io.Copy(w, res.Body)
|
||||
}
|
||||
}
|
||||
}
|
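
MakeExternalAuthHandler, removed in this comparison, delegated authentication to an upstream endpoint and only invoked next on a 200 response, otherwise proxying the upstream status, headers and body back to the caller. A sketch of how it was typically wired; the auth URL and port are illustrative:

package main

import (
	"net/http"
	"time"

	"github.com/openfaas/faas/gateway/handlers"
)

func main() {
	protected := func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("authenticated"))
	}

	// Requests reach `protected` only when the auth plugin answers 200.
	authURL := "http://basic-auth-plugin:8080/validate" // illustrative
	passBody := false

	http.HandleFunc("/", handlers.MakeExternalAuthHandler(protected, 5*time.Second, authURL, passBody))
	http.ListenAndServe(":8081", nil)
}
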
@ -1,239 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Test_External_Auth_Wrapper_FailsInvalidAuth(t *testing.T) {
|
||||
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
next := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
passBody := false
|
||||
handler := MakeExternalAuthHandler(next, time.Second*5, s.URL, passBody)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, s.URL, nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler(rr, req)
|
||||
|
||||
if rr.Code == http.StatusOK {
|
||||
t.Errorf("Status incorrect, did not want: %d, but got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_External_Auth_Wrapper_FailsInvalidAuth_WritesBody(t *testing.T) {
|
||||
|
||||
wantBody := []byte(`invalid credentials`)
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
w.Write(wantBody)
|
||||
}))
|
||||
|
||||
defer s.Close()
|
||||
|
||||
next := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
passBody := false
|
||||
handler := MakeExternalAuthHandler(next, time.Second*5, s.URL, passBody)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, s.URL, nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler(rr, req)
|
||||
|
||||
if rr.Code == http.StatusOK {
|
||||
t.Errorf("Status incorrect, did not want: %d, but got %d", http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
if bytes.Compare(rr.Body.Bytes(), wantBody) != 0 {
|
||||
t.Errorf("Body incorrect, want: %s, but got %s", []byte(wantBody), rr.Body)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_External_Auth_Wrapper_PassesValidAuth(t *testing.T) {
|
||||
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
next := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
passBody := false
|
||||
handler := MakeExternalAuthHandler(next, time.Second*5, s.URL, passBody)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, s.URL, nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler(rr, req)
|
||||
want := http.StatusNotImplemented
|
||||
if rr.Code != want {
|
||||
t.Errorf("Status incorrect, want: %d, but got %d", want, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_External_Auth_Wrapper_WithoutRequiredHeaderFailsAuth(t *testing.T) {
|
||||
wantToken := "secret-key"
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Header.Get("X-Token") == wantToken {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
next := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
passBody := false
|
||||
handler := MakeExternalAuthHandler(next, time.Second*5, s.URL, passBody)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, s.URL, nil)
|
||||
|
||||
// use an invalid token
|
||||
req.Header.Set("X-Token", "invalid-key")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler(rr, req)
|
||||
want := http.StatusUnauthorized
|
||||
if rr.Code != want {
|
||||
t.Errorf("Status incorrect, want: %d, but got %d", want, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_External_Auth_Wrapper_WithoutRequiredHeaderFailsAuth_ProxiesServerHeaders(t *testing.T) {
|
||||
wantToken := "secret-key"
|
||||
wantRealm := `Basic realm="Restricted"`
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Header.Get("X-Token") == wantToken {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Www-Authenticate", wantRealm)
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
next := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
passBody := false
|
||||
handler := MakeExternalAuthHandler(next, time.Second*5, s.URL, passBody)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, s.URL, nil)
|
||||
|
||||
// use an invalid token
|
||||
req.Header.Set("X-Token", "invalid-key")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler(rr, req)
|
||||
want := http.StatusUnauthorized
|
||||
if rr.Code != want {
|
||||
t.Errorf("Status incorrect, want: %d, but got %d", want, rr.Code)
|
||||
}
|
||||
|
||||
got := rr.Header().Get("Www-Authenticate")
|
||||
if got != wantRealm {
|
||||
t.Errorf("Www-Authenticate header, want: %s, but got %s, %q", wantRealm, got, rr.Header())
|
||||
}
|
||||
}
|
||||
|
||||
func Test_External_Auth_Wrapper_WithRequiredHeaderPassesValidAuth(t *testing.T) {
|
||||
wantToken := "secret-key"
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Header.Get("X-Token") == wantToken {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
next := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
passBody := false
|
||||
handler := MakeExternalAuthHandler(next, time.Second*5, s.URL, passBody)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, s.URL, nil)
|
||||
req.Header.Set("X-Token", wantToken)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler(rr, req)
|
||||
want := http.StatusNotImplemented
|
||||
if rr.Code != want {
|
||||
t.Errorf("Status incorrect, want: %d, but got %d", want, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_External_Auth_Wrapper_TimeoutGivesInternalServerError(t *testing.T) {
|
||||
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
next := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
passBody := false
|
||||
handler := MakeExternalAuthHandler(next, time.Millisecond*10, s.URL, passBody)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, s.URL, nil)
|
||||
rr := httptest.NewRecorder()
|
||||
handler(rr, req)
|
||||
|
||||
want := http.StatusInternalServerError
|
||||
if rr.Code != want {
|
||||
t.Errorf("Status incorrect, want: %d, but got %d", want, rr.Code)
|
||||
}
|
||||
wantSubstring := "context deadline exceeded\n"
|
||||
if !strings.HasSuffix(string(rr.Body.Bytes()), wantSubstring) {
|
||||
t.Errorf("Body incorrect, want to have suffix: %q, but got %q", []byte(wantSubstring), rr.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// // Test_External_Auth_Wrapper_PassesValidAuthButOnly200IsValid this test exists
|
||||
// // to document the TODO action to consider all "2xx" statuses as valid.
|
||||
// func Test_External_Auth_Wrapper_PassesValidAuthButOnly200IsValid(t *testing.T) {
|
||||
|
||||
// s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// w.WriteHeader(http.StatusAccepted)
|
||||
// }))
|
||||
// defer s.Close()
|
||||
|
||||
// next := func(w http.ResponseWriter, r *http.Request) {
|
||||
// w.WriteHeader(http.StatusNotImplemented)
|
||||
// }
|
||||
|
||||
// passBody := false
|
||||
// handler := MakeExternalAuthHandler(next, time.Second*5, s.URL, passBody)
|
||||
|
||||
// req := httptest.NewRequest(http.MethodGet, s.URL, nil)
|
||||
// rr := httptest.NewRecorder()
|
||||
// handler(rr, req)
|
||||
// want := http.StatusUnauthorized
|
||||
// if rr.Code != want {
|
||||
// t.Errorf("Status incorrect, want: %d, but got %d", want, rr.Code)
|
||||
// }
|
||||
// }
|
@ -9,9 +9,12 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
fhttputil "github.com/openfaas/faas-provider/httputil"
|
||||
"github.com/openfaas/faas/gateway/pkg/middleware"
|
||||
"github.com/openfaas/faas/gateway/types"
|
||||
)
|
||||
@ -28,7 +31,10 @@ func MakeForwardingProxyHandler(proxy *types.HTTPClientReverseProxy,
|
||||
writeRequestURI = exists
|
||||
}
|
||||
|
||||
reverseProxy := makeRewriteProxy(baseURLResolver, urlPathTransformer)
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
baseURL := baseURLResolver.Resolve(r)
|
||||
originalURL := r.URL.String()
|
||||
requestURL := urlPathTransformer.Transform(r)
|
||||
@ -39,13 +45,13 @@ func MakeForwardingProxyHandler(proxy *types.HTTPClientReverseProxy,
|
||||
|
||||
start := time.Now()
|
||||
|
||||
statusCode, err := forwardRequest(w, r, proxy.Client, baseURL, requestURL, proxy.Timeout, writeRequestURI, serviceAuthInjector)
|
||||
|
||||
seconds := time.Since(start)
|
||||
statusCode, err := forwardRequest(w, r, proxy.Client, baseURL, requestURL, proxy.Timeout, writeRequestURI, serviceAuthInjector, reverseProxy)
|
||||
if err != nil {
|
||||
log.Printf("error with upstream request to: %s, %s\n", requestURL, err.Error())
|
||||
}
|
||||
|
||||
seconds := time.Since(start)
|
||||
|
||||
for _, notifier := range notifiers {
|
||||
notifier.Notify(r.Method, requestURL, originalURL, statusCode, "completed", seconds)
|
||||
}
|
||||
@ -86,7 +92,12 @@ func forwardRequest(w http.ResponseWriter,
|
||||
requestURL string,
|
||||
timeout time.Duration,
|
||||
writeRequestURI bool,
|
||||
serviceAuthInjector middleware.AuthInjector) (int, error) {
|
||||
serviceAuthInjector middleware.AuthInjector,
|
||||
reverseProxy *httputil.ReverseProxy) (int, error) {
|
||||
|
||||
if r.Body != nil {
|
||||
defer r.Body.Close()
|
||||
}
|
||||
|
||||
upstreamReq := buildUpstreamRequest(r, baseURL, requestURL)
|
||||
if upstreamReq.Body != nil {
|
||||
@ -101,14 +112,20 @@ func forwardRequest(w http.ResponseWriter,
|
||||
log.Printf("forwardRequest: %s %s\n", upstreamReq.Host, upstreamReq.URL.String())
|
||||
}
|
||||
|
||||
if strings.HasPrefix(r.Header.Get("Accept"), "text/event-stream") {
|
||||
ww := fhttputil.NewHttpWriteInterceptor(w)
|
||||
reverseProxy.ServeHTTP(ww, upstreamReq)
|
||||
return ww.Status(), nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(r.Context(), timeout)
|
||||
defer cancel()
|
||||
|
||||
res, resErr := proxyClient.Do(upstreamReq.WithContext(ctx))
|
||||
if resErr != nil {
|
||||
res, err := proxyClient.Do(upstreamReq.WithContext(ctx))
|
||||
if err != nil {
|
||||
badStatus := http.StatusBadGateway
|
||||
w.WriteHeader(badStatus)
|
||||
return badStatus, resErr
|
||||
return badStatus, err
|
||||
}
|
||||
|
||||
if res.Body != nil {
|
||||
@ -117,12 +134,10 @@ func forwardRequest(w http.ResponseWriter,
|
||||
|
||||
copyHeaders(w.Header(), &res.Header)
|
||||
|
||||
// Write status code
|
||||
w.WriteHeader(res.StatusCode)
|
||||
|
||||
if res.Body != nil {
|
||||
// Copy the body over
|
||||
io.CopyBuffer(w, res.Body, nil)
|
||||
io.Copy(w, res.Body)
|
||||
}
|
||||
|
||||
return res.StatusCode, nil
|
||||
@ -159,3 +174,14 @@ var hopHeaders = []string{
|
||||
"Transfer-Encoding",
|
||||
"Upgrade",
|
||||
}
|
||||
|
||||
func makeRewriteProxy(baseURLResolver middleware.BaseURLResolver, urlPathTransformer middleware.URLPathTransformer) *httputil.ReverseProxy {
|
||||
return &httputil.ReverseProxy{
|
||||
ErrorLog: log.New(io.Discard, "proxy:", 0),
|
||||
Transport: http.DefaultClient.Transport,
|
||||
ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
|
||||
},
|
||||
Director: func(r *http.Request) {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
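
The notable change in forwardRequest above is the new branch for requests whose Accept header begins with text/event-stream: those skip the buffered proxyClient and are served through an httputil.ReverseProxy, so Server-Sent Events reach the caller as they are produced. A self-contained sketch of the same detect-and-flush idea; the handler below is illustrative and not the gateway's code:

package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

func sse(w http.ResponseWriter, r *http.Request) {
	// Same detection the gateway applies before switching to the streaming path.
	if !strings.HasPrefix(r.Header.Get("Accept"), "text/event-stream") {
		http.Error(w, "expected Accept: text/event-stream", http.StatusNotAcceptable)
		return
	}

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "text/event-stream")
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "data: event %d\n\n", i)
		flusher.Flush() // push each event immediately instead of buffering the response
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/stream", sse)
	http.ListenAndServe(":8082", nil)
}
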
@ -6,7 +6,7 @@ package handlers
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
@ -33,7 +33,7 @@ func Test_buildUpstreamRequest_Body_Method_Query(t *testing.T) {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
upstreamBytes, _ := ioutil.ReadAll(upstream.Body)
|
||||
upstreamBytes, _ := io.ReadAll(upstream.Body)
|
||||
|
||||
if string(upstreamBytes) != string(srcBytes) {
|
||||
t.Errorf("Body - want: %s, got: %s", string(upstreamBytes), string(srcBytes))
|
||||
@ -212,7 +212,7 @@ func Test_buildUpstreamRequest_WithPathNoQuery(t *testing.T) {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
upstreamBytes, _ := ioutil.ReadAll(upstream.Body)
|
||||
upstreamBytes, _ := io.ReadAll(upstream.Body)
|
||||
|
||||
if string(upstreamBytes) != string(srcBytes) {
|
||||
t.Errorf("Body - want: %s, got: %s", string(upstreamBytes), string(srcBytes))
|
||||
@ -268,7 +268,7 @@ func Test_buildUpstreamRequest_WithNoPathNoQuery(t *testing.T) {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
upstreamBytes, _ := ioutil.ReadAll(upstream.Body)
|
||||
upstreamBytes, _ := io.ReadAll(upstream.Body)
|
||||
|
||||
if string(upstreamBytes) != string(srcBytes) {
|
||||
t.Errorf("Body - want: %s, got: %s", string(upstreamBytes), string(srcBytes))
|
||||
@ -322,7 +322,7 @@ func Test_buildUpstreamRequest_WithPathAndQuery(t *testing.T) {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
upstreamBytes, _ := ioutil.ReadAll(upstream.Body)
|
||||
upstreamBytes, _ := io.ReadAll(upstream.Body)
|
||||
|
||||
if string(upstreamBytes) != string(srcBytes) {
|
||||
t.Errorf("Body - want: %s, got: %s", string(upstreamBytes), string(srcBytes))
|
||||
|
@ -5,7 +5,7 @@ package handlers
|
||||
|
||||
import "net/http"
|
||||
|
||||
//HealthzHandler healthz hanlder for mertics server
|
||||
// HealthzHandler is the healthz handler for the metrics server
|
||||
func HealthzHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
switch r.Method {
|
||||
|
@ -5,10 +5,9 @@ package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"io/ioutil"
|
||||
"net/http/httptest"
|
||||
|
||||
providerTypes "github.com/openfaas/faas-provider/types"
|
||||
@ -27,7 +26,7 @@ func MakeInfoHandler(h http.Handler) http.HandlerFunc {
|
||||
|
||||
var provider *providerTypes.ProviderInfo
|
||||
|
||||
upstreamBody, _ := ioutil.ReadAll(upstreamCall.Body)
|
||||
upstreamBody, _ := io.ReadAll(upstreamCall.Body)
|
||||
err := json.Unmarshal(upstreamBody, &provider)
|
||||
if err != nil {
|
||||
log.Printf("Error unmarshalling provider json from body %s. Error %s\n", upstreamBody, err.Error())
|
||||
|
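
MakeInfoHandler buffers the whole upstream body with io.ReadAll before unmarshalling it, which keeps the raw bytes available for the error log. When that is not needed, decoding straight from the stream is an equivalent alternative; the struct below is an illustrative stand-in for providerTypes.ProviderInfo:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// providerInfo is an illustrative stand-in for providerTypes.ProviderInfo.
type providerInfo struct {
	Provider string `json:"provider"`
	Version  struct {
		Release string `json:"release"`
	} `json:"version"`
}

func main() {
	body := strings.NewReader(`{"provider":"faas-netes","version":{"release":"0.0.0"}}`)

	// json.NewDecoder avoids buffering the body in memory before decoding.
	var info providerInfo
	if err := json.NewDecoder(body).Decode(&info); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(info.Provider, info.Version.Release)
}
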
@ -3,7 +3,7 @@ package handlers
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
@ -52,7 +52,7 @@ func Test_logsProxyDoesNotLeakGoroutinesWhenProviderClosesConnection(t *testing.
|
||||
t.Fatalf("unexpected error sending log request: %s", err)
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading the response body: %s", err)
|
||||
}
|
||||
@ -126,7 +126,7 @@ func Test_logsProxyDoesNotLeakGoroutinesWhenClientClosesConnection(t *testing.T)
|
||||
go func() {
|
||||
defer resp.Body.Close()
|
||||
defer close(errCh)
|
||||
_, err := ioutil.ReadAll(resp.Body)
|
||||
_, err := io.ReadAll(resp.Body)
|
||||
errCh <- err
|
||||
}()
|
||||
cancel()
|
||||
|
@ -6,50 +6,21 @@ package handlers
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/openfaas/faas-provider/httputil"
|
||||
)
|
||||
|
||||
// MakeNotifierWrapper wraps a http.HandlerFunc in an interceptor to pass to HTTPNotifier
|
||||
func MakeNotifierWrapper(next http.HandlerFunc, notifiers []HTTPNotifier) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
then := time.Now()
|
||||
|
||||
writer := newWriteInterceptor(w)
|
||||
next(&writer, r)
|
||||
|
||||
url := r.URL.String()
|
||||
|
||||
writer := httputil.NewHttpWriteInterceptor(w)
|
||||
next(writer, r)
|
||||
|
||||
for _, notifier := range notifiers {
|
||||
notifier.Notify(r.Method, url, url, writer.Status(), "completed", time.Since(then))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newWriteInterceptor(w http.ResponseWriter) writeInterceptor {
|
||||
return writeInterceptor{
|
||||
w: w,
|
||||
}
|
||||
}
|
||||
|
||||
type writeInterceptor struct {
|
||||
CapturedStatusCode int
|
||||
w http.ResponseWriter
|
||||
}
|
||||
|
||||
func (c *writeInterceptor) Status() int {
|
||||
if c.CapturedStatusCode == 0 {
|
||||
return http.StatusOK
|
||||
}
|
||||
return c.CapturedStatusCode
|
||||
}
|
||||
|
||||
func (c *writeInterceptor) Header() http.Header {
|
||||
return c.w.Header()
|
||||
}
|
||||
|
||||
func (c *writeInterceptor) Write(data []byte) (int, error) {
|
||||
return c.w.Write(data)
|
||||
}
|
||||
|
||||
func (c *writeInterceptor) WriteHeader(code int) {
|
||||
c.CapturedStatusCode = code
|
||||
c.w.WriteHeader(code)
|
||||
}
|
||||
|
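
MakeNotifierWrapper now uses httputil.NewHttpWriteInterceptor from faas-provider in place of the local writeInterceptor shown above; both exist to record the status code a wrapped handler writes so notifiers can report it together with the elapsed time. A generic sketch of that record-then-notify pattern with a hand-rolled recorder, not the faas-provider type:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// statusRecorder captures the status code written by the wrapped handler.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (s *statusRecorder) WriteHeader(code int) {
	s.status = code
	s.ResponseWriter.WriteHeader(code)
}

// withTiming logs method, path, status and duration after the handler runs.
func withTiming(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		next(rec, r)
		log.Printf("%s %s => %d (%s)", r.Method, r.URL.Path, rec.status, time.Since(start))
	}
}

func main() {
	http.HandleFunc("/", withTiming(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	http.ListenAndServe(":8083", nil)
}
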
@ -17,20 +17,6 @@ type HTTPNotifier interface {
|
||||
Notify(method string, URL string, originalURL string, statusCode int, event string, duration time.Duration)
|
||||
}
|
||||
|
||||
// PrometheusServiceNotifier notifier for core service endpoints
|
||||
type PrometheusServiceNotifier struct {
|
||||
ServiceMetrics *metrics.ServiceMetricOptions
|
||||
}
|
||||
|
||||
// Notify about service metrics
|
||||
func (psn PrometheusServiceNotifier) Notify(method string, URL string, originalURL string, statusCode int, event string, duration time.Duration) {
|
||||
code := fmt.Sprintf("%d", statusCode)
|
||||
path := urlToLabel(URL)
|
||||
|
||||
psn.ServiceMetrics.Counter.WithLabelValues(method, path, code).Inc()
|
||||
psn.ServiceMetrics.Histogram.WithLabelValues(method, path, code).Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
func urlToLabel(path string) string {
|
||||
if len(path) > 0 {
|
||||
path = strings.TrimRight(path, "/")
|
||||
|
@ -1,45 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/sync/singleflight"
|
||||
|
||||
"github.com/openfaas/faas/gateway/pkg/middleware"
|
||||
"github.com/openfaas/faas/gateway/probing"
|
||||
)
|
||||
|
||||
func MakeProbeHandler(prober probing.FunctionProber, cache probing.ProbeCacher, resolver middleware.BaseURLResolver, next http.HandlerFunc, defaultNamespace string) http.HandlerFunc {
|
||||
|
||||
group := singleflight.Group{}
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
functionName, namespace := middleware.GetNamespace(defaultNamespace, middleware.GetServiceName(r.URL.String()))
|
||||
|
||||
key := fmt.Sprintf("Probe-%s.%s", functionName, namespace)
|
||||
res, _, _ := group.Do(key, func() (interface{}, error) {
|
||||
|
||||
cached, hit := cache.Get(functionName, namespace)
|
||||
var probeResult probing.FunctionProbeResult
|
||||
if hit && cached != nil && cached.Available {
|
||||
probeResult = *cached
|
||||
} else {
|
||||
probeResult = prober.Probe(functionName, namespace)
|
||||
cache.Set(functionName, namespace, &probeResult)
|
||||
}
|
||||
|
||||
return probeResult, nil
|
||||
})
|
||||
|
||||
fnRes := res.(probing.FunctionProbeResult)
|
||||
|
||||
if !fnRes.Available {
|
||||
http.Error(w, fmt.Sprintf("unable to probe function endpoint %s", fnRes.Error),
|
||||
http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
next(w, r)
|
||||
}
|
||||
}
|
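
The removed MakeProbeHandler above leans on golang.org/x/sync/singleflight so that concurrent requests for the same function share one probe rather than each hitting the backend; the key format Probe-<name>.<namespace> comes from the code shown. A self-contained sketch of that deduplication, with an illustrative function name:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var group singleflight.Group
	var probes int64

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Callers using the same key coalesce onto a single in-flight probe.
			res, _, _ := group.Do("Probe-figlet.openfaas-fn", func() (interface{}, error) {
				atomic.AddInt64(&probes, 1)
				time.Sleep(100 * time.Millisecond) // simulate a slow probe
				return "available", nil
			})
			_ = res
		}()
	}
	wg.Wait()

	// Typically prints 1: the overlapping callers shared one probe.
	fmt.Printf("10 callers, %d probe(s) executed\n", atomic.LoadInt64(&probes))
}
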
@ -5,7 +5,7 @@ package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@ -19,20 +19,19 @@ import (
|
||||
"github.com/openfaas/faas/gateway/scaling"
|
||||
)
|
||||
|
||||
const queueAnnotation = "com.openfaas.queue"
|
||||
|
||||
// MakeQueuedProxy accepts work onto a queue
|
||||
func MakeQueuedProxy(metrics metrics.MetricOptions, queuer ftypes.RequestQueuer, pathTransformer middleware.URLPathTransformer, defaultNS string, functionQuery scaling.FunctionQuery) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var body []byte
|
||||
if r.Body != nil {
|
||||
defer r.Body.Close()
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
var err error
|
||||
body, err = io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
callbackURL, err := getCallbackURLHeader(r.Header)
|
||||
@ -44,12 +43,6 @@ func MakeQueuedProxy(metrics metrics.MetricOptions, queuer ftypes.RequestQueuer,
|
||||
vars := mux.Vars(r)
|
||||
name := vars["name"]
|
||||
|
||||
queueName, err := getQueueName(name, functionQuery)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
req := &ftypes.QueueRequest{
|
||||
Function: name,
|
||||
Body: body,
|
||||
@ -59,16 +52,12 @@ func MakeQueuedProxy(metrics metrics.MetricOptions, queuer ftypes.RequestQueuer,
|
||||
Header: r.Header,
|
||||
Host: r.Host,
|
||||
CallbackURL: callbackURL,
|
||||
QueueName: queueName,
|
||||
}
|
||||
|
||||
if len(queueName) > 0 {
|
||||
log.Printf("Queueing %s to: %s\n", name, queueName)
|
||||
}
|
||||
|
||||
if err = queuer.Queue(req); err != nil {
|
||||
fmt.Printf("Queue error: %v\n", err)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
log.Printf("Error queuing request: %v", err)
|
||||
http.Error(w, fmt.Sprintf("Error queuing request: %s", err.Error()),
|
||||
http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@ -92,21 +81,6 @@ func getCallbackURLHeader(header http.Header) (*url.URL, error) {
|
||||
return callbackURL, nil
|
||||
}
|
||||
|
||||
func getQueueName(name string, fnQuery scaling.FunctionQuery) (queueName string, err error) {
|
||||
fn, ns := getNameParts(name)
|
||||
|
||||
annotations, err := fnQuery.GetAnnotations(fn, ns)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
queueName = ""
|
||||
if v := annotations[queueAnnotation]; len(v) > 0 {
|
||||
queueName = v
|
||||
}
|
||||
|
||||
return queueName, err
|
||||
}
|
||||
|
||||
func getNameParts(name string) (fn, ns string) {
|
||||
fn = name
|
||||
ns = ""
|
||||
|
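
MakeQueuedProxy reads the body, captures the headers and an optional callback URL, enqueues the work and returns without waiting for the function. A client-side sketch of an asynchronous invocation; the gateway address, function name and the X-Callback-Url header value are illustrative assumptions based on the handler and routes shown here:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// POST to the async route; the gateway queues the call and replies
	// as soon as the request has been accepted onto the queue.
	body := bytes.NewBufferString(`{"text":"hello"}`)
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:8080/async-function/figlet", body)
	if err != nil {
		panic(err)
	}

	// Optional: where the queue-worker should deliver the function's result.
	req.Header.Set("X-Callback-Url", "http://127.0.0.1:8888/result")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	fmt.Println("queued with status:", res.StatusCode)
}
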
@ -15,7 +15,6 @@ import (
|
||||
"github.com/openfaas/faas/gateway/metrics"
|
||||
"github.com/openfaas/faas/gateway/pkg/middleware"
|
||||
"github.com/openfaas/faas/gateway/plugin"
|
||||
"github.com/openfaas/faas/gateway/probing"
|
||||
"github.com/openfaas/faas/gateway/scaling"
|
||||
"github.com/openfaas/faas/gateway/types"
|
||||
"github.com/openfaas/faas/gateway/version"
|
||||
@ -83,13 +82,9 @@ func main() {
|
||||
FunctionNamespace: config.Namespace,
|
||||
}
|
||||
|
||||
prometheusServiceNotifier := handlers.PrometheusServiceNotifier{
|
||||
ServiceMetrics: metricsOptions.ServiceMetrics,
|
||||
}
|
||||
|
||||
functionNotifiers := []handlers.HTTPNotifier{loggingNotifier, prometheusNotifier}
|
||||
forwardingNotifiers := []handlers.HTTPNotifier{loggingNotifier, prometheusServiceNotifier}
|
||||
quietNotifier := []handlers.HTTPNotifier{prometheusServiceNotifier}
|
||||
forwardingNotifiers := []handlers.HTTPNotifier{loggingNotifier}
|
||||
quietNotifier := []handlers.HTTPNotifier{}
|
||||
|
||||
urlResolver := middleware.SingleHostBaseURLResolver{BaseURL: config.FunctionsProviderURL.String()}
|
||||
var functionURLResolver middleware.BaseURLResolver
|
||||
@ -97,16 +92,8 @@ func main() {
|
||||
nilURLTransformer := middleware.TransparentURLPathTransformer{}
|
||||
trimURLTransformer := middleware.FunctionPrefixTrimmingURLPathTransformer{}
|
||||
|
||||
if config.DirectFunctions {
|
||||
functionURLResolver = middleware.FunctionAsHostBaseURLResolver{
|
||||
FunctionSuffix: config.DirectFunctionsSuffix,
|
||||
FunctionNamespace: config.Namespace,
|
||||
}
|
||||
functionURLTransformer = trimURLTransformer
|
||||
} else {
|
||||
functionURLResolver = urlResolver
|
||||
functionURLTransformer = nilURLTransformer
|
||||
}
|
||||
functionURLResolver = urlResolver
|
||||
functionURLTransformer = nilURLTransformer
|
||||
|
||||
var serviceAuthInjector middleware.AuthInjector
|
||||
|
||||
@ -114,8 +101,6 @@ func main() {
|
||||
serviceAuthInjector = &middleware.BasicAuthInjector{Credentials: credentials}
|
||||
}
|
||||
|
||||
decorateExternalAuth := handlers.MakeExternalAuthHandler
|
||||
|
||||
// externalServiceQuery is used to query metadata from the provider about a function
|
||||
externalServiceQuery := plugin.NewExternalServiceQuery(*config.FunctionsProviderURL, serviceAuthInjector)
|
||||
|
||||
@ -145,6 +130,7 @@ func main() {
|
||||
faasHandlers.SecretHandler = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)
|
||||
|
||||
faasHandlers.NamespaceListerHandler = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)
|
||||
faasHandlers.NamespaceMutatorHandler = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)
|
||||
|
||||
faasHandlers.Alert = handlers.MakeNotifierWrapper(
|
||||
handlers.MakeAlertHandler(externalServiceQuery, config.Namespace),
|
||||
@ -155,13 +141,6 @@ func main() {
|
||||
|
||||
functionProxy := faasHandlers.Proxy
|
||||
|
||||
if config.ProbeFunctions {
|
||||
prober := probing.NewFunctionProber(cachedFunctionQuery, functionURLResolver)
|
||||
// Default of 5 seconds between refreshing probes for function invocations
|
||||
probeCache := probing.NewProbeCache(time.Second * 5)
|
||||
functionProxy = handlers.MakeProbeHandler(prober, probeCache, functionURLResolver, functionProxy, config.Namespace)
|
||||
}
|
||||
|
||||
if config.ScaleFromZero {
|
||||
scalingFunctionCache := scaling.NewFunctionCache(scalingConfig.CacheExpiry)
|
||||
scaler := scaling.NewFunctionScaler(scalingConfig, scalingFunctionCache)
|
||||
@ -169,7 +148,9 @@ func main() {
|
||||
}
|
||||
|
||||
if config.UseNATS() {
|
||||
log.Println("Async enabled: Using NATS Streaming.")
|
||||
log.Println("Async enabled: Using NATS Streaming")
|
||||
log.Println("Deprecation Notice: NATS Streaming is no longer maintained and won't receive updates from June 2023")
|
||||
|
||||
maxReconnect := 60
|
||||
interval := time.Second * 2
|
||||
|
||||
@ -188,31 +169,33 @@ func main() {
|
||||
|
||||
prometheusQuery := metrics.NewPrometheusQuery(config.PrometheusHost, config.PrometheusPort, &http.Client{})
|
||||
faasHandlers.ListFunctions = metrics.AddMetricsHandler(faasHandlers.ListFunctions, prometheusQuery)
|
||||
faasHandlers.ScaleFunction = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)
|
||||
faasHandlers.ScaleFunction = scaling.MakeHorizontalScalingHandler(handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector))
|
||||
|
||||
if credentials != nil {
|
||||
faasHandlers.Alert =
|
||||
decorateExternalAuth(faasHandlers.Alert, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.Alert, credentials)
|
||||
faasHandlers.UpdateFunction =
|
||||
decorateExternalAuth(faasHandlers.UpdateFunction, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.UpdateFunction, credentials)
|
||||
faasHandlers.DeleteFunction =
|
||||
decorateExternalAuth(faasHandlers.DeleteFunction, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.DeleteFunction, credentials)
|
||||
faasHandlers.DeployFunction =
|
||||
decorateExternalAuth(faasHandlers.DeployFunction, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.DeployFunction, credentials)
|
||||
faasHandlers.ListFunctions =
|
||||
decorateExternalAuth(faasHandlers.ListFunctions, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.ListFunctions, credentials)
|
||||
faasHandlers.ScaleFunction =
|
||||
decorateExternalAuth(faasHandlers.ScaleFunction, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.ScaleFunction, credentials)
|
||||
faasHandlers.FunctionStatus =
|
||||
decorateExternalAuth(faasHandlers.FunctionStatus, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.FunctionStatus, credentials)
|
||||
faasHandlers.InfoHandler =
|
||||
decorateExternalAuth(faasHandlers.InfoHandler, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.InfoHandler, credentials)
|
||||
faasHandlers.SecretHandler =
|
||||
decorateExternalAuth(faasHandlers.SecretHandler, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.SecretHandler, credentials)
|
||||
faasHandlers.LogProxyHandler =
|
||||
decorateExternalAuth(faasHandlers.LogProxyHandler, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.LogProxyHandler, credentials)
|
||||
faasHandlers.NamespaceListerHandler =
|
||||
decorateExternalAuth(faasHandlers.NamespaceListerHandler, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)
|
||||
auth.DecorateWithBasicAuth(faasHandlers.NamespaceListerHandler, credentials)
|
||||
faasHandlers.NamespaceMutatorHandler =
|
||||
auth.DecorateWithBasicAuth(faasHandlers.NamespaceMutatorHandler, credentials)
|
||||
}
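These call sites swap basic-auth decoration for an external authentication handler built by handlers.MakeExternalAuthHandler, whose body is not part of this diff. Purely as an illustration of the shape implied by the arguments (handler, upstream timeout, auth proxy URL, pass-body flag), a hypothetical decorator could look like the sketch below; the header forwarding and the unused passBody flag are assumptions, not the gateway's actual implementation.

```go
package main

import (
	"log"
	"net/http"
	"time"
)

// externalAuthDecorator is a hypothetical stand-in for handlers.MakeExternalAuthHandler,
// mirroring the argument list used above: wrap next so the request is only served
// when an auth proxy at authURL accepts the caller's Authorization header.
// The passBody flag is accepted but ignored in this sketch.
func externalAuthDecorator(next http.HandlerFunc, upstreamTimeout time.Duration, authURL string, passBody bool) http.HandlerFunc {
	client := &http.Client{Timeout: upstreamTimeout}

	return func(w http.ResponseWriter, r *http.Request) {
		req, err := http.NewRequest(http.MethodGet, authURL, nil)
		if err != nil {
			http.Error(w, "error building auth request", http.StatusInternalServerError)
			return
		}
		// Pass the caller's credentials through to the auth proxy.
		req.Header.Set("Authorization", r.Header.Get("Authorization"))

		res, err := client.Do(req)
		if err != nil {
			http.Error(w, "auth proxy unreachable", http.StatusUnauthorized)
			return
		}
		defer res.Body.Close()

		if res.StatusCode < http.StatusOK || res.StatusCode >= http.StatusMultipleChoices {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}

		next(w, r)
	}
}

func main() {
	protected := externalAuthDecorator(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}, 5*time.Second, "http://127.0.0.1:8080/validate", true)

	log.Fatal(http.ListenAndServe(":8081", protected))
}
```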
|
||||
|
||||
r := mux.NewRouter()
|
||||
@ -236,6 +219,8 @@ func main() {
|
||||
r.HandleFunc("/system/logs", faasHandlers.LogProxyHandler).Methods(http.MethodGet)
|
||||
|
||||
r.HandleFunc("/system/namespaces", faasHandlers.NamespaceListerHandler).Methods(http.MethodGet)
|
||||
r.HandleFunc("/system/namespace/{namespace:["+NameExpression+"]*}", faasHandlers.NamespaceMutatorHandler).
|
||||
Methods(http.MethodPost, http.MethodDelete, http.MethodPut, http.MethodGet)
|
||||
|
||||
if faasHandlers.QueuedProxy != nil {
|
||||
r.HandleFunc("/async-function/{name:["+NameExpression+"]+}/", faasHandlers.QueuedProxy).Methods(http.MethodPost)
|
||||
@ -252,9 +237,11 @@ func main() {
|
||||
uiHandler := http.StripPrefix("/ui", fsCORS)
|
||||
if credentials != nil {
|
||||
r.PathPrefix("/ui/").Handler(
|
||||
decorateExternalAuth(uiHandler.ServeHTTP, config.UpstreamTimeout, config.AuthProxyURL, config.AuthProxyPassBody)).Methods(http.MethodGet)
|
||||
auth.DecorateWithBasicAuth(uiHandler.ServeHTTP, credentials)).
|
||||
Methods(http.MethodGet)
|
||||
} else {
|
||||
r.PathPrefix("/ui/").Handler(uiHandler).Methods(http.MethodGet)
|
||||
r.PathPrefix("/ui/").Handler(uiHandler).
|
||||
Methods(http.MethodGet)
|
||||
}
|
||||
|
||||
//Start metrics server in a goroutine
|
||||
@ -278,7 +265,7 @@ func main() {
|
||||
log.Fatal(s.ListenAndServe())
|
||||
}
|
||||
|
||||
//runMetricsServer Listen on a separate HTTP port for Prometheus metrics to keep this accessible from
|
||||
// runMetricsServer Listen on a separate HTTP port for Prometheus metrics to keep this accessible from
|
||||
// the internal network only.
|
||||
func runMetricsServer() {
|
||||
metricsHandler := metrics.PrometheusHandler()
|
||||
|
@ -3,7 +3,7 @@ package metrics
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@ -28,25 +28,23 @@ func AddMetricsHandler(handler http.HandlerFunc, prometheusQuery PrometheusQuery
|
||||
}
|
||||
|
||||
defer upstreamCall.Body.Close()
|
||||
upstreamBody, _ := ioutil.ReadAll(upstreamCall.Body)
|
||||
upstreamBody, _ := io.ReadAll(upstreamCall.Body)
|
||||
|
||||
if recorder.Code != http.StatusOK {
|
||||
log.Printf("List functions responded with code %d, body: %s",
|
||||
recorder.Code,
|
||||
string(upstreamBody))
|
||||
|
||||
http.Error(w, "Metrics hander: unexpected status code retrieving functions from backend", http.StatusInternalServerError)
|
||||
http.Error(w, string(upstreamBody), recorder.Code)
|
||||
return
|
||||
}
|
||||
|
||||
var functions []types.FunctionStatus
|
||||
|
||||
err := json.Unmarshal(upstreamBody, &functions)
|
||||
|
||||
if err != nil {
|
||||
log.Printf("Metrics upstream error: %s", err)
|
||||
log.Printf("Metrics upstream error: %s, value: %s", err, string(upstreamBody))
|
||||
|
||||
http.Error(w, "Error parsing metrics from upstream provider/backend", http.StatusInternalServerError)
|
||||
http.Error(w, "Unable to parse list of functions from provider", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@ -63,8 +61,8 @@ func AddMetricsHandler(handler http.HandlerFunc, prometheusQuery PrometheusQuery
|
||||
|
||||
results, err := prometheusQuery.Fetch(url.QueryEscape(q))
|
||||
if err != nil {
|
||||
// log the error but continue, the mixIn will correctly handle the empty results.
|
||||
log.Printf("Error querying Prometheus: %s\n", err.Error())
|
||||
return
|
||||
}
|
||||
mixIn(&functions, results)
|
||||
}
|
||||
@ -72,7 +70,7 @@ func AddMetricsHandler(handler http.HandlerFunc, prometheusQuery PrometheusQuery
|
||||
bytesOut, err := json.Marshal(functions)
|
||||
if err != nil {
|
||||
log.Printf("Error serializing functions: %s", err)
|
||||
http.Error(w, "error writing response after adding metrics", http.StatusInternalServerError)
|
||||
http.Error(w, "Error writing response after adding metrics", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
types "github.com/openfaas/faas-provider/types"
|
||||
@ -55,6 +56,34 @@ func Test_PrometheusMetrics_MixedInto_Services(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MetricHandler_ForwardsErrors(t *testing.T) {
|
||||
functionsHandler := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusConflict)
|
||||
w.Write([]byte("test error case"))
|
||||
}
|
||||
// explicitly set the query fetcher to nil because it should
|
||||
// not be called when a non-200 response is returned from the
|
||||
// functions handler, if it is called then the test will panic
|
||||
handler := AddMetricsHandler(functionsHandler, nil)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
request, _ := http.NewRequest(http.MethodGet, "/system/functions", nil)
|
||||
handler.ServeHTTP(rr, request)
|
||||
|
||||
if status := rr.Code; status != http.StatusConflict {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", status, http.StatusConflict)
|
||||
}
|
||||
|
||||
if rr.Header().Get("Content-Type") != "text/plain; charset=utf-8" {
|
||||
t.Errorf("Want 'text/plain; charset=utf-8' content-type, got: %s", rr.Header().Get("Content-Type"))
|
||||
}
|
||||
body := strings.TrimSpace(rr.Body.String())
|
||||
if body != "test error case" {
|
||||
t.Errorf("Want 'test error case', got: %q", body)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func Test_FunctionsHandler_ReturnsJSONAndOneFunction(t *testing.T) {
|
||||
functionsHandler := makeFunctionsHandler()
|
||||
|
||||
|
@ -7,7 +7,7 @@ package metrics
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@ -46,9 +46,6 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
|
||||
e.metricOptions.GatewayFunctionsHistogram.Describe(ch)
|
||||
e.metricOptions.ServiceReplicasGauge.Describe(ch)
|
||||
e.metricOptions.GatewayFunctionInvocationStarted.Describe(ch)
|
||||
|
||||
e.metricOptions.ServiceMetrics.Counter.Describe(ch)
|
||||
e.metricOptions.ServiceMetrics.Histogram.Describe(ch)
|
||||
}
|
||||
|
||||
// Collect collects data to be consumed by prometheus
|
||||
@ -75,9 +72,6 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
|
||||
}
|
||||
|
||||
e.metricOptions.ServiceReplicasGauge.Collect(ch)
|
||||
|
||||
e.metricOptions.ServiceMetrics.Counter.Collect(ch)
|
||||
e.metricOptions.ServiceMetrics.Histogram.Collect(ch)
|
||||
}
|
||||
|
||||
// StartServiceWatcher starts a ticker and collects service replica counts to expose to prometheus
|
||||
@ -144,7 +138,7 @@ func (e *Exporter) getHTTPClient(timeout time.Duration) http.Client {
|
||||
}
|
||||
|
||||
func (e *Exporter) getFunctions(endpointURL url.URL, namespace string) ([]types.FunctionStatus, error) {
|
||||
timeout := 3 * time.Second
|
||||
timeout := 5 * time.Second
|
||||
proxyClient := e.getHTTPClient(timeout)
|
||||
|
||||
endpointURL.Path = path.Join(endpointURL.Path, "/system/functions")
|
||||
@ -165,15 +159,16 @@ func (e *Exporter) getFunctions(endpointURL url.URL, namespace string) ([]types.
|
||||
return services, err
|
||||
}
|
||||
|
||||
bytesOut, readErr := ioutil.ReadAll(res.Body)
|
||||
bytesOut, readErr := io.ReadAll(res.Body)
|
||||
if readErr != nil {
|
||||
return services, readErr
|
||||
}
|
||||
|
||||
unmarshalErr := json.Unmarshal(bytesOut, &services)
|
||||
if unmarshalErr != nil {
|
||||
return services, unmarshalErr
|
||||
if err := json.Unmarshal(bytesOut, &services); err != nil {
|
||||
return services, fmt.Errorf("error unmarshalling response: %s, error: %s",
|
||||
string(bytesOut), err)
|
||||
}
|
||||
|
||||
return services, nil
|
||||
}
|
||||
|
||||
@ -186,7 +181,7 @@ func (e *Exporter) getNamespaces(endpointURL url.URL) ([]string, error) {
|
||||
get.SetBasicAuth(e.credentials.User, e.credentials.Password)
|
||||
}
|
||||
|
||||
timeout := 3 * time.Second
|
||||
timeout := 5 * time.Second
|
||||
proxyClient := e.getHTTPClient(timeout)
|
||||
|
||||
res, err := proxyClient.Do(get)
|
||||
@ -198,14 +193,13 @@ func (e *Exporter) getNamespaces(endpointURL url.URL) ([]string, error) {
|
||||
return namespaces, nil
|
||||
}
|
||||
|
||||
bytesOut, readErr := ioutil.ReadAll(res.Body)
|
||||
bytesOut, readErr := io.ReadAll(res.Body)
|
||||
if readErr != nil {
|
||||
return namespaces, readErr
|
||||
}
|
||||
|
||||
unmarshalErr := json.Unmarshal(bytesOut, &namespaces)
|
||||
if unmarshalErr != nil {
|
||||
return namespaces, unmarshalErr
|
||||
if err := json.Unmarshal(bytesOut, &namespaces); err != nil {
|
||||
return namespaces, fmt.Errorf("error unmarshalling response: %s, error: %s", string(bytesOut), err)
|
||||
}
|
||||
return namespaces, nil
|
||||
}
|
||||
|
@ -41,21 +41,21 @@ func Test_Describe_DescribesThePrometheusMetrics(t *testing.T) {
|
||||
go exporter.Describe(ch)
|
||||
|
||||
d := <-ch
|
||||
expectedGatewayFunctionInvocationDesc := `Desc{fqName: "gateway_function_invocation_total", help: "Function metrics", constLabels: {}, variableLabels: [function_name code]}`
|
||||
expectedGatewayFunctionInvocationDesc := `Desc{fqName: "gateway_function_invocation_total", help: "Function metrics", constLabels: {}, variableLabels: {function_name,code}}`
|
||||
actualGatewayFunctionInvocationDesc := d.String()
|
||||
if expectedGatewayFunctionInvocationDesc != actualGatewayFunctionInvocationDesc {
|
||||
t.Errorf("Want\n%s\ngot\n%s", expectedGatewayFunctionInvocationDesc, actualGatewayFunctionInvocationDesc)
|
||||
}
|
||||
|
||||
d = <-ch
|
||||
expectedGatewayFunctionsHistogramDesc := `Desc{fqName: "gateway_functions_seconds", help: "Function time taken", constLabels: {}, variableLabels: [function_name code]}`
|
||||
expectedGatewayFunctionsHistogramDesc := `Desc{fqName: "gateway_functions_seconds", help: "Function time taken", constLabels: {}, variableLabels: {function_name,code}}`
|
||||
actualGatewayFunctionsHistogramDesc := d.String()
|
||||
if expectedGatewayFunctionsHistogramDesc != actualGatewayFunctionsHistogramDesc {
|
||||
t.Errorf("Want\n%s\ngot\n%s", expectedGatewayFunctionsHistogramDesc, actualGatewayFunctionsHistogramDesc)
|
||||
}
|
||||
|
||||
d = <-ch
|
||||
expectedServiceReplicasGaugeDesc := `Desc{fqName: "gateway_service_count", help: "Current count of replicas for function", constLabels: {}, variableLabels: [function_name]}`
|
||||
expectedServiceReplicasGaugeDesc := `Desc{fqName: "gateway_service_count", help: "Current count of replicas for function", constLabels: {}, variableLabels: {function_name}}`
|
||||
actualServiceReplicasGaugeDesc := d.String()
|
||||
if expectedServiceReplicasGaugeDesc != actualServiceReplicasGaugeDesc {
|
||||
t.Errorf("Want\n%s\ngot\n%s", expectedServiceReplicasGaugeDesc, actualServiceReplicasGaugeDesc)
|
||||
|
@ -18,8 +18,6 @@ type MetricOptions struct {
|
||||
GatewayFunctionInvocationStarted *prometheus.CounterVec
|
||||
|
||||
ServiceReplicasGauge *prometheus.GaugeVec
|
||||
|
||||
ServiceMetrics *ServiceMetricOptions
|
||||
}
|
||||
|
||||
// ServiceMetricOptions provides RED metrics
|
||||
@ -69,24 +67,6 @@ func BuildMetricsOptions() MetricOptions {
|
||||
[]string{"function_name"},
|
||||
)
|
||||
|
||||
// For automatic monitoring and alerting (RED method)
|
||||
histogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Subsystem: "http",
|
||||
Name: "request_duration_seconds",
|
||||
Help: "Seconds spent serving HTTP requests.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
}, []string{"method", "path", "status"})
|
||||
|
||||
// Can be used Kubernetes HPA v2
|
||||
counter := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: "http",
|
||||
Name: "requests_total",
|
||||
Help: "The total number of HTTP requests.",
|
||||
},
|
||||
[]string{"method", "path", "status"},
|
||||
)
|
||||
|
||||
gatewayFunctionInvocationStarted := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "gateway",
|
||||
@ -97,16 +77,10 @@ func BuildMetricsOptions() MetricOptions {
|
||||
[]string{"function_name"},
|
||||
)
|
||||
|
||||
serviceMetricOptions := &ServiceMetricOptions{
|
||||
Counter: counter,
|
||||
Histogram: histogram,
|
||||
}
|
||||
|
||||
metricsOptions := MetricOptions{
|
||||
GatewayFunctionsHistogram: gatewayFunctionsHistogram,
|
||||
GatewayFunctionInvocation: gatewayFunctionInvocation,
|
||||
ServiceReplicasGauge: serviceReplicas,
|
||||
ServiceMetrics: serviceMetricOptions,
|
||||
GatewayFunctionInvocationStarted: gatewayFunctionInvocationStarted,
|
||||
}
|
||||
|
||||
|
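The hunk above trims BuildMetricsOptions down to the gateway's own counters, histogram and gauge plus the shared RED metrics. As a rough illustration only (field names and label sets are taken from this diff, and the gateway itself wires these up through its Exporter and metrics.PrometheusHandler rather than like this), the returned vectors can be registered and exercised as follows:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/openfaas/faas/gateway/metrics"
)

func main() {
	// Build the gateway's metric vectors and register them with the default registry.
	opts := metrics.BuildMetricsOptions()
	prometheus.MustRegister(
		opts.GatewayFunctionInvocation,
		opts.GatewayFunctionsHistogram,
		opts.GatewayFunctionInvocationStarted,
		opts.ServiceReplicasGauge,
		opts.ServiceMetrics.Counter,
		opts.ServiceMetrics.Histogram,
	)

	// Record a fictional invocation: started, then completed with HTTP 200 in 0.25s.
	opts.GatewayFunctionInvocationStarted.WithLabelValues("figlet").Inc()
	opts.GatewayFunctionInvocation.WithLabelValues("figlet", "200").Inc()
	opts.GatewayFunctionsHistogram.WithLabelValues("figlet", "200").Observe(0.25)

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8082", nil))
}
```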
@ -3,7 +3,7 @@ package metrics
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
@ -44,7 +44,7 @@ func (q PrometheusQuery) Fetch(query string) (*VectorQueryResponse, error) {
|
||||
defer res.Body.Close()
|
||||
}
|
||||
|
||||
bytesOut, readErr := ioutil.ReadAll(res.Body)
|
||||
bytesOut, readErr := io.ReadAll(res.Body)
|
||||
if readErr != nil {
|
||||
return nil, readErr
|
||||
}
|
||||
|
@ -2,6 +2,8 @@ package middleware
|
||||
|
||||
import "net/http"
|
||||
|
||||
// AuthInjector is an interface for injecting authentication information into a request
|
||||
// which will be proxied or made to a remote/upstream service.
|
||||
type AuthInjector interface {
|
||||
Inject(r *http.Request)
|
||||
}
|
||||
|
@ -7,7 +7,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
@ -89,7 +89,7 @@ func (s ExternalServiceQuery) GetReplicas(serviceName, serviceNamespace string)
|
||||
|
||||
var bytesOut []byte
|
||||
if res.Body != nil {
|
||||
bytesOut, _ = ioutil.ReadAll(res.Body)
|
||||
bytesOut, _ = io.ReadAll(res.Body)
|
||||
defer res.Body.Close()
|
||||
}
|
||||
|
||||
@ -111,20 +111,17 @@ func (s ExternalServiceQuery) GetReplicas(serviceName, serviceNamespace string)
|
||||
scalingFactor := uint64(scaling.DefaultScalingFactor)
|
||||
availableReplicas := function.AvailableReplicas
|
||||
|
||||
targetLoad := uint64(scaling.DefaultTargetLoad)
|
||||
|
||||
if function.Labels != nil {
|
||||
labels := *function.Labels
|
||||
|
||||
minReplicas = extractLabelValue(labels[scaling.MinScaleLabel], minReplicas)
|
||||
maxReplicas = extractLabelValue(labels[scaling.MaxScaleLabel], maxReplicas)
|
||||
extractedScalingFactor := extractLabelValue(labels[scaling.ScalingFactorLabel], scalingFactor)
|
||||
targetLoad = extractLabelValue(labels[scaling.TargetLoadLabel], targetLoad)
|
||||
|
||||
if extractedScalingFactor >= 0 && extractedScalingFactor <= 100 {
|
||||
if extractedScalingFactor > 0 && extractedScalingFactor <= 100 {
|
||||
scalingFactor = extractedScalingFactor
|
||||
} else {
|
||||
log.Printf("Bad Scaling Factor: %d, is not in range of [0 - 100]. Will fallback to %d", extractedScalingFactor, scalingFactor)
|
||||
return scaling.ServiceQueryResponse{}, fmt.Errorf("bad scaling factor: %d, is not in range of [0 - 100]", extractedScalingFactor)
|
||||
}
|
||||
}
|
||||
|
||||
@ -135,7 +132,6 @@ func (s ExternalServiceQuery) GetReplicas(serviceName, serviceNamespace string)
|
||||
ScalingFactor: scalingFactor,
|
||||
AvailableReplicas: availableReplicas,
|
||||
Annotations: function.Annotations,
|
||||
TargetLoad: targetLoad,
|
||||
}, err
|
||||
}
|
||||
|
||||
|
@ -75,7 +75,6 @@ func TestGetReplicasExistentFn(t *testing.T) {
|
||||
MinReplicas: uint64(scaling.DefaultMinReplicas),
|
||||
ScalingFactor: uint64(scaling.DefaultScalingFactor),
|
||||
AvailableReplicas: 0,
|
||||
TargetLoad: 10,
|
||||
}
|
||||
|
||||
var injector middleware.AuthInjector
|
||||
|
@ -1,58 +0,0 @@
|
||||
// Copyright (c) OpenFaaS Author(s). All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package probing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ProbeCacher queries functions and caches the results
|
||||
type ProbeCacher interface {
|
||||
Set(functionName, namespace string, result *FunctionProbeResult)
|
||||
Get(functionName, namespace string) (result *FunctionProbeResult, hit bool)
|
||||
}
|
||||
|
||||
// ProbeCache provides a cache of Probe replica counts
|
||||
type ProbeCache struct {
|
||||
Cache map[string]*FunctionProbeResult
|
||||
Expiry time.Duration
|
||||
Sync sync.RWMutex
|
||||
}
|
||||
|
||||
// NewProbeCache creates a function cache to query function metadata
|
||||
func NewProbeCache(cacheExpiry time.Duration) ProbeCacher {
|
||||
return &ProbeCache{
|
||||
Cache: make(map[string]*FunctionProbeResult),
|
||||
Expiry: cacheExpiry,
|
||||
}
|
||||
}
|
||||
|
||||
// Set replica count for functionName
|
||||
func (fc *ProbeCache) Set(functionName, namespace string, result *FunctionProbeResult) {
|
||||
fc.Sync.Lock()
|
||||
defer fc.Sync.Unlock()
|
||||
|
||||
fc.Cache[functionName+"."+namespace] = result
|
||||
}
|
||||
|
||||
func (fc *ProbeCache) Get(functionName, namespace string) (*FunctionProbeResult, bool) {
|
||||
|
||||
result := &FunctionProbeResult{
|
||||
Available: false,
|
||||
Error: fmt.Errorf("unavailable in cache"),
|
||||
}
|
||||
|
||||
hit := false
|
||||
fc.Sync.RLock()
|
||||
defer fc.Sync.RUnlock()
|
||||
|
||||
if val, exists := fc.Cache[functionName+"."+namespace]; exists {
|
||||
hit = val.Expired(fc.Expiry) == false
|
||||
result = val
|
||||
}
|
||||
|
||||
return result, hit
|
||||
}
|
@ -1,116 +0,0 @@
|
||||
// Copyright (c) OpenFaaS Author(s). All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package probing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/openfaas/faas/gateway/pkg/middleware"
|
||||
"github.com/openfaas/faas/gateway/scaling"
|
||||
"github.com/openfaas/faas/gateway/types"
|
||||
)
|
||||
|
||||
// NewFunctionProber create a new scaler with the specified
|
||||
// ScalingConfig
|
||||
func NewFunctionProber(functionQuery scaling.FunctionQuery, resolver middleware.BaseURLResolver) FunctionProber {
|
||||
// if directFunctions {
|
||||
return &FunctionHTTPProber{
|
||||
Query: functionQuery,
|
||||
Resolver: resolver,
|
||||
}
|
||||
}
|
||||
|
||||
// FunctionHTTPProber probes a function's health endpoint
|
||||
type FunctionHTTPProber struct {
|
||||
Query scaling.FunctionQuery
|
||||
Resolver middleware.BaseURLResolver
|
||||
DirectFunctions bool
|
||||
}
|
||||
|
||||
type FunctionNonProber struct {
|
||||
}
|
||||
|
||||
func (f *FunctionNonProber) Probe(functionName, namespace string) FunctionProbeResult {
|
||||
return FunctionProbeResult{
|
||||
Found: true,
|
||||
Available: true,
|
||||
}
|
||||
}
|
||||
|
||||
type FunctionProber interface {
|
||||
Probe(functionName, namespace string) FunctionProbeResult
|
||||
}
|
||||
|
||||
// FunctionProbeResult holds the result of scaling from zero
|
||||
type FunctionProbeResult struct {
|
||||
Available bool
|
||||
Error error
|
||||
Found bool
|
||||
Duration time.Duration
|
||||
Updated time.Time
|
||||
}
|
||||
|
||||
// Expired find out whether the cache item has expired with
|
||||
// the given expiry duration from when it was stored.
|
||||
func (res *FunctionProbeResult) Expired(expiry time.Duration) bool {
|
||||
return time.Now().After(res.Updated.Add(expiry))
|
||||
}
|
||||
|
||||
// Scale scales a function from zero replicas to 1 or the value set in
|
||||
// the minimum replicas metadata
|
||||
func (f *FunctionHTTPProber) Probe(functionName, namespace string) FunctionProbeResult {
|
||||
start := time.Now()
|
||||
|
||||
cachedResponse, _ := f.Query.Get(functionName, namespace)
|
||||
probePath := "/_/health"
|
||||
|
||||
if cachedResponse.Annotations != nil {
|
||||
if v, ok := (*cachedResponse.Annotations)["com.openfaas.http.path"]; ok && len(v) > 0 {
|
||||
probePath = v
|
||||
}
|
||||
}
|
||||
|
||||
maxCount := 10
|
||||
pollInterval := time.Millisecond * 50
|
||||
|
||||
err := types.Retry(func(attempt int) error {
|
||||
u := f.Resolver.BuildURL(functionName, namespace, probePath, true)
|
||||
|
||||
r, _ := http.NewRequest(http.MethodGet, u, nil)
|
||||
r.Header.Set("User-Agent", "com.openfaas.gateway/probe")
|
||||
|
||||
resp, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[Probe] %s => %d", u, resp.StatusCode)
|
||||
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed with status: %s", resp.Status)
|
||||
}, "Probe", maxCount, pollInterval)
|
||||
|
||||
if err != nil {
|
||||
return FunctionProbeResult{
|
||||
Error: err,
|
||||
Available: false,
|
||||
Found: true,
|
||||
Duration: time.Since(start),
|
||||
Updated: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
return FunctionProbeResult{
|
||||
Error: nil,
|
||||
Available: true,
|
||||
Found: true,
|
||||
Duration: time.Since(start),
|
||||
Updated: time.Now(),
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
// Package requests package provides a client SDK or library for
|
||||
// the OpenFaaS gateway REST API
|
||||
package requests
|
||||
|
||||
// DeleteFunctionRequest delete a deployed function
|
||||
type DeleteFunctionRequest struct {
|
||||
FunctionName string `json:"functionName"`
|
||||
}
|
@ -1,17 +1,23 @@
|
||||
package scaling
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultMinReplicas is the minimal amount of replicas for a service.
|
||||
DefaultMinReplicas = 1
|
||||
|
||||
// DefaultMaxReplicas is the amount of replicas a service will auto-scale up to.
|
||||
DefaultMaxReplicas = 20
|
||||
DefaultMaxReplicas = 5
|
||||
|
||||
// DefaultScalingFactor is the defining proportion for the scaling increments.
|
||||
DefaultScalingFactor = 20
|
||||
|
||||
// DefaultTargetLoad
|
||||
DefaultTargetLoad = 10
|
||||
DefaultScalingFactor = 10
|
||||
|
||||
DefaultTypeScale = "rps"
|
||||
|
||||
@ -23,10 +29,44 @@ const (
|
||||
|
||||
// ScalingFactorLabel label indicates the scaling factor for a function
|
||||
ScalingFactorLabel = "com.openfaas.scale.factor"
|
||||
|
||||
// TargetLoadLabel see also DefaultTargetScale
|
||||
TargetLoadLabel = "com.openfaas.scale.target"
|
||||
|
||||
// ScaleTypeLabel see also DefaultScaleType
|
||||
ScaleTypeLabel = "com.openfaas.scale.type"
|
||||
)
|
||||
|
||||
func MakeHorizontalScalingHandler(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Only POST is allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Body == nil {
|
||||
http.Error(w, "Error reading request body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
http.Error(w, "Error reading request body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
scaleRequest := types.ScaleServiceRequest{}
|
||||
if err := json.Unmarshal(body, &scaleRequest); err != nil {
|
||||
http.Error(w, "Error unmarshalling request body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if scaleRequest.Replicas < 1 {
|
||||
scaleRequest.Replicas = 1
|
||||
}
|
||||
|
||||
if scaleRequest.Replicas > DefaultMaxReplicas {
|
||||
scaleRequest.Replicas = DefaultMaxReplicas
|
||||
}
|
||||
|
||||
upstreamReq, _ := json.Marshal(scaleRequest)
|
||||
// Restore the io.ReadCloser to its original state
|
||||
r.Body = io.NopCloser(bytes.NewBuffer(upstreamReq))
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
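MakeHorizontalScalingHandler above silently clamps the requested replica count into [1, DefaultMaxReplicas] before forwarding the rewritten body to the next handler. A hedged test sketch of that behaviour, in the same scaling package; the ScaleServiceRequest field names are assumed from faas-provider/types and the route is only illustrative:

```go
package scaling

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/openfaas/faas-provider/types"
)

func Test_HorizontalScalingHandler_ClampsReplicas(t *testing.T) {
	var forwarded types.ScaleServiceRequest

	// Stub "next" handler that records what the gateway would forward upstream.
	next := func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		if err := json.Unmarshal(body, &forwarded); err != nil {
			t.Fatalf("unexpected body: %s", err)
		}
		w.WriteHeader(http.StatusAccepted)
	}

	handler := MakeHorizontalScalingHandler(next)

	payload, _ := json.Marshal(types.ScaleServiceRequest{ServiceName: "figlet", Replicas: 100})
	req := httptest.NewRequest(http.MethodPost, "/system/scale-function/figlet", bytes.NewReader(payload))
	rec := httptest.NewRecorder()

	handler(rec, req)

	if forwarded.Replicas != DefaultMaxReplicas {
		t.Errorf("want replicas clamped to %d, got %d", DefaultMaxReplicas, forwarded.Replicas)
	}
}
```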
@ -17,5 +17,4 @@ type ServiceQueryResponse struct {
|
||||
ScalingFactor uint64
|
||||
AvailableReplicas uint64
|
||||
Annotations *map[string]string
|
||||
TargetLoad uint64
|
||||
}
|
||||
|
@ -42,4 +42,6 @@ type HandlerSet struct {
|
||||
|
||||
// NamespaceListerHandler lists namespaces
|
||||
NamespaceListerHandler http.HandlerFunc
|
||||
|
||||
NamespaceMutatorHandler http.HandlerFunc
|
||||
}
|
||||
|
@ -8,7 +8,6 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
@ -129,9 +128,6 @@ func (ReadConfig) Read(hasEnv HasEnv) (*GatewayConfig, error) {
|
||||
cfg.PrometheusHost = prometheusHost
|
||||
}
|
||||
|
||||
cfg.DirectFunctions = parseBoolValue(hasEnv.Getenv("direct_functions"))
|
||||
cfg.DirectFunctionsSuffix = hasEnv.Getenv("direct_functions_suffix")
|
||||
|
||||
cfg.UseBasicAuth = parseBoolValue(hasEnv.Getenv("basic_auth"))
|
||||
|
||||
secretPath := hasEnv.Getenv("secret_mount_path")
|
||||
@ -169,14 +165,6 @@ func (ReadConfig) Read(hasEnv HasEnv) (*GatewayConfig, error) {
|
||||
|
||||
cfg.Namespace = hasEnv.Getenv("function_namespace")
|
||||
|
||||
if len(cfg.DirectFunctionsSuffix) > 0 && len(cfg.Namespace) > 0 {
|
||||
if strings.HasPrefix(cfg.DirectFunctionsSuffix, cfg.Namespace) == false {
|
||||
return nil, fmt.Errorf("function_namespace must be a sub-string of direct_functions_suffix")
|
||||
}
|
||||
}
|
||||
|
||||
cfg.ProbeFunctions = parseBoolValue(hasEnv.Getenv("probe_functions"))
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
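ProbeFunctions, UseBasicAuth and the other toggles read above all flow through parseBoolValue, which is not shown in this hunk. A minimal sketch of what such a helper typically does (an assumption, not the confirmed implementation): treat the literal string "true" as true and anything else, including an unset variable, as false.

```go
package main

import (
	"fmt"
	"os"
)

// parseBoolValue is a sketch of the toggle parser used by ReadConfig: only the
// literal string "true" enables a feature; empty or unset values stay false.
func parseBoolValue(val string) bool {
	return val == "true"
}

func main() {
	os.Setenv("probe_functions", "true")

	fmt.Println(parseBoolValue(os.Getenv("probe_functions"))) // true
	fmt.Println(parseBoolValue(os.Getenv("basic_auth")))      // false when unset
}
```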
@ -216,12 +204,6 @@ type GatewayConfig struct {
|
||||
// Port to connect to Prometheus.
|
||||
PrometheusPort int
|
||||
|
||||
// If set to true we will access upstream functions directly rather than through the upstream provider
|
||||
DirectFunctions bool
|
||||
|
||||
// If set this will be used to resolve functions directly
|
||||
DirectFunctionsSuffix string
|
||||
|
||||
// If set, reads secrets from file-system for enabling basic auth.
|
||||
UseBasicAuth bool
|
||||
|
||||
@ -245,9 +227,6 @@ type GatewayConfig struct {
|
||||
|
||||
// Namespace for endpoints
|
||||
Namespace string
|
||||
|
||||
// ProbeFunctions requires the gateway to probe the health endpoint of a function before invoking it
|
||||
ProbeFunctions bool
|
||||
}
|
||||
|
||||
// UseNATS Use NATS or not
|
||||
|
@ -38,16 +38,6 @@ func TestRead_UseExternalProvider_Defaults(t *testing.T) {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if config.DirectFunctions != false {
|
||||
t.Log("Default for DirectFunctions should be false")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if len(config.DirectFunctionsSuffix) > 0 {
|
||||
t.Log("Default for DirectFunctionsSuffix should be empty")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if len(config.Namespace) > 0 {
|
||||
t.Log("Default for Namespace should be empty")
|
||||
t.Fail()
|
||||
@ -89,86 +79,6 @@ func TestRead_NamespaceOverrideAgressWithFunctionSuffix_Valid(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRead_NamespaceOverrideAgressWithFunctionSuffix_Invalid(t *testing.T) {
|
||||
|
||||
defaults := NewEnvBucket()
|
||||
readConfig := ReadConfig{}
|
||||
|
||||
defaults.Setenv("direct_functions", "true")
|
||||
wantSuffix := "openfaas-fn.cluster.local.svc."
|
||||
|
||||
defaults.Setenv("direct_functions_suffix", wantSuffix)
|
||||
defaults.Setenv("function_namespace", "fn")
|
||||
|
||||
_, err := readConfig.Read(defaults)
|
||||
|
||||
if err == nil {
|
||||
t.Logf("Expected an error because function_namespace should be a sub-string of direct_functions_suffix")
|
||||
t.Fail()
|
||||
return
|
||||
}
|
||||
|
||||
want := "function_namespace must be a sub-string of direct_functions_suffix"
|
||||
|
||||
if want != err.Error() {
|
||||
t.Logf("Error want: %s, got: %s", want, err.Error())
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestRead_DirectFunctionsOverride(t *testing.T) {
|
||||
defaults := NewEnvBucket()
|
||||
readConfig := ReadConfig{}
|
||||
defaults.Setenv("direct_functions", "true")
|
||||
wantSuffix := "openfaas-fn.cluster.local.svc."
|
||||
defaults.Setenv("direct_functions_suffix", wantSuffix)
|
||||
|
||||
config, _ := readConfig.Read(defaults)
|
||||
|
||||
if config.DirectFunctions != true {
|
||||
t.Logf("DirectFunctions should be true, got: %v", config.DirectFunctions)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if config.DirectFunctionsSuffix != wantSuffix {
|
||||
t.Logf("DirectFunctionsSuffix want: %s, got: %s", wantSuffix, config.DirectFunctionsSuffix)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestRead_ProbeFunctions_Default(t *testing.T) {
|
||||
defaults := NewEnvBucket()
|
||||
readConfig := ReadConfig{}
|
||||
defaults.Setenv("probe_functions", "")
|
||||
|
||||
want := false
|
||||
|
||||
config, _ := readConfig.Read(defaults)
|
||||
|
||||
got := config.ProbeFunctions
|
||||
if want != got {
|
||||
t.Logf("ProbeFunctions want %v, but got %v", want, got)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestRead_ProbeFunctions_Enabled(t *testing.T) {
|
||||
defaults := NewEnvBucket()
|
||||
readConfig := ReadConfig{}
|
||||
defaults.Setenv("probe_functions", "true")
|
||||
|
||||
want := true
|
||||
|
||||
config, _ := readConfig.Read(defaults)
|
||||
|
||||
got := config.ProbeFunctions
|
||||
if want != got {
|
||||
t.Logf("ProbeFunctions want %v, but got %v", want, got)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestRead_ScaleZeroDefaultAndOverride(t *testing.T) {
|
||||
defaults := NewEnvBucket()
|
||||
readConfig := ReadConfig{}
|
||||
|
31 gateway/vendor/github.com/cespare/xxhash/v2/README.md (generated, vendored)
@ -3,8 +3,7 @@
|
||||
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
||||
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
|
||||
high-quality hashing algorithm that is much faster than anything in the Go
|
||||
standard library.
|
||||
|
||||
@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
|
||||
func (*Digest) Sum64() uint64
|
||||
```
|
||||
|
||||
This implementation provides a fast pure-Go implementation and an even faster
|
||||
assembly implementation for amd64.
|
||||
The package is written with optimized pure Go and also contains even faster
|
||||
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
|
||||
opts into using the Go code even on those architectures.
|
||||
|
||||
[xxHash]: http://cyan4973.github.io/xxHash/
|
||||
|
||||
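For reference alongside the API listing above, a minimal usage sketch of the vendored package (not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Println(xxhash.Sum64([]byte("openfaas")))
	fmt.Println(xxhash.Sum64String("openfaas"))

	// Streaming: Digest implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("open")
	d.WriteString("faas")
	fmt.Println(d.Sum64())
}
```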
## Compatibility
|
||||
|
||||
@ -45,19 +47,20 @@ I recommend using the latest release of Go.
|
||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64.
|
||||
|
||||
| input size | purego | asm |
|
||||
| --- | --- | --- |
|
||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
||||
| input size | purego | asm |
|
||||
| ---------- | --------- | --------- |
|
||||
| 4 B | 1.3 GB/s | 1.2 GB/s |
|
||||
| 16 B | 2.9 GB/s | 3.5 GB/s |
|
||||
| 100 B | 6.9 GB/s | 8.1 GB/s |
|
||||
| 4 KB | 11.7 GB/s | 16.7 GB/s |
|
||||
| 10 MB | 12.0 GB/s | 17.3 GB/s |
|
||||
|
||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
||||
the following commands under Go 1.11.2:
|
||||
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
|
||||
CPU using the following commands under Go 1.19.2:
|
||||
|
||||
```
|
||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
10 gateway/vendor/github.com/cespare/xxhash/v2/testall.sh (generated, vendored, new file)
@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -eu -o pipefail
|
||||
|
||||
# Small convenience script for running the tests with various combinations of
|
||||
# arch/tags. This assumes we're running on amd64 and have qemu available.
|
||||
|
||||
go test ./...
|
||||
go test -tags purego ./...
|
||||
GOARCH=arm64 go test
|
||||
GOARCH=arm64 go test -tags purego
|
47 gateway/vendor/github.com/cespare/xxhash/v2/xxhash.go (generated, vendored)
@ -16,19 +16,11 @@ const (
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array of the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
@ -50,10 +42,10 @@ func New() *Digest {
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = prime1v + prime2
|
||||
d.v1 = primes[0] + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.v4 = -primes[0]
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(d.mem[d.n:], b)
|
||||
copy(memleft, b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(d.mem[d.n:], b)
|
||||
c := copy(memleft, b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[32-d.n:]
|
||||
b = b[c:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
|
||||
|
||||
h += d.total
|
||||
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
|
308 gateway/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s (generated, vendored)
@ -1,215 +1,209 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// DI prime4v
|
||||
// Registers:
|
||||
#define h AX
|
||||
#define d AX
|
||||
#define p SI // pointer to advance through b
|
||||
#define n DX
|
||||
#define end BX // loop end
|
||||
#define v1 R8
|
||||
#define v2 R9
|
||||
#define v3 R10
|
||||
#define v4 R11
|
||||
#define x R12
|
||||
#define prime1 R13
|
||||
#define prime2 R14
|
||||
#define prime4 DI
|
||||
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
#define round(acc, x) \
|
||||
IMULQ prime2, x \
|
||||
ADDQ x, acc \
|
||||
ROLQ $31, acc \
|
||||
IMULQ prime1, acc
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ DI, acc
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
IMULQ prime2, x \
|
||||
ROLQ $31, x \
|
||||
IMULQ prime1, x
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and x.
|
||||
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
XORQ x, acc \
|
||||
IMULQ prime1, acc \
|
||||
ADDQ prime4, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||
// to process.
|
||||
#define blockLoop() \
|
||||
loop: \
|
||||
MOVQ +0(p), x \
|
||||
round(v1, x) \
|
||||
MOVQ +8(p), x \
|
||||
round(v2, x) \
|
||||
MOVQ +16(p), x \
|
||||
round(v3, x) \
|
||||
MOVQ +24(p), x \
|
||||
round(v4, x) \
|
||||
ADDQ $32, p \
|
||||
CMPQ p, end \
|
||||
JLE loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), DI
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·primes+24(SB), prime4
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
MOVQ b_base+0(FP), p
|
||||
MOVQ b_len+8(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
SUBQ $32, end
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
CMPQ n, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
MOVQ prime1, v1
|
||||
ADDQ prime2, v1
|
||||
MOVQ prime2, v2
|
||||
XORQ v3, v3
|
||||
XORQ v4, v4
|
||||
SUBQ prime1, v4
|
||||
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
blockLoop()
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
MOVQ v1, h
|
||||
ROLQ $1, h
|
||||
MOVQ v2, x
|
||||
ROLQ $7, x
|
||||
ADDQ x, h
|
||||
MOVQ v3, x
|
||||
ROLQ $12, x
|
||||
ADDQ x, h
|
||||
MOVQ v4, x
|
||||
ROLQ $18, x
|
||||
ADDQ x, h
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
MOVQ ·primes+32(SB), h
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
ADDQ n, h
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
ADDQ $24, end
|
||||
CMPQ p, end
|
||||
JG try4
|
||||
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
loop8:
|
||||
MOVQ (p), x
|
||||
ADDQ $8, p
|
||||
round0(x)
|
||||
XORQ x, h
|
||||
ROLQ $27, h
|
||||
IMULQ prime1, h
|
||||
ADDQ prime4, h
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
CMPQ p, end
|
||||
JLE loop8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ DI, AX
|
||||
try4:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JG try1
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
MOVL (p), x
|
||||
ADDQ $4, p
|
||||
IMULQ prime1, x
|
||||
XORQ x, h
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
ROLQ $23, h
|
||||
IMULQ prime2, h
|
||||
ADDQ ·primes+16(SB), h
|
||||
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
try1:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
loop1:
|
||||
MOVBQZX (p), x
|
||||
ADDQ $1, p
|
||||
IMULQ ·primes+32(SB), x
|
||||
XORQ x, h
|
||||
ROLQ $11, h
|
||||
IMULQ prime1, h
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
CMPQ p, end
|
||||
JL loop1
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
MOVQ h, x
|
||||
SHRQ $33, x
|
||||
XORQ x, h
|
||||
IMULQ prime2, h
|
||||
MOVQ h, x
|
||||
SHRQ $29, x
|
||||
XORQ x, h
|
||||
IMULQ ·primes+16(SB), h
|
||||
MOVQ h, x
|
||||
SHRQ $32, x
|
||||
XORQ x, h
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
MOVQ h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
MOVQ b_base+8(FP), p
|
||||
MOVQ b_len+16(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
SUBQ $32, end
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
MOVQ s+0(FP), d
|
||||
MOVQ 0(d), v1
|
||||
MOVQ 8(d), v2
|
||||
MOVQ 16(d), v3
|
||||
MOVQ 24(d), v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
blockLoop()
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
MOVQ v1, 0(d)
|
||||
MOVQ v2, 8(d)
|
||||
MOVQ v3, 16(d)
|
||||
MOVQ v4, 24(d)
|
||||
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
// The number of bytes written is p minus the old base pointer.
|
||||
SUBQ b_base+8(FP), p
|
||||
MOVQ p, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
183 gateway/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s (generated, vendored, new file)
@ -0,0 +1,183 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define digest R1
|
||||
#define h R2 // return value
|
||||
#define p R3 // input pointer
|
||||
#define n R4 // input length
|
||||
#define nblocks R5 // n / 32
|
||||
#define prime1 R7
|
||||
#define prime2 R8
|
||||
#define prime3 R9
|
||||
#define prime4 R10
|
||||
#define prime5 R11
|
||||
#define v1 R12
|
||||
#define v2 R13
|
||||
#define v3 R14
|
||||
#define v4 R15
|
||||
#define x1 R20
|
||||
#define x2 R21
|
||||
#define x3 R22
|
||||
#define x4 R23
|
||||
|
||||
#define round(acc, x) \
|
||||
MADD prime2, acc, x, acc \
|
||||
ROR $64-31, acc \
|
||||
MUL prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
MUL prime2, x \
|
||||
ROR $64-31, x \
|
||||
MUL prime1, x
|
||||
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
EOR x, acc \
|
||||
MADD acc, prime4, prime1, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||
#define blockLoop() \
|
||||
LSR $5, n, nblocks \
|
||||
PCALIGN $16 \
|
||||
loop: \
|
||||
LDP.P 16(p), (x1, x2) \
|
||||
LDP.P 16(p), (x3, x4) \
|
||||
round(v1, x1) \
|
||||
round(v2, x2) \
|
||||
round(v3, x3) \
|
||||
round(v4, x4) \
|
||||
SUB $1, nblocks \
|
||||
CBNZ nblocks, loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
LDP b_base+0(FP), (p, n)
|
||||
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
LDP ·primes+16(SB), (prime3, prime4)
|
||||
MOVD ·primes+32(SB), prime5
|
||||
|
||||
CMP $32, n
|
||||
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||
BLT afterLoop
|
||||
|
||||
ADD prime1, prime2, v1
|
||||
MOVD prime2, v2
|
||||
MOVD $0, v3
|
||||
NEG prime1, v4
|
||||
|
||||
blockLoop()
|
||||
|
||||
ROR $64-1, v1, x1
|
||||
ROR $64-7, v2, x2
|
||||
ADD x1, x2
|
||||
ROR $64-12, v3, x3
|
||||
ROR $64-18, v4, x4
|
||||
ADD x3, x4
|
||||
ADD x2, x4, h
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
afterLoop:
|
||||
ADD n, h
|
||||
|
||||
TBZ $4, n, try8
|
||||
LDP.P 16(p), (x1, x2)
|
||||
|
||||
round0(x1)
|
||||
|
||||
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||
// rotated register) is worth a small but measurable speedup for small
|
||||
// inputs.
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
round0(x2)
|
||||
ROR $64-27, h
|
||||
EOR x2 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try8:
|
||||
TBZ $3, n, try4
|
||||
MOVD.P 8(p), x1
|
||||
|
||||
round0(x1)
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try4:
|
||||
TBZ $2, n, try2
|
||||
MOVWU.P 4(p), x2
|
||||
|
||||
MUL prime1, x2
|
||||
ROR $64-23, h
|
||||
EOR x2 @> 64-23, h, h
|
||||
MADD h, prime3, prime2, h
|
||||
|
||||
try2:
|
||||
TBZ $1, n, try1
|
||||
MOVHU.P 2(p), x3
|
||||
AND $255, x3, x1
|
||||
LSR $8, x3, x2
|
||||
|
||||
MUL prime5, x1
|
||||
ROR $64-11, h
|
||||
EOR x1 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
MUL prime5, x2
|
||||
ROR $64-11, h
|
||||
EOR x2 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
try1:
|
||||
TBZ $0, n, finalize
|
||||
MOVBU (p), x4
|
||||
|
||||
MUL prime5, x4
|
||||
ROR $64-11, h
|
||||
EOR x4 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
finalize:
|
||||
EOR h >> 33, h
|
||||
MUL prime2, h
|
||||
EOR h >> 29, h
|
||||
MUL prime3, h
|
||||
EOR h >> 32, h
|
||||
|
||||
MOVD h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
|
||||
// Load state. Assume v[1-4] are stored contiguously.
|
||||
MOVD d+0(FP), digest
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 16(digest), (v3, v4)
|
||||
|
||||
LDP b_base+8(FP), (p, n)
|
||||
|
||||
blockLoop()
|
||||
|
||||
// Store updated state.
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v3, v4), 16(digest)
|
||||
|
||||
BIC $31, n
|
||||
MOVD n, ret+32(FP)
|
||||
RET
|
@ -1,3 +1,5 @@
|
||||
//go:build (amd64 || arm64) && !appengine && gc && !purego
|
||||
// +build amd64 arm64
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
22 gateway/vendor/github.com/cespare/xxhash/v2/xxhash_other.go (generated, vendored)
@ -1,4 +1,5 @@
|
||||
// +build !amd64 appengine !gc purego
|
||||
//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
||||
// +build !amd64,!arm64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v1 := primes[0] + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
v4 := -primes[0]
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
|
1 gateway/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go (generated, vendored)
@ -1,3 +1,4 @@
|
||||
//go:build appengine
|
||||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
3 gateway/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go (generated, vendored)
@ -1,3 +1,4 @@
|
||||
//go:build !appengine
|
||||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
@ -11,7 +12,7 @@ import (
|
||||
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// XxxString functions unnecessary by realizing that calls such as
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
|
||||
// If that happens, even if we keep these functions they can be replaced with
|
||||
// the trivial safe code.
|
||||
|
||||
|
64 gateway/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go (generated, vendored)
@@ -1,64 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto

package timestamp

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
	reflect "reflect"
)

// Symbols defined in public import of google/protobuf/timestamp.proto.

type Timestamp = timestamppb.Timestamp

var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor

var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
	0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
	0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x33,
}

var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
	if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   0,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
		DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
	}.Build()
	File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}
304  gateway/vendor/github.com/klauspost/compress/LICENSE  (generated, vendored, Normal file)
@@ -0,0 +1,304 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
------------------
|
||||
|
||||
Files: gzhttp/*
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016-2017 The New York Times Company
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
------------------
|
||||
|
||||
Files: s2/cmd/internal/readahead/*
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Klaus Post
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
---------------------
|
||||
Files: snappy/*
|
||||
Files: internal/snapref/*
|
||||
|
||||
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
-----------------
|
||||
|
||||
Files: s2/cmd/internal/filepathx/*
|
||||
|
||||
Copyright 2016 The filepathx Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
1017  gateway/vendor/github.com/klauspost/compress/flate/deflate.go  (generated, vendored, Normal file)
File diff suppressed because it is too large
184  gateway/vendor/github.com/klauspost/compress/flate/dict_decoder.go  (generated, vendored, Normal file)
@@ -0,0 +1,184 @@
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
|
||||
// LZ77 decompresses data through sequences of two forms of commands:
|
||||
//
|
||||
// - Literal insertions: Runs of one or more symbols are inserted into the data
|
||||
// stream as is. This is accomplished through the writeByte method for a
|
||||
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
|
||||
// Any valid stream must start with a literal insertion if no preset dictionary
|
||||
// is used.
|
||||
//
|
||||
// - Backward copies: Runs of one or more symbols are copied from previously
|
||||
// emitted data. Backward copies come as the tuple (dist, length) where dist
|
||||
// determines how far back in the stream to copy from and length determines how
|
||||
// many bytes to copy. Note that it is valid for the length to be greater than
|
||||
// the distance. Since LZ77 uses forward copies, that situation is used to
|
||||
// perform a form of run-length encoding on repeated runs of symbols.
|
||||
// The writeCopy and tryWriteCopy are used to implement this command.
|
||||
//
|
||||
// For performance reasons, this implementation performs little to no sanity
|
||||
// checks about the arguments. As such, the invariants documented for each
|
||||
// method call must be respected.
|
||||
type dictDecoder struct {
|
||||
hist []byte // Sliding window history
|
||||
|
||||
// Invariant: 0 <= rdPos <= wrPos <= len(hist)
|
||||
wrPos int // Current output position in buffer
|
||||
rdPos int // Have emitted hist[:rdPos] already
|
||||
full bool // Has a full window length been written yet?
|
||||
}
|
||||
|
||||
// init initializes dictDecoder to have a sliding window dictionary of the given
|
||||
// size. If a preset dict is provided, it will initialize the dictionary with
|
||||
// the contents of dict.
|
||||
func (dd *dictDecoder) init(size int, dict []byte) {
|
||||
*dd = dictDecoder{hist: dd.hist}
|
||||
|
||||
if cap(dd.hist) < size {
|
||||
dd.hist = make([]byte, size)
|
||||
}
|
||||
dd.hist = dd.hist[:size]
|
||||
|
||||
if len(dict) > len(dd.hist) {
|
||||
dict = dict[len(dict)-len(dd.hist):]
|
||||
}
|
||||
dd.wrPos = copy(dd.hist, dict)
|
||||
if dd.wrPos == len(dd.hist) {
|
||||
dd.wrPos = 0
|
||||
dd.full = true
|
||||
}
|
||||
dd.rdPos = dd.wrPos
|
||||
}
|
||||
|
||||
// histSize reports the total amount of historical data in the dictionary.
|
||||
func (dd *dictDecoder) histSize() int {
|
||||
if dd.full {
|
||||
return len(dd.hist)
|
||||
}
|
||||
return dd.wrPos
|
||||
}
|
||||
|
||||
// availRead reports the number of bytes that can be flushed by readFlush.
|
||||
func (dd *dictDecoder) availRead() int {
|
||||
return dd.wrPos - dd.rdPos
|
||||
}
|
||||
|
||||
// availWrite reports the available amount of output buffer space.
|
||||
func (dd *dictDecoder) availWrite() int {
|
||||
return len(dd.hist) - dd.wrPos
|
||||
}
|
||||
|
||||
// writeSlice returns a slice of the available buffer to write data to.
|
||||
//
|
||||
// This invariant will be kept: len(s) <= availWrite()
|
||||
func (dd *dictDecoder) writeSlice() []byte {
|
||||
return dd.hist[dd.wrPos:]
|
||||
}
|
||||
|
||||
// writeMark advances the writer pointer by cnt.
|
||||
//
|
||||
// This invariant must be kept: 0 <= cnt <= availWrite()
|
||||
func (dd *dictDecoder) writeMark(cnt int) {
|
||||
dd.wrPos += cnt
|
||||
}
|
||||
|
||||
// writeByte writes a single byte to the dictionary.
|
||||
//
|
||||
// This invariant must be kept: 0 < availWrite()
|
||||
func (dd *dictDecoder) writeByte(c byte) {
|
||||
dd.hist[dd.wrPos] = c
|
||||
dd.wrPos++
|
||||
}
|
||||
|
||||
// writeCopy copies a string at a given (dist, length) to the output.
|
||||
// This returns the number of bytes copied and may be less than the requested
|
||||
// length if the available space in the output buffer is too small.
|
||||
//
|
||||
// This invariant must be kept: 0 < dist <= histSize()
|
||||
func (dd *dictDecoder) writeCopy(dist, length int) int {
|
||||
dstBase := dd.wrPos
|
||||
dstPos := dstBase
|
||||
srcPos := dstPos - dist
|
||||
endPos := dstPos + length
|
||||
if endPos > len(dd.hist) {
|
||||
endPos = len(dd.hist)
|
||||
}
|
||||
|
||||
// Copy non-overlapping section after destination position.
|
||||
//
|
||||
// This section is non-overlapping in that the copy length for this section
|
||||
// is always less than or equal to the backwards distance. This can occur
|
||||
// if a distance refers to data that wraps-around in the buffer.
|
||||
// Thus, a backwards copy is performed here; that is, the exact bytes in
|
||||
// the source prior to the copy are placed in the destination.
|
||||
if srcPos < 0 {
|
||||
srcPos += len(dd.hist)
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
|
||||
srcPos = 0
|
||||
}
|
||||
|
||||
// Copy possibly overlapping section before destination position.
|
||||
//
|
||||
// This section can overlap if the copy length for this section is larger
|
||||
// than the backwards distance. This is allowed by LZ77 so that repeated
|
||||
// strings can be succinctly represented using (dist, length) pairs.
|
||||
// Thus, a forwards copy is performed here; that is, the bytes copied are
|
||||
// possibly dependent on the resulting bytes in the destination as the copy
|
||||
// progresses along. This is functionally equivalent to the following:
|
||||
//
|
||||
// for i := 0; i < endPos-dstPos; i++ {
|
||||
// dd.hist[dstPos+i] = dd.hist[srcPos+i]
|
||||
// }
|
||||
// dstPos = endPos
|
||||
//
|
||||
for dstPos < endPos {
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
|
||||
}
|
||||
|
||||
dd.wrPos = dstPos
|
||||
return dstPos - dstBase
|
||||
}
|
||||
|
||||
// tryWriteCopy tries to copy a string at a given (distance, length) to the
|
||||
// output. This specialized version is optimized for short distances.
|
||||
//
|
||||
// This method is designed to be inlined for performance reasons.
|
||||
//
|
||||
// This invariant must be kept: 0 < dist <= histSize()
|
||||
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
|
||||
dstPos := dd.wrPos
|
||||
endPos := dstPos + length
|
||||
if dstPos < dist || endPos > len(dd.hist) {
|
||||
return 0
|
||||
}
|
||||
dstBase := dstPos
|
||||
srcPos := dstPos - dist
|
||||
|
||||
// Copy possibly overlapping section before destination position.
|
||||
loop:
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
|
||||
if dstPos < endPos {
|
||||
goto loop // Avoid for-loop so that this function can be inlined
|
||||
}
|
||||
|
||||
dd.wrPos = dstPos
|
||||
return dstPos - dstBase
|
||||
}
|
||||
|
||||
// readFlush returns a slice of the historical buffer that is ready to be
|
||||
// emitted to the user. The data returned by readFlush must be fully consumed
|
||||
// before calling any other dictDecoder methods.
|
||||
func (dd *dictDecoder) readFlush() []byte {
|
||||
toRead := dd.hist[dd.rdPos:dd.wrPos]
|
||||
dd.rdPos = dd.wrPos
|
||||
if dd.wrPos == len(dd.hist) {
|
||||
dd.wrPos, dd.rdPos = 0, 0
|
||||
dd.full = true
|
||||
}
|
||||
return toRead
|
||||
}
|
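The dict_decoder.go comments above describe the two LZ77 command forms and note that a copy length may exceed its distance, which turns a backward copy into a form of run-length encoding. Here is a minimal sketch of that behaviour; applyCopy is a hypothetical helper for illustration, not the vendored dictDecoder API.

package main

import "fmt"

// applyCopy appends `length` bytes copied from `dist` bytes back in out.
// Because the copy proceeds forward one byte at a time, length may exceed
// dist, which repeats recently written bytes (run-length encoding).
func applyCopy(out []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		out = append(out, out[len(out)-dist])
	}
	return out
}

func main() {
	out := []byte("ab")        // literal insertion
	out = applyCopy(out, 2, 6) // backward copy (dist=2, length=6) repeats "ab"
	fmt.Println(string(out))   // "abababab"
}

The vendored writeCopy implements the same forward-copy semantics, but against the circular history window and with the bulk copy/goto optimizations shown above.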
193  gateway/vendor/github.com/klauspost/compress/flate/fast_encoder.go  (generated, vendored, Normal file)
@@ -0,0 +1,193 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Modified for deflate by Klaus Post (c) 2015.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type fastEnc interface {
|
||||
Encode(dst *tokens, src []byte)
|
||||
Reset()
|
||||
}
|
||||
|
||||
func newFastEnc(level int) fastEnc {
|
||||
switch level {
|
||||
case 1:
|
||||
return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
|
||||
case 2:
|
||||
return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
|
||||
case 3:
|
||||
return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
|
||||
case 4:
|
||||
return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
|
||||
case 5:
|
||||
return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
|
||||
case 6:
|
||||
return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
|
||||
default:
|
||||
panic("invalid level specified")
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
tableBits = 15 // Bits used in the table
|
||||
tableSize = 1 << tableBits // Size of the table
|
||||
tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
|
||||
baseMatchOffset = 1 // The smallest match offset
|
||||
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
|
||||
maxMatchOffset = 1 << 15 // The largest match offset
|
||||
|
||||
bTableBits = 17 // Bits used in the big tables
|
||||
bTableSize = 1 << bTableBits // Size of the table
|
||||
allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.
|
||||
bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
|
||||
)
|
||||
|
||||
const (
|
||||
prime3bytes = 506832829
|
||||
prime4bytes = 2654435761
|
||||
prime5bytes = 889523592379
|
||||
prime6bytes = 227718039650203
|
||||
prime7bytes = 58295818150454627
|
||||
prime8bytes = 0xcf1bbcdcb7a56463
|
||||
)
|
||||
|
||||
func load3232(b []byte, i int32) uint32 {
|
||||
return binary.LittleEndian.Uint32(b[i:])
|
||||
}
|
||||
|
||||
func load6432(b []byte, i int32) uint64 {
|
||||
return binary.LittleEndian.Uint64(b[i:])
|
||||
}
|
||||
|
||||
type tableEntry struct {
|
||||
offset int32
|
||||
}
|
||||
|
||||
// fastGen maintains the table for matches,
|
||||
// and the previous byte block for level 2.
|
||||
// This is the generic implementation.
|
||||
type fastGen struct {
|
||||
hist []byte
|
||||
cur int32
|
||||
}
|
||||
|
||||
func (e *fastGen) addBlock(src []byte) int32 {
|
||||
// check if we have space already
|
||||
if len(e.hist)+len(src) > cap(e.hist) {
|
||||
if cap(e.hist) == 0 {
|
||||
e.hist = make([]byte, 0, allocHistory)
|
||||
} else {
|
||||
if cap(e.hist) < maxMatchOffset*2 {
|
||||
panic("unexpected buffer size")
|
||||
}
|
||||
// Move down
|
||||
offset := int32(len(e.hist)) - maxMatchOffset
|
||||
// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
|
||||
*(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
|
||||
e.cur += offset
|
||||
e.hist = e.hist[:maxMatchOffset]
|
||||
}
|
||||
}
|
||||
s := int32(len(e.hist))
|
||||
e.hist = append(e.hist, src...)
|
||||
return s
|
||||
}
|
||||
|
||||
type tableEntryPrev struct {
|
||||
Cur tableEntry
|
||||
Prev tableEntry
|
||||
}
|
||||
|
||||
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
|
||||
// Preferably h should be a constant and should always be <64.
|
||||
func hash7(u uint64, h uint8) uint32 {
|
||||
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
|
||||
}
|
||||
|
||||
// hashLen returns a hash of the lowest mls bytes of with length output bits.
|
||||
// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
|
||||
// length should always be < 32.
|
||||
// Preferably length and mls should be a constant for inlining.
|
||||
func hashLen(u uint64, length, mls uint8) uint32 {
|
||||
switch mls {
|
||||
case 3:
|
||||
return (uint32(u<<8) * prime3bytes) >> (32 - length)
|
||||
case 5:
|
||||
return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
|
||||
case 6:
|
||||
return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
|
||||
case 7:
|
||||
return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
|
||||
case 8:
|
||||
return uint32((u * prime8bytes) >> (64 - length))
|
||||
default:
|
||||
return (uint32(u) * prime4bytes) >> (32 - length)
|
||||
}
|
||||
}
|
||||
|
||||
// matchlen will return the match length between offsets s and t in src.
|
||||
// The maximum length returned is maxMatchLength - 4.
|
||||
// It is assumed that s > t, that t >=0 and s < len(src).
|
||||
func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
|
||||
if debugDecode {
|
||||
if t >= s {
|
||||
panic(fmt.Sprint("t >=s:", t, s))
|
||||
}
|
||||
if int(s) >= len(src) {
|
||||
panic(fmt.Sprint("s >= len(src):", s, len(src)))
|
||||
}
|
||||
if t < 0 {
|
||||
panic(fmt.Sprint("t < 0:", t))
|
||||
}
|
||||
if s-t > maxMatchOffset {
|
||||
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
|
||||
}
|
||||
}
|
||||
s1 := int(s) + maxMatchLength - 4
|
||||
if s1 > len(src) {
|
||||
s1 = len(src)
|
||||
}
|
||||
|
||||
// Extend the match to be as long as possible.
|
||||
return int32(matchLen(src[s:s1], src[t:]))
|
||||
}
|
||||
|
||||
// matchlenLong will return the match length between offsets s and t in src.
|
||||
// It is assumed that s > t, that t >=0 and s < len(src).
|
||||
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
|
||||
if debugDeflate {
|
||||
if t >= s {
|
||||
panic(fmt.Sprint("t >=s:", t, s))
|
||||
}
|
||||
if int(s) >= len(src) {
|
||||
panic(fmt.Sprint("s >= len(src):", s, len(src)))
|
||||
}
|
||||
if t < 0 {
|
||||
panic(fmt.Sprint("t < 0:", t))
|
||||
}
|
||||
if s-t > maxMatchOffset {
|
||||
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
|
||||
}
|
||||
}
|
||||
// Extend the match to be as long as possible.
|
||||
return int32(matchLen(src[s:], src[t:]))
|
||||
}
|
||||
|
||||
// Reset the encoding table.
|
||||
func (e *fastGen) Reset() {
|
||||
if cap(e.hist) < allocHistory {
|
||||
e.hist = make([]byte, 0, allocHistory)
|
||||
}
|
||||
// We offset current position so everything will be out of reach.
|
||||
// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
|
||||
if e.cur <= bufferReset {
|
||||
e.cur += maxMatchOffset + int32(len(e.hist))
|
||||
}
|
||||
e.hist = e.hist[:0]
|
||||
}
|
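fast_encoder.go finds candidate matches by hashing the next few bytes with a multiplicative hash (the primeNbytes constants and hashLen) into a fixed-size table. The sketch below reproduces the 5-byte case of hashLen from the listing to show that two positions sharing the same low five bytes land in the same slot; the sample input and the 15-bit table size are illustrative choices, not values taken from the library.

package main

import (
	"encoding/binary"
	"fmt"
)

const prime5bytes = 889523592379

// hash5 mirrors the mls == 5 case of hashLen above: keep only the low
// 5 bytes of u, multiply by a prime, and take the top `length` bits.
func hash5(u uint64, length uint8) uint32 {
	return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
}

func main() {
	src := []byte("abcdeXXabcdeYYZ")
	a := binary.LittleEndian.Uint64(src[0:])
	b := binary.LittleEndian.Uint64(src[7:])
	// Both positions start with the same five bytes ("abcde"), so they hash
	// to the same table slot even though the following bytes differ.
	fmt.Println(hash5(a, 15), hash5(b, 15), hash5(a, 15) == hash5(b, 15))
}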
1182  gateway/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go  (generated, vendored, Normal file)
File diff suppressed because it is too large
417  gateway/vendor/github.com/klauspost/compress/flate/huffman_code.go  (generated, vendored, Normal file)
@@ -0,0 +1,417 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const (
|
||||
maxBitsLimit = 16
|
||||
// number of valid literals
|
||||
literalCount = 286
|
||||
)
|
||||
|
||||
// hcode is a huffman code with a bit code and bit length.
|
||||
type hcode uint32
|
||||
|
||||
func (h hcode) len() uint8 {
|
||||
return uint8(h)
|
||||
}
|
||||
|
||||
func (h hcode) code64() uint64 {
|
||||
return uint64(h >> 8)
|
||||
}
|
||||
|
||||
func (h hcode) zero() bool {
|
||||
return h == 0
|
||||
}
|
||||
|
||||
type huffmanEncoder struct {
|
||||
codes []hcode
|
||||
bitCount [17]int32
|
||||
|
||||
// Allocate a reusable buffer with the longest possible frequency table.
|
||||
// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
|
||||
// The largest of these is literalCount, so we allocate for that case.
|
||||
freqcache [literalCount + 1]literalNode
|
||||
}
|
||||
|
||||
type literalNode struct {
|
||||
literal uint16
|
||||
freq uint16
|
||||
}
|
||||
|
||||
// A levelInfo describes the state of the constructed tree for a given depth.
|
||||
type levelInfo struct {
|
||||
// Our level. for better printing
|
||||
level int32
|
||||
|
||||
// The frequency of the last node at this level
|
||||
lastFreq int32
|
||||
|
||||
// The frequency of the next character to add to this level
|
||||
nextCharFreq int32
|
||||
|
||||
// The frequency of the next pair (from level below) to add to this level.
|
||||
// Only valid if the "needed" value of the next lower level is 0.
|
||||
nextPairFreq int32
|
||||
|
||||
// The number of chains remaining to generate for this level before moving
|
||||
// up to the next level
|
||||
needed int32
|
||||
}
|
||||
|
||||
// set sets the code and length of an hcode.
|
||||
func (h *hcode) set(code uint16, length uint8) {
|
||||
*h = hcode(length) | (hcode(code) << 8)
|
||||
}
|
||||
|
||||
func newhcode(code uint16, length uint8) hcode {
|
||||
return hcode(length) | (hcode(code) << 8)
|
||||
}
|
||||
|
||||
func reverseBits(number uint16, bitLength byte) uint16 {
|
||||
return bits.Reverse16(number << ((16 - bitLength) & 15))
|
||||
}
|
||||
|
||||
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
|
||||
|
||||
func newHuffmanEncoder(size int) *huffmanEncoder {
|
||||
// Make capacity to next power of two.
|
||||
c := uint(bits.Len32(uint32(size - 1)))
|
||||
return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
|
||||
}
|
||||
|
||||
// Generates a HuffmanCode corresponding to the fixed literal table
|
||||
func generateFixedLiteralEncoding() *huffmanEncoder {
|
||||
h := newHuffmanEncoder(literalCount)
|
||||
codes := h.codes
|
||||
var ch uint16
|
||||
for ch = 0; ch < literalCount; ch++ {
|
||||
var bits uint16
|
||||
var size uint8
|
||||
switch {
|
||||
case ch < 144:
|
||||
// size 8, 00110000 .. 10111111
|
||||
bits = ch + 48
|
||||
size = 8
|
||||
case ch < 256:
|
||||
// size 9, 110010000 .. 111111111
|
||||
bits = ch + 400 - 144
|
||||
size = 9
|
||||
case ch < 280:
|
||||
// size 7, 0000000 .. 0010111
|
||||
bits = ch - 256
|
||||
size = 7
|
||||
default:
|
||||
// size 8, 11000000 .. 11000111
|
||||
bits = ch + 192 - 280
|
||||
size = 8
|
||||
}
|
||||
codes[ch] = newhcode(reverseBits(bits, size), size)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func generateFixedOffsetEncoding() *huffmanEncoder {
|
||||
h := newHuffmanEncoder(30)
|
||||
codes := h.codes
|
||||
for ch := range codes {
|
||||
codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
var fixedLiteralEncoding = generateFixedLiteralEncoding()
|
||||
var fixedOffsetEncoding = generateFixedOffsetEncoding()
|
||||
|
||||
func (h *huffmanEncoder) bitLength(freq []uint16) int {
|
||||
var total int
|
||||
for i, f := range freq {
|
||||
if f != 0 {
|
||||
total += int(f) * int(h.codes[i].len())
|
||||
}
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
|
||||
var total int
|
||||
for _, f := range b {
|
||||
total += int(h.codes[f].len())
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
|
||||
func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
|
||||
var total int
|
||||
for i, f := range freq {
|
||||
if f != 0 {
|
||||
code := h.codes[i]
|
||||
if code.zero() {
|
||||
return math.MaxInt32
|
||||
}
|
||||
total += int(f) * int(code.len())
|
||||
}
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// Return the number of literals assigned to each bit size in the Huffman encoding
|
||||
//
|
||||
// This method is only called when list.length >= 3
|
||||
// The cases of 0, 1, and 2 literals are handled by special case code.
|
||||
//
|
||||
// list An array of the literals with non-zero frequencies
|
||||
//
|
||||
// and their associated frequencies. The array is in order of increasing
|
||||
// frequency, and has as its last element a special element with frequency
|
||||
// MaxInt32
|
||||
//
|
||||
// maxBits The maximum number of bits that should be used to encode any literal.
|
||||
//
|
||||
// Must be less than 16.
|
||||
//
|
||||
// return An integer array in which array[i] indicates the number of literals
|
||||
//
|
||||
// that should be encoded in i bits.
|
||||
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
|
||||
if maxBits >= maxBitsLimit {
|
||||
panic("flate: maxBits too large")
|
||||
}
|
||||
n := int32(len(list))
|
||||
list = list[0 : n+1]
|
||||
list[n] = maxNode()
|
||||
|
||||
// The tree can't have greater depth than n - 1, no matter what. This
|
||||
// saves a little bit of work in some small cases
|
||||
if maxBits > n-1 {
|
||||
maxBits = n - 1
|
||||
}
|
||||
|
||||
// Create information about each of the levels.
|
||||
// A bogus "Level 0" whose sole purpose is so that
|
||||
// level1.prev.needed==0. This makes level1.nextPairFreq
|
||||
// be a legitimate value that never gets chosen.
|
||||
var levels [maxBitsLimit]levelInfo
|
||||
// leafCounts[i] counts the number of literals at the left
|
||||
// of ancestors of the rightmost node at level i.
|
||||
// leafCounts[i][j] is the number of literals at the left
|
||||
// of the level j ancestor.
|
||||
var leafCounts [maxBitsLimit][maxBitsLimit]int32
|
||||
|
||||
// Descending to only have 1 bounds check.
|
||||
l2f := int32(list[2].freq)
|
||||
l1f := int32(list[1].freq)
|
||||
l0f := int32(list[0].freq) + int32(list[1].freq)
|
||||
|
||||
for level := int32(1); level <= maxBits; level++ {
|
||||
// For every level, the first two items are the first two characters.
|
||||
// We initialize the levels as if we had already figured this out.
|
||||
levels[level] = levelInfo{
|
||||
level: level,
|
||||
lastFreq: l1f,
|
||||
nextCharFreq: l2f,
|
||||
nextPairFreq: l0f,
|
||||
}
|
||||
leafCounts[level][level] = 2
|
||||
if level == 1 {
|
||||
levels[level].nextPairFreq = math.MaxInt32
|
||||
}
|
||||
}
|
||||
|
||||
// We need a total of 2*n - 2 items at top level and have already generated 2.
|
||||
levels[maxBits].needed = 2*n - 4
|
||||
|
||||
level := uint32(maxBits)
|
||||
for level < 16 {
|
||||
l := &levels[level]
|
||||
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
|
||||
// We've run out of both leafs and pairs.
|
||||
// End all calculations for this level.
|
||||
// To make sure we never come back to this level or any lower level,
|
||||
// set nextPairFreq impossibly large.
|
||||
l.needed = 0
|
||||
levels[level+1].nextPairFreq = math.MaxInt32
|
||||
level++
|
||||
continue
|
||||
}
|
||||
|
||||
prevFreq := l.lastFreq
|
||||
if l.nextCharFreq < l.nextPairFreq {
|
||||
// The next item on this row is a leaf node.
|
||||
n := leafCounts[level][level] + 1
|
||||
l.lastFreq = l.nextCharFreq
|
||||
// Lower leafCounts are the same as the previous node.
|
||||
leafCounts[level][level] = n
|
||||
e := list[n]
|
||||
if e.literal < math.MaxUint16 {
|
||||
l.nextCharFreq = int32(e.freq)
|
||||
} else {
|
||||
l.nextCharFreq = math.MaxInt32
|
||||
}
|
||||
} else {
|
||||
// The next item on this row is a pair from the previous row.
|
||||
// nextPairFreq isn't valid until we generate two
|
||||
// more values in the level below
|
||||
l.lastFreq = l.nextPairFreq
|
||||
// Take leaf counts from the lower level, except counts[level] remains the same.
|
||||
if true {
|
||||
save := leafCounts[level][level]
|
||||
leafCounts[level] = leafCounts[level-1]
|
||||
leafCounts[level][level] = save
|
||||
} else {
|
||||
copy(leafCounts[level][:level], leafCounts[level-1][:level])
|
||||
}
|
||||
levels[l.level-1].needed = 2
|
||||
}
|
||||
|
||||
if l.needed--; l.needed == 0 {
|
||||
// We've done everything we need to do for this level.
|
||||
// Continue calculating one level up. Fill in nextPairFreq
|
||||
// of that level with the sum of the two nodes we've just calculated on
|
||||
// this level.
|
||||
if l.level == maxBits {
|
||||
// All done!
|
||||
break
|
||||
}
|
||||
levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
|
||||
level++
|
||||
} else {
|
||||
// If we stole from below, move down temporarily to replenish it.
|
||||
for levels[level-1].needed > 0 {
|
||||
level--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Something is wrong if at the end, the top level is null or hasn't used
|
||||
// all of the leaves.
|
||||
if leafCounts[maxBits][maxBits] != n {
|
||||
panic("leafCounts[maxBits][maxBits] != n")
|
||||
}
|
||||
|
||||
bitCount := h.bitCount[:maxBits+1]
|
||||
bits := 1
|
||||
counts := &leafCounts[maxBits]
|
||||
for level := maxBits; level > 0; level-- {
|
||||
// chain.leafCount gives the number of literals requiring at least "bits"
|
||||
// bits to encode.
|
||||
bitCount[bits] = counts[level] - counts[level-1]
|
||||
bits++
|
||||
}
|
||||
return bitCount
|
||||
}
|
||||
|
||||
// Look at the leaves and assign them a bit count and an encoding as specified
|
||||
// in RFC 1951 3.2.2
|
||||
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
|
||||
code := uint16(0)
|
||||
for n, bits := range bitCount {
|
||||
code <<= 1
|
||||
if n == 0 || bits == 0 {
|
||||
continue
|
||||
}
|
||||
// The literals list[len(list)-bits] .. list[len(list)-1]
|
||||
// are encoded using "bits" bits, and get the values
|
||||
// code, code + 1, .... The code values are
|
||||
// assigned in literal order (not frequency order).
|
||||
chunk := list[len(list)-int(bits):]
|
||||
|
||||
sortByLiteral(chunk)
|
||||
for _, node := range chunk {
|
||||
h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
|
||||
code++
|
||||
}
|
||||
list = list[0 : len(list)-int(bits)]
|
||||
}
|
||||
}
|
||||
|
||||
// Update this Huffman Code object to be the minimum code for the specified frequency count.
|
||||
//
|
||||
// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
|
||||
// maxBits The maximum number of bits to use for any literal.
|
||||
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
|
||||
list := h.freqcache[:len(freq)+1]
|
||||
codes := h.codes[:len(freq)]
|
||||
// Number of non-zero literals
|
||||
count := 0
|
||||
// Set list to be the set of all non-zero literals and their frequencies
|
||||
for i, f := range freq {
|
||||
if f != 0 {
|
||||
list[count] = literalNode{uint16(i), f}
|
||||
count++
|
||||
} else {
|
||||
codes[i] = 0
|
||||
}
|
||||
}
|
||||
list[count] = literalNode{}
|
||||
|
||||
list = list[:count]
|
||||
if count <= 2 {
|
||||
// Handle the small cases here, because they are awkward for the general case code. With
|
||||
// two or fewer literals, everything has bit length 1.
|
||||
for i, node := range list {
|
||||
// "list" is in order of increasing literal value.
|
||||
h.codes[node.literal].set(uint16(i), 1)
|
||||
}
|
||||
return
|
||||
}
|
||||
sortByFreq(list)
|
||||
|
||||
// Get the number of literals for each bit count
|
||||
bitCount := h.bitCounts(list, maxBits)
|
||||
// And do the assignment
|
||||
h.assignEncodingAndSize(bitCount, list)
|
||||
}
|
||||
|
||||
// atLeastOne clamps the result between 1 and 15.
|
||||
func atLeastOne(v float32) float32 {
|
||||
if v < 1 {
|
||||
return 1
|
||||
}
|
||||
if v > 15 {
|
||||
return 15
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func histogram(b []byte, h []uint16) {
|
||||
if true && len(b) >= 8<<10 {
|
||||
// Split for bigger inputs
|
||||
histogramSplit(b, h)
|
||||
} else {
|
||||
h = h[:256]
|
||||
for _, t := range b {
|
||||
h[t]++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func histogramSplit(b []byte, h []uint16) {
|
||||
// Tested, and slightly faster than 2-way.
|
||||
// Writing to separate arrays and combining is also slightly slower.
|
||||
h = h[:256]
|
||||
for len(b)&3 != 0 {
|
||||
h[b[0]]++
|
||||
b = b[1:]
|
||||
}
|
||||
n := len(b) / 4
|
||||
x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
|
||||
y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
|
||||
for i, t := range x {
|
||||
v0 := &h[t]
|
||||
v1 := &h[y[i]]
|
||||
v3 := &h[w[i]]
|
||||
v2 := &h[z[i]]
|
||||
*v0++
|
||||
*v1++
|
||||
*v2++
|
||||
*v3++
|
||||
}
|
||||
}
|
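huffman_code.go stores each Huffman code as a single hcode uint32, with the bit length in the low 8 bits and the code value in the bits above (see newhcode, len and code64 in the listing). A small round-trip sketch adapted from those declarations; the example code value 191 and length 8 are arbitrary.

package main

import "fmt"

// hcode packs a Huffman code and its bit length into one uint32:
// low 8 bits = length, remaining bits = the code value.
type hcode uint32

func newhcode(code uint16, length uint8) hcode { return hcode(length) | (hcode(code) << 8) }

func (h hcode) len() uint8     { return uint8(h) }
func (h hcode) code64() uint64 { return uint64(h >> 8) }

func main() {
	h := newhcode(191, 8)
	fmt.Println(h.code64(), h.len()) // 191 8
}

Packing both fields into one integer keeps the per-symbol table compact and lets the bit writer fetch code and length with a single load.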
159  gateway/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go  (generated, vendored, Normal file)
@@ -0,0 +1,159 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// Sort sorts data.
|
||||
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
|
||||
// data.Less and data.Swap. The sort is not guaranteed to be stable.
|
||||
func sortByFreq(data []literalNode) {
|
||||
n := len(data)
|
||||
quickSortByFreq(data, 0, n, maxDepth(n))
|
||||
}
|
||||
|
||||
func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
|
||||
for b-a > 12 { // Use ShellSort for slices <= 12 elements
|
||||
if maxDepth == 0 {
|
||||
heapSort(data, a, b)
|
||||
return
|
||||
}
|
||||
maxDepth--
|
||||
mlo, mhi := doPivotByFreq(data, a, b)
|
||||
// Avoiding recursion on the larger subproblem guarantees
|
||||
// a stack depth of at most lg(b-a).
|
||||
if mlo-a < b-mhi {
|
||||
quickSortByFreq(data, a, mlo, maxDepth)
|
||||
a = mhi // i.e., quickSortByFreq(data, mhi, b)
|
||||
} else {
|
||||
quickSortByFreq(data, mhi, b, maxDepth)
|
||||
b = mlo // i.e., quickSortByFreq(data, a, mlo)
|
||||
}
|
||||
}
|
||||
if b-a > 1 {
|
||||
// Do ShellSort pass with gap 6
|
||||
// It could be written in this simplified form because b-a <= 12
|
||||
for i := a + 6; i < b; i++ {
|
||||
if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
|
||||
data[i], data[i-6] = data[i-6], data[i]
|
||||
}
|
||||
}
|
||||
insertionSortByFreq(data, a, b)
|
||||
}
|
||||
}
|
||||
|
||||
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
|
||||
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
|
||||
if hi-lo > 40 {
|
||||
// Tukey's ``Ninther,'' median of three medians of three.
|
||||
s := (hi - lo) / 8
|
||||
medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
|
||||
medianOfThreeSortByFreq(data, m, m-s, m+s)
|
||||
medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
|
||||
}
|
||||
medianOfThreeSortByFreq(data, lo, m, hi-1)
|
||||
|
||||
// Invariants are:
|
||||
// data[lo] = pivot (set up by ChoosePivot)
|
||||
// data[lo < i < a] < pivot
|
||||
// data[a <= i < b] <= pivot
|
||||
// data[b <= i < c] unexamined
|
||||
// data[c <= i < hi-1] > pivot
|
||||
// data[hi-1] >= pivot
|
||||
pivot := lo
|
||||
a, c := lo+1, hi-1
|
||||
|
||||
for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
|
||||
}
|
||||
b := a
|
||||
for {
|
||||
for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
|
||||
}
|
||||
for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
|
||||
}
|
||||
if b >= c {
|
||||
break
|
||||
}
|
||||
// data[b] > pivot; data[c-1] <= pivot
|
||||
data[b], data[c-1] = data[c-1], data[b]
|
||||
b++
|
||||
c--
|
||||
}
|
||||
// If hi-c<3 then there are duplicates (by property of median of nine).
|
||||
// Let's be a bit more conservative, and set border to 5.
|
||||
protect := hi-c < 5
|
||||
if !protect && hi-c < (hi-lo)/4 {
|
||||
// Lets test some points for equality to pivot
|
||||
dups := 0
|
||||
if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
|
||||
data[c], data[hi-1] = data[hi-1], data[c]
|
||||
c++
|
||||
dups++
|
||||
}
|
||||
if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
|
||||
b--
|
||||
dups++
|
||||
}
|
||||
// m-lo = (hi-lo)/2 > 6
|
||||
// b-lo > (hi-lo)*3/4-1 > 8
|
||||
// ==> m < b ==> data[m] <= pivot
|
||||
if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
|
||||
data[m], data[b-1] = data[b-1], data[m]
|
||||
b--
|
||||
dups++
|
||||
}
|
||||
// if at least 2 points are equal to pivot, assume skewed distribution
|
||||
protect = dups > 1
|
||||
}
|
||||
if protect {
|
||||
// Protect against a lot of duplicates
|
||||
// Add invariant:
|
||||
// data[a <= i < b] unexamined
|
||||
// data[b <= i < c] = pivot
|
||||
for {
|
||||
for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
|
||||
}
|
||||
for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
|
||||
}
|
||||
if a >= b {
|
||||
break
|
||||
}
|
||||
// data[a] == pivot; data[b-1] < pivot
|
||||
data[a], data[b-1] = data[b-1], data[a]
|
||||
a++
|
||||
b--
|
||||
}
|
||||
}
|
||||
// Swap pivot into middle
|
||||
data[pivot], data[b-1] = data[b-1], data[pivot]
|
||||
return b - 1, c
|
||||
}
|
||||
|
||||
// Insertion sort
|
||||
func insertionSortByFreq(data []literalNode, a, b int) {
|
||||
for i := a + 1; i < b; i++ {
|
||||
for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// quickSortByFreq, loosely following Bentley and McIlroy,
|
||||
// ``Engineering a Sort Function,'' SP&E November 1993.
|
||||
|
||||
// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
|
||||
func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
|
||||
// sort 3 elements
|
||||
if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
|
||||
data[m1], data[m0] = data[m0], data[m1]
|
||||
}
|
||||
// data[m0] <= data[m1]
|
||||
if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
|
||||
data[m2], data[m1] = data[m1], data[m2]
|
||||
// data[m0] <= data[m2] && data[m1] < data[m2]
|
||||
if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
|
||||
data[m1], data[m0] = data[m0], data[m1]
|
||||
}
|
||||
}
|
||||
// now data[m0] <= data[m1] <= data[m2]
|
||||
}
|
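The comparisons repeated throughout huffman_sortByFreq.go all encode one ordering: ascending frequency with the literal value as tie-breaker. The sketch below expresses that same ordering with sort.Slice purely for readability; the vendored code keeps its hand-written quicksort, presumably to avoid interface overhead in this hot path (an assumption, not stated in the file).

package main

import (
	"fmt"
	"sort"
)

type literalNode struct {
	literal uint16
	freq    uint16
}

// lessByFreq is the comparison used throughout the listed sortByFreq code:
// ascending frequency, with the literal value as the tie-breaker.
func lessByFreq(a, b literalNode) bool {
	if a.freq == b.freq {
		return a.literal < b.literal
	}
	return a.freq < b.freq
}

func main() {
	nodes := []literalNode{{literal: 'b', freq: 3}, {literal: 'a', freq: 3}, {literal: 'c', freq: 1}}
	sort.Slice(nodes, func(i, j int) bool { return lessByFreq(nodes[i], nodes[j]) })
	fmt.Println(nodes) // [{99 1} {97 3} {98 3}]
}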
201  gateway/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go  (generated, vendored, Normal file)
@@ -0,0 +1,201 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// Sort sorts data.
|
||||
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
|
||||
// data.Less and data.Swap. The sort is not guaranteed to be stable.
|
||||
func sortByLiteral(data []literalNode) {
|
||||
n := len(data)
|
||||
quickSort(data, 0, n, maxDepth(n))
|
||||
}
|
||||
|
||||
func quickSort(data []literalNode, a, b, maxDepth int) {
|
||||
for b-a > 12 { // Use ShellSort for slices <= 12 elements
|
||||
if maxDepth == 0 {
|
||||
heapSort(data, a, b)
|
||||
return
|
||||
}
|
||||
maxDepth--
|
||||
mlo, mhi := doPivot(data, a, b)
|
||||
// Avoiding recursion on the larger subproblem guarantees
|
||||
// a stack depth of at most lg(b-a).
|
||||
if mlo-a < b-mhi {
|
||||
quickSort(data, a, mlo, maxDepth)
|
||||
a = mhi // i.e., quickSort(data, mhi, b)
|
||||
} else {
|
||||
quickSort(data, mhi, b, maxDepth)
|
||||
b = mlo // i.e., quickSort(data, a, mlo)
|
||||
}
|
||||
}
|
||||
if b-a > 1 {
|
||||
// Do ShellSort pass with gap 6
|
||||
// It could be written in this simplified form because b-a <= 12
|
||||
for i := a + 6; i < b; i++ {
|
||||
if data[i].literal < data[i-6].literal {
|
||||
data[i], data[i-6] = data[i-6], data[i]
|
||||
}
|
||||
}
|
||||
insertionSort(data, a, b)
|
||||
}
|
||||
}
|
||||
func heapSort(data []literalNode, a, b int) {
|
||||
first := a
|
||||
lo := 0
|
||||
hi := b - a
|
||||
|
||||
// Build heap with greatest element at top.
|
||||
for i := (hi - 1) / 2; i >= 0; i-- {
|
||||
siftDown(data, i, hi, first)
|
||||
}
|
||||
|
||||
// Pop elements, largest first, into end of data.
|
||||
for i := hi - 1; i >= 0; i-- {
|
||||
data[first], data[first+i] = data[first+i], data[first]
|
||||
siftDown(data, lo, i, first)
|
||||
}
|
||||
}
|
||||
|
||||
// siftDown implements the heap property on data[lo, hi).
|
||||
// first is an offset into the array where the root of the heap lies.
|
||||
func siftDown(data []literalNode, lo, hi, first int) {
|
||||
root := lo
|
||||
for {
|
||||
child := 2*root + 1
|
||||
if child >= hi {
|
||||
break
|
||||
}
|
||||
if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
|
||||
child++
|
||||
}
|
||||
if data[first+root].literal > data[first+child].literal {
|
||||
return
|
||||
}
|
||||
data[first+root], data[first+child] = data[first+child], data[first+root]
|
||||
root = child
|
||||
}
|
||||
}
|
||||
func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
|
||||
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
|
||||
if hi-lo > 40 {
|
||||
// Tukey's ``Ninther,'' median of three medians of three.
|
||||
s := (hi - lo) / 8
|
||||
medianOfThree(data, lo, lo+s, lo+2*s)
|
||||
medianOfThree(data, m, m-s, m+s)
|
||||
medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
|
||||
}
|
||||
medianOfThree(data, lo, m, hi-1)
|
||||
|
||||
// Invariants are:
|
||||
// data[lo] = pivot (set up by ChoosePivot)
|
||||
// data[lo < i < a] < pivot
|
||||
// data[a <= i < b] <= pivot
|
||||
// data[b <= i < c] unexamined
|
||||
// data[c <= i < hi-1] > pivot
|
||||
// data[hi-1] >= pivot
|
||||
pivot := lo
|
||||
a, c := lo+1, hi-1
|
||||
|
||||
for ; a < c && data[a].literal < data[pivot].literal; a++ {
|
||||
}
|
||||
b := a
|
||||
for {
|
||||
for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
|
||||
}
|
||||
for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
|
||||
}
|
||||
if b >= c {
|
||||
break
|
||||
}
|
||||
// data[b] > pivot; data[c-1] <= pivot
|
||||
data[b], data[c-1] = data[c-1], data[b]
|
||||
b++
|
||||
c--
|
||||
}
|
||||
// If hi-c<3 then there are duplicates (by property of median of nine).
|
||||
// Let's be a bit more conservative, and set border to 5.
|
||||
protect := hi-c < 5
|
||||
if !protect && hi-c < (hi-lo)/4 {
|
||||
// Let's test some points for equality to pivot
|
||||
dups := 0
|
||||
if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
|
||||
data[c], data[hi-1] = data[hi-1], data[c]
|
||||
c++
|
||||
dups++
|
||||
}
|
||||
if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
|
||||
b--
|
||||
dups++
|
||||
}
|
||||
// m-lo = (hi-lo)/2 > 6
|
||||
// b-lo > (hi-lo)*3/4-1 > 8
|
||||
// ==> m < b ==> data[m] <= pivot
|
||||
if data[m].literal > data[pivot].literal { // data[m] = pivot
|
||||
data[m], data[b-1] = data[b-1], data[m]
|
||||
b--
|
||||
dups++
|
||||
}
|
||||
// if at least 2 points are equal to pivot, assume skewed distribution
|
||||
protect = dups > 1
|
||||
}
|
||||
if protect {
|
||||
// Protect against a lot of duplicates
|
||||
// Add invariant:
|
||||
// data[a <= i < b] unexamined
|
||||
// data[b <= i < c] = pivot
|
||||
for {
|
||||
for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
|
||||
}
|
||||
for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
|
||||
}
|
||||
if a >= b {
|
||||
break
|
||||
}
|
||||
// data[a] == pivot; data[b-1] < pivot
|
||||
data[a], data[b-1] = data[b-1], data[a]
|
||||
a++
|
||||
b--
|
||||
}
|
||||
}
|
||||
// Swap pivot into middle
|
||||
data[pivot], data[b-1] = data[b-1], data[pivot]
|
||||
return b - 1, c
|
||||
}
|
||||
|
||||
// Insertion sort
|
||||
func insertionSort(data []literalNode, a, b int) {
|
||||
for i := a + 1; i < b; i++ {
|
||||
for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// maxDepth returns a threshold at which quicksort should switch
|
||||
// to heapsort. It returns 2*ceil(lg(n+1)).
|
||||
func maxDepth(n int) int {
|
||||
var depth int
|
||||
for i := n; i > 0; i >>= 1 {
|
||||
depth++
|
||||
}
|
||||
return depth * 2
|
||||
}
|
||||
|
||||
// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
|
||||
func medianOfThree(data []literalNode, m1, m0, m2 int) {
|
||||
// sort 3 elements
|
||||
if data[m1].literal < data[m0].literal {
|
||||
data[m1], data[m0] = data[m0], data[m1]
|
||||
}
|
||||
// data[m0] <= data[m1]
|
||||
if data[m2].literal < data[m1].literal {
|
||||
data[m2], data[m1] = data[m1], data[m2]
|
||||
// data[m0] <= data[m2] && data[m1] < data[m2]
|
||||
if data[m1].literal < data[m0].literal {
|
||||
data[m1], data[m0] = data[m0], data[m1]
|
||||
}
|
||||
}
|
||||
// now data[m0] <= data[m1] <= data[m2]
|
||||
}
|
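The sorter above follows the standard introsort pattern: quicksort with a recursion budget of 2*ceil(lg(n+1)), falling back to heapsort when the budget runs out and to Shell/insertion sort for small ranges. Below is a minimal standalone sketch of that depth budget and of producing the same ordering with the standard library; the literalNode type here is an assumed stand-in for illustration, not the vendored one.

package main

import (
	"fmt"
	"sort"
)

// literalNode mirrors the shape of the vendored type for illustration only.
type literalNode struct {
	literal uint16
	freq    uint16
}

// maxDepth returns 2*ceil(lg(n+1)), the recursion budget before the
// specialized quicksort above switches to heapsort.
func maxDepth(n int) int {
	var depth int
	for i := n; i > 0; i >>= 1 {
		depth++
	}
	return depth * 2
}

func main() {
	nodes := []literalNode{{literal: 'c', freq: 3}, {literal: 'a', freq: 9}, {literal: 'b', freq: 1}}
	// sort.Slice yields the same ordering sortByLiteral produces, at the cost
	// of an indirect comparison call per element pair.
	sort.Slice(nodes, func(i, j int) bool { return nodes[i].literal < nodes[j].literal })
	fmt.Println(nodes, "depth budget for n=1000:", maxDepth(1000))
}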
829 gateway/vendor/github.com/klauspost/compress/flate/inflate.go (generated, vendored, new file)
@@ -0,0 +1,829 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package flate implements the DEFLATE compressed data format, described in
|
||||
// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
|
||||
// formats.
|
||||
package flate
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/flate"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/bits"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
maxCodeLen = 16 // max length of Huffman code
|
||||
maxCodeLenMask = 15 // mask for max length of Huffman code
|
||||
// The next three numbers come from the RFC section 3.2.7, with the
|
||||
// additional proviso in section 3.2.5 which implies that distance codes
|
||||
// 30 and 31 should never occur in compressed data.
|
||||
maxNumLit = 286
|
||||
maxNumDist = 30
|
||||
numCodes = 19 // number of codes in Huffman meta-code
|
||||
|
||||
debugDecode = false
|
||||
)
|
||||
|
||||
// Value of length - 3 and extra bits.
|
||||
type lengthExtra struct {
|
||||
length, extra uint8
|
||||
}
|
||||
|
||||
var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
|
||||
|
||||
var bitMask32 = [32]uint32{
|
||||
0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
|
||||
0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
|
||||
0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
|
||||
0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
|
||||
} // up to 32 bits
|
||||
|
||||
// Initialize the fixedHuffmanDecoder only once upon first use.
|
||||
var fixedOnce sync.Once
|
||||
var fixedHuffmanDecoder huffmanDecoder
|
||||
|
||||
// A CorruptInputError reports the presence of corrupt input at a given offset.
|
||||
type CorruptInputError = flate.CorruptInputError
|
||||
|
||||
// An InternalError reports an error in the flate code itself.
|
||||
type InternalError string
|
||||
|
||||
func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
|
||||
|
||||
// A ReadError reports an error encountered while reading input.
|
||||
//
|
||||
// Deprecated: No longer returned.
|
||||
type ReadError = flate.ReadError
|
||||
|
||||
// A WriteError reports an error encountered while writing output.
|
||||
//
|
||||
// Deprecated: No longer returned.
|
||||
type WriteError = flate.WriteError
|
||||
|
||||
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
|
||||
// switch to a new underlying Reader. This permits reusing a ReadCloser
|
||||
// instead of allocating a new one.
|
||||
type Resetter interface {
|
||||
// Reset discards any buffered data and resets the Resetter as if it was
|
||||
// newly initialized with the given reader.
|
||||
Reset(r io.Reader, dict []byte) error
|
||||
}
|
||||
|
||||
// The data structure for decoding Huffman tables is based on that of
|
||||
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
|
||||
// For codes smaller than the table width, there are multiple entries
|
||||
// (each combination of trailing bits has the same value). For codes
|
||||
// larger than the table width, the table contains a link to an overflow
|
||||
// table. The width of each entry in the link table is the maximum code
|
||||
// size minus the chunk width.
|
||||
//
|
||||
// Note that you can do a lookup in the table even without all bits
|
||||
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
|
||||
// have the property that shorter codes come before longer ones, the
|
||||
// bit length estimate in the result is a lower bound on the actual
|
||||
// number of bits.
|
||||
//
|
||||
// See the following:
|
||||
// http://www.gzip.org/algorithm.txt
|
||||
|
||||
// chunk & 15 is number of bits
|
||||
// chunk >> 4 is value, including table link
|
||||
|
||||
const (
|
||||
huffmanChunkBits = 9
|
||||
huffmanNumChunks = 1 << huffmanChunkBits
|
||||
huffmanCountMask = 15
|
||||
huffmanValueShift = 4
|
||||
)
|
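As the comments above note, each 16-bit chunk packs a code length in its low 4 bits and the decoded value (or link-table index) in the remaining bits. A tiny standalone illustration of that packing follows; only the two mask and shift values are taken from the code above.

package main

import "fmt"

const (
	huffmanCountMask  = 15
	huffmanValueShift = 4
)

func main() {
	symbol, codeLen := uint16(257), uint16(9)
	// Pack: value in the high bits, code length in the low 4 bits.
	chunk := symbol<<huffmanValueShift | codeLen
	// Unpack: chunk & 15 is the number of bits, chunk >> 4 is the value.
	fmt.Println(chunk&huffmanCountMask, chunk>>huffmanValueShift) // 9 257
}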
||||
|
||||
type huffmanDecoder struct {
|
||||
maxRead int // the maximum number of bits we can read and not overread
|
||||
chunks *[huffmanNumChunks]uint16 // chunks as described above
|
||||
links [][]uint16 // overflow links
|
||||
linkMask uint32 // mask the width of the link table
|
||||
}
|
||||
|
||||
// Initialize Huffman decoding tables from array of code lengths.
|
||||
// Following this function, h is guaranteed to be initialized into a complete
|
||||
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
|
||||
// degenerate case where the tree has only a single symbol with length 1. Empty
|
||||
// trees are permitted.
|
||||
func (h *huffmanDecoder) init(lengths []int) bool {
|
||||
// Sanity enables additional runtime tests during Huffman
|
||||
// table construction. It's intended to be used during
|
||||
// development to supplement the currently ad-hoc unit tests.
|
||||
const sanity = false
|
||||
|
||||
if h.chunks == nil {
|
||||
h.chunks = new([huffmanNumChunks]uint16)
|
||||
}
|
||||
|
||||
if h.maxRead != 0 {
|
||||
*h = huffmanDecoder{chunks: h.chunks, links: h.links}
|
||||
}
|
||||
|
||||
// Count number of codes of each length,
|
||||
// compute maxRead and max length.
|
||||
var count [maxCodeLen]int
|
||||
var min, max int
|
||||
for _, n := range lengths {
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
if min == 0 || n < min {
|
||||
min = n
|
||||
}
|
||||
if n > max {
|
||||
max = n
|
||||
}
|
||||
count[n&maxCodeLenMask]++
|
||||
}
|
||||
|
||||
// Empty tree. The decompressor.huffSym function will fail later if the tree
|
||||
// is used. Technically, an empty tree is only valid for the HDIST tree and
|
||||
// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
|
||||
// is guaranteed to fail since it will attempt to use the tree to decode the
|
||||
// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
|
||||
// guaranteed to fail later since the compressed data section must be
|
||||
// composed of at least one symbol (the end-of-block marker).
|
||||
if max == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
code := 0
|
||||
var nextcode [maxCodeLen]int
|
||||
for i := min; i <= max; i++ {
|
||||
code <<= 1
|
||||
nextcode[i&maxCodeLenMask] = code
|
||||
code += count[i&maxCodeLenMask]
|
||||
}
|
||||
|
||||
// Check that the coding is complete (i.e., that we've
|
||||
// assigned all 2-to-the-max possible bit sequences).
|
||||
// Exception: To be compatible with zlib, we also need to
|
||||
// accept degenerate single-code codings. See also
|
||||
// TestDegenerateHuffmanCoding.
|
||||
if code != 1<<uint(max) && !(code == 1 && max == 1) {
|
||||
if debugDecode {
|
||||
fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
h.maxRead = min
|
||||
|
||||
chunks := h.chunks[:]
|
||||
for i := range chunks {
|
||||
chunks[i] = 0
|
||||
}
|
||||
|
||||
if max > huffmanChunkBits {
|
||||
numLinks := 1 << (uint(max) - huffmanChunkBits)
|
||||
h.linkMask = uint32(numLinks - 1)
|
||||
|
||||
// create link tables
|
||||
link := nextcode[huffmanChunkBits+1] >> 1
|
||||
if cap(h.links) < huffmanNumChunks-link {
|
||||
h.links = make([][]uint16, huffmanNumChunks-link)
|
||||
} else {
|
||||
h.links = h.links[:huffmanNumChunks-link]
|
||||
}
|
||||
for j := uint(link); j < huffmanNumChunks; j++ {
|
||||
reverse := int(bits.Reverse16(uint16(j)))
|
||||
reverse >>= uint(16 - huffmanChunkBits)
|
||||
off := j - uint(link)
|
||||
if sanity && h.chunks[reverse] != 0 {
|
||||
panic("impossible: overwriting existing chunk")
|
||||
}
|
||||
h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
|
||||
if cap(h.links[off]) < numLinks {
|
||||
h.links[off] = make([]uint16, numLinks)
|
||||
} else {
|
||||
h.links[off] = h.links[off][:numLinks]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
h.links = h.links[:0]
|
||||
}
|
||||
|
||||
for i, n := range lengths {
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
code := nextcode[n]
|
||||
nextcode[n]++
|
||||
chunk := uint16(i<<huffmanValueShift | n)
|
||||
reverse := int(bits.Reverse16(uint16(code)))
|
||||
reverse >>= uint(16 - n)
|
||||
if n <= huffmanChunkBits {
|
||||
for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
|
||||
// We should never need to overwrite
|
||||
// an existing chunk. Also, 0 is
|
||||
// never a valid chunk, because the
|
||||
// lower 4 "count" bits should be
|
||||
// between 1 and 15.
|
||||
if sanity && h.chunks[off] != 0 {
|
||||
panic("impossible: overwriting existing chunk")
|
||||
}
|
||||
h.chunks[off] = chunk
|
||||
}
|
||||
} else {
|
||||
j := reverse & (huffmanNumChunks - 1)
|
||||
if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
|
||||
// Longer codes should have been
|
||||
// associated with a link table above.
|
||||
panic("impossible: not an indirect chunk")
|
||||
}
|
||||
value := h.chunks[j] >> huffmanValueShift
|
||||
linktab := h.links[value]
|
||||
reverse >>= huffmanChunkBits
|
||||
for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
|
||||
if sanity && linktab[off] != 0 {
|
||||
panic("impossible: overwriting existing chunk")
|
||||
}
|
||||
linktab[off] = chunk
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sanity {
|
||||
// Above we've sanity checked that we never overwrote
|
||||
// an existing entry. Here we additionally check that
|
||||
// we filled the tables completely.
|
||||
for i, chunk := range h.chunks {
|
||||
if chunk == 0 {
|
||||
// As an exception, in the degenerate
|
||||
// single-code case, we allow odd
|
||||
// chunks to be missing.
|
||||
if code == 1 && i%2 == 1 {
|
||||
continue
|
||||
}
|
||||
panic("impossible: missing chunk")
|
||||
}
|
||||
}
|
||||
for _, linktab := range h.links {
|
||||
for _, chunk := range linktab {
|
||||
if chunk == 0 {
|
||||
panic("impossible: missing chunk")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Reader is the actual read interface needed by NewReader.
|
||||
// If the passed in io.Reader does not also have ReadByte,
|
||||
// the NewReader will introduce its own buffering.
|
||||
type Reader interface {
|
||||
io.Reader
|
||||
io.ByteReader
|
||||
}
|
||||
|
||||
type step uint8
|
||||
|
||||
const (
|
||||
copyData step = iota + 1
|
||||
nextBlock
|
||||
huffmanBytesBuffer
|
||||
huffmanBytesReader
|
||||
huffmanBufioReader
|
||||
huffmanStringsReader
|
||||
huffmanGenericReader
|
||||
)
|
||||
|
||||
// Decompress state.
|
||||
type decompressor struct {
|
||||
// Input source.
|
||||
r Reader
|
||||
roffset int64
|
||||
|
||||
// Huffman decoders for literal/length, distance.
|
||||
h1, h2 huffmanDecoder
|
||||
|
||||
// Length arrays used to define Huffman codes.
|
||||
bits *[maxNumLit + maxNumDist]int
|
||||
codebits *[numCodes]int
|
||||
|
||||
// Output history, buffer.
|
||||
dict dictDecoder
|
||||
|
||||
// Next step in the decompression,
|
||||
// and decompression state.
|
||||
step step
|
||||
stepState int
|
||||
err error
|
||||
toRead []byte
|
||||
hl, hd *huffmanDecoder
|
||||
copyLen int
|
||||
copyDist int
|
||||
|
||||
// Temporary buffer (avoids repeated allocation).
|
||||
buf [4]byte
|
||||
|
||||
// Input bits, in top of b.
|
||||
b uint32
|
||||
|
||||
nb uint
|
||||
final bool
|
||||
}
|
||||
|
||||
func (f *decompressor) nextBlock() {
|
||||
for f.nb < 1+2 {
|
||||
if f.err = f.moreBits(); f.err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
f.final = f.b&1 == 1
|
||||
f.b >>= 1
|
||||
typ := f.b & 3
|
||||
f.b >>= 2
|
||||
f.nb -= 1 + 2
|
||||
switch typ {
|
||||
case 0:
|
||||
f.dataBlock()
|
||||
if debugDecode {
|
||||
fmt.Println("stored block")
|
||||
}
|
||||
case 1:
|
||||
// compressed, fixed Huffman tables
|
||||
f.hl = &fixedHuffmanDecoder
|
||||
f.hd = nil
|
||||
f.huffmanBlockDecoder()
|
||||
if debugDecode {
|
||||
fmt.Println("predefinied huffman block")
|
||||
}
|
||||
case 2:
|
||||
// compressed, dynamic Huffman tables
|
||||
if f.err = f.readHuffman(); f.err != nil {
|
||||
break
|
||||
}
|
||||
f.hl = &f.h1
|
||||
f.hd = &f.h2
|
||||
f.huffmanBlockDecoder()
|
||||
if debugDecode {
|
||||
fmt.Println("dynamic huffman block")
|
||||
}
|
||||
default:
|
||||
// 3 is reserved.
|
||||
if debugDecode {
|
||||
fmt.Println("reserved data block encountered")
|
||||
}
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *decompressor) Read(b []byte) (int, error) {
|
||||
for {
|
||||
if len(f.toRead) > 0 {
|
||||
n := copy(b, f.toRead)
|
||||
f.toRead = f.toRead[n:]
|
||||
if len(f.toRead) == 0 {
|
||||
return n, f.err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
if f.err != nil {
|
||||
return 0, f.err
|
||||
}
|
||||
|
||||
f.doStep()
|
||||
|
||||
if f.err != nil && len(f.toRead) == 0 {
|
||||
f.toRead = f.dict.readFlush() // Flush what's left in case of error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WriteTo implements the io.WriterTo interface for io.Copy and friends.
|
||||
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
|
||||
total := int64(0)
|
||||
flushed := false
|
||||
for {
|
||||
if len(f.toRead) > 0 {
|
||||
n, err := w.Write(f.toRead)
|
||||
total += int64(n)
|
||||
if err != nil {
|
||||
f.err = err
|
||||
return total, err
|
||||
}
|
||||
if n != len(f.toRead) {
|
||||
return total, io.ErrShortWrite
|
||||
}
|
||||
f.toRead = f.toRead[:0]
|
||||
}
|
||||
if f.err != nil && flushed {
|
||||
if f.err == io.EOF {
|
||||
return total, nil
|
||||
}
|
||||
return total, f.err
|
||||
}
|
||||
if f.err == nil {
|
||||
f.doStep()
|
||||
}
|
||||
if len(f.toRead) == 0 && f.err != nil && !flushed {
|
||||
f.toRead = f.dict.readFlush() // Flush what's left in case of error
|
||||
flushed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *decompressor) Close() error {
|
||||
if f.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return f.err
|
||||
}
|
||||
|
||||
// RFC 1951 section 3.2.7.
|
||||
// Compression with dynamic Huffman codes
|
||||
|
||||
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
|
||||
|
||||
func (f *decompressor) readHuffman() error {
|
||||
// HLIT[5], HDIST[5], HCLEN[4].
|
||||
for f.nb < 5+5+4 {
|
||||
if err := f.moreBits(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
nlit := int(f.b&0x1F) + 257
|
||||
if nlit > maxNumLit {
|
||||
if debugDecode {
|
||||
fmt.Println("nlit > maxNumLit", nlit)
|
||||
}
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
f.b >>= 5
|
||||
ndist := int(f.b&0x1F) + 1
|
||||
if ndist > maxNumDist {
|
||||
if debugDecode {
|
||||
fmt.Println("ndist > maxNumDist", ndist)
|
||||
}
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
f.b >>= 5
|
||||
nclen := int(f.b&0xF) + 4
|
||||
// numCodes is 19, so nclen is always valid.
|
||||
f.b >>= 4
|
||||
f.nb -= 5 + 5 + 4
|
||||
|
||||
// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
|
||||
for i := 0; i < nclen; i++ {
|
||||
for f.nb < 3 {
|
||||
if err := f.moreBits(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
f.codebits[codeOrder[i]] = int(f.b & 0x7)
|
||||
f.b >>= 3
|
||||
f.nb -= 3
|
||||
}
|
||||
for i := nclen; i < len(codeOrder); i++ {
|
||||
f.codebits[codeOrder[i]] = 0
|
||||
}
|
||||
if !f.h1.init(f.codebits[0:]) {
|
||||
if debugDecode {
|
||||
fmt.Println("init codebits failed")
|
||||
}
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
|
||||
// HLIT + 257 code lengths, HDIST + 1 code lengths,
|
||||
// using the code length Huffman code.
|
||||
for i, n := 0, nlit+ndist; i < n; {
|
||||
x, err := f.huffSym(&f.h1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if x < 16 {
|
||||
// Actual length.
|
||||
f.bits[i] = x
|
||||
i++
|
||||
continue
|
||||
}
|
||||
// Repeat previous length or zero.
|
||||
var rep int
|
||||
var nb uint
|
||||
var b int
|
||||
switch x {
|
||||
default:
|
||||
return InternalError("unexpected length code")
|
||||
case 16:
|
||||
rep = 3
|
||||
nb = 2
|
||||
if i == 0 {
|
||||
if debugDecode {
|
||||
fmt.Println("i==0")
|
||||
}
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
b = f.bits[i-1]
|
||||
case 17:
|
||||
rep = 3
|
||||
nb = 3
|
||||
b = 0
|
||||
case 18:
|
||||
rep = 11
|
||||
nb = 7
|
||||
b = 0
|
||||
}
|
||||
for f.nb < nb {
|
||||
if err := f.moreBits(); err != nil {
|
||||
if debugDecode {
|
||||
fmt.Println("morebits:", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
|
||||
f.b >>= nb & regSizeMaskUint32
|
||||
f.nb -= nb
|
||||
if i+rep > n {
|
||||
if debugDecode {
|
||||
fmt.Println("i+rep > n", i, rep, n)
|
||||
}
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
for j := 0; j < rep; j++ {
|
||||
f.bits[i] = b
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
|
||||
if debugDecode {
|
||||
fmt.Println("init2 failed")
|
||||
}
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
|
||||
// As an optimization, we can initialize the maxRead bits to read at a time
|
||||
// for the HLIT tree to the length of the EOB marker since we know that
|
||||
// every block must terminate with one. This preserves the property that
|
||||
// we never read any extra bytes after the end of the DEFLATE stream.
|
||||
if f.h1.maxRead < f.bits[endBlockMarker] {
|
||||
f.h1.maxRead = f.bits[endBlockMarker]
|
||||
}
|
||||
if !f.final {
|
||||
// If not the final block, the smallest block possible is
|
||||
// a predefined table, BTYPE=01, with a single EOB marker.
|
||||
// This will take up 3 + 7 bits.
|
||||
f.h1.maxRead += 10
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
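readHuffman consumes the header fields from the low end of the bit buffer: mask off a field, then shift it away. The self-contained illustration below shows that LSB-first pattern with made-up field values; only the buffer layout is borrowed from the code above.

package main

import "fmt"

func main() {
	// Bits as they would sit in f.b after moreBits: HLIT in the low 5 bits,
	// HDIST in the next 5, HCLEN in the next 4 (values are made up).
	b := uint32(29) | uint32(4)<<5 | uint32(11)<<10
	nlit := int(b&0x1F) + 257 // 286
	b >>= 5
	ndist := int(b&0x1F) + 1 // 5
	b >>= 5
	nclen := int(b&0xF) + 4 // 15
	fmt.Println(nlit, ndist, nclen)
}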
||||
|
||||
// Copy a single uncompressed data block from input to output.
|
||||
func (f *decompressor) dataBlock() {
|
||||
// Uncompressed.
|
||||
// Discard current half-byte.
|
||||
left := (f.nb) & 7
|
||||
f.nb -= left
|
||||
f.b >>= left
|
||||
|
||||
offBytes := f.nb >> 3
|
||||
// Unfilled values will be overwritten.
|
||||
f.buf[0] = uint8(f.b)
|
||||
f.buf[1] = uint8(f.b >> 8)
|
||||
f.buf[2] = uint8(f.b >> 16)
|
||||
f.buf[3] = uint8(f.b >> 24)
|
||||
|
||||
f.roffset += int64(offBytes)
|
||||
f.nb, f.b = 0, 0
|
||||
|
||||
// Length then ones-complement of length.
|
||||
nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
|
||||
f.roffset += int64(nr)
|
||||
if err != nil {
|
||||
f.err = noEOF(err)
|
||||
return
|
||||
}
|
||||
n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
|
||||
nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
|
||||
if nn != ^n {
|
||||
if debugDecode {
|
||||
ncomp := ^n
|
||||
fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
|
||||
}
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.finishBlock()
|
||||
return
|
||||
}
|
||||
|
||||
f.copyLen = int(n)
|
||||
f.copyData()
|
||||
}
|
||||
|
||||
// copyData copies f.copyLen bytes from the underlying reader into f.hist.
|
||||
// It pauses for reads when f.hist is full.
|
||||
func (f *decompressor) copyData() {
|
||||
buf := f.dict.writeSlice()
|
||||
if len(buf) > f.copyLen {
|
||||
buf = buf[:f.copyLen]
|
||||
}
|
||||
|
||||
cnt, err := io.ReadFull(f.r, buf)
|
||||
f.roffset += int64(cnt)
|
||||
f.copyLen -= cnt
|
||||
f.dict.writeMark(cnt)
|
||||
if err != nil {
|
||||
f.err = noEOF(err)
|
||||
return
|
||||
}
|
||||
|
||||
if f.dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.step = copyData
|
||||
return
|
||||
}
|
||||
f.finishBlock()
|
||||
}
|
||||
|
||||
func (f *decompressor) finishBlock() {
|
||||
if f.final {
|
||||
if f.dict.availRead() > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
}
|
||||
f.err = io.EOF
|
||||
}
|
||||
f.step = nextBlock
|
||||
}
|
||||
|
||||
func (f *decompressor) doStep() {
|
||||
switch f.step {
|
||||
case copyData:
|
||||
f.copyData()
|
||||
case nextBlock:
|
||||
f.nextBlock()
|
||||
case huffmanBytesBuffer:
|
||||
f.huffmanBytesBuffer()
|
||||
case huffmanBytesReader:
|
||||
f.huffmanBytesReader()
|
||||
case huffmanBufioReader:
|
||||
f.huffmanBufioReader()
|
||||
case huffmanStringsReader:
|
||||
f.huffmanStringsReader()
|
||||
case huffmanGenericReader:
|
||||
f.huffmanGenericReader()
|
||||
default:
|
||||
panic("BUG: unexpected step state")
|
||||
}
|
||||
}
|
||||
|
||||
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
|
||||
func noEOF(e error) error {
|
||||
if e == io.EOF {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (f *decompressor) moreBits() error {
|
||||
c, err := f.r.ReadByte()
|
||||
if err != nil {
|
||||
return noEOF(err)
|
||||
}
|
||||
f.roffset++
|
||||
f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
|
||||
f.nb += 8
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the next Huffman-encoded symbol from f according to h.
|
||||
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
|
||||
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
|
||||
// with single element, huffSym must error on these two edge cases. In both
|
||||
// cases, the chunks slice will be 0 for the invalid sequence, leading it
|
||||
// to satisfy the n == 0 check below.
|
||||
n := uint(h.maxRead)
|
||||
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
|
||||
// but is smart enough to keep local variables in registers, so use nb and b,
|
||||
// inline call to moreBits and reassign b,nb back to f on return.
|
||||
nb, b := f.nb, f.b
|
||||
for {
|
||||
for nb < n {
|
||||
c, err := f.r.ReadByte()
|
||||
if err != nil {
|
||||
f.b = b
|
||||
f.nb = nb
|
||||
return 0, noEOF(err)
|
||||
}
|
||||
f.roffset++
|
||||
b |= uint32(c) << (nb & regSizeMaskUint32)
|
||||
nb += 8
|
||||
}
|
||||
chunk := h.chunks[b&(huffmanNumChunks-1)]
|
||||
n = uint(chunk & huffmanCountMask)
|
||||
if n > huffmanChunkBits {
|
||||
chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
|
||||
n = uint(chunk & huffmanCountMask)
|
||||
}
|
||||
if n <= nb {
|
||||
if n == 0 {
|
||||
f.b = b
|
||||
f.nb = nb
|
||||
if debugDecode {
|
||||
fmt.Println("huffsym: n==0")
|
||||
}
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return 0, f.err
|
||||
}
|
||||
f.b = b >> (n & regSizeMaskUint32)
|
||||
f.nb = nb - n
|
||||
return int(chunk >> huffmanValueShift), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeReader(r io.Reader) Reader {
|
||||
if rr, ok := r.(Reader); ok {
|
||||
return rr
|
||||
}
|
||||
return bufio.NewReader(r)
|
||||
}
|
||||
|
||||
func fixedHuffmanDecoderInit() {
|
||||
fixedOnce.Do(func() {
|
||||
// These come from the RFC section 3.2.6.
|
||||
var bits [288]int
|
||||
for i := 0; i < 144; i++ {
|
||||
bits[i] = 8
|
||||
}
|
||||
for i := 144; i < 256; i++ {
|
||||
bits[i] = 9
|
||||
}
|
||||
for i := 256; i < 280; i++ {
|
||||
bits[i] = 7
|
||||
}
|
||||
for i := 280; i < 288; i++ {
|
||||
bits[i] = 8
|
||||
}
|
||||
fixedHuffmanDecoder.init(bits[:])
|
||||
})
|
||||
}
|
||||
|
||||
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
|
||||
*f = decompressor{
|
||||
r: makeReader(r),
|
||||
bits: f.bits,
|
||||
codebits: f.codebits,
|
||||
h1: f.h1,
|
||||
h2: f.h2,
|
||||
dict: f.dict,
|
||||
step: nextBlock,
|
||||
}
|
||||
f.dict.init(maxMatchOffset, dict)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewReader returns a new ReadCloser that can be used
|
||||
// to read the uncompressed version of r.
|
||||
// If r does not also implement io.ByteReader,
|
||||
// the decompressor may read more data than necessary from r.
|
||||
// It is the caller's responsibility to call Close on the ReadCloser
|
||||
// when finished reading.
|
||||
//
|
||||
// The ReadCloser returned by NewReader also implements Resetter.
|
||||
func NewReader(r io.Reader) io.ReadCloser {
|
||||
fixedHuffmanDecoderInit()
|
||||
|
||||
var f decompressor
|
||||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = nextBlock
|
||||
f.dict.init(maxMatchOffset, nil)
|
||||
return &f
|
||||
}
|
||||
|
||||
// NewReaderDict is like NewReader but initializes the reader
|
||||
// with a preset dictionary. The returned Reader behaves as if
|
||||
// the uncompressed data stream started with the given dictionary,
|
||||
// which has already been read. NewReaderDict is typically used
|
||||
// to read data compressed by NewWriterDict.
|
||||
//
|
||||
// The ReadCloser returned by NewReaderDict also implements Resetter.
|
||||
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
|
||||
fixedHuffmanDecoderInit()
|
||||
|
||||
var f decompressor
|
||||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = nextBlock
|
||||
f.dict.init(maxMatchOffset, dict)
|
||||
return &f
|
||||
}
|
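A minimal usage sketch for the reader side of this package, round-tripping a buffer and then reusing the same reader via Resetter. It assumes the vendored import path github.com/klauspost/compress/flate resolves in the consuming module; error handling is abbreviated.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Produce some DEFLATE data to decompress.
	var buf bytes.Buffer
	zw, err := flate.NewWriter(&buf, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	zw.Write([]byte("hello, deflate"))
	zw.Close()

	// NewReader returns an io.ReadCloser that also implements Resetter.
	zr := flate.NewReader(bytes.NewReader(buf.Bytes()))
	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	// Reuse the reader for a second stream instead of allocating a new one.
	if rs, ok := zr.(flate.Resetter); ok {
		rs.Reset(bytes.NewReader(buf.Bytes()), nil)
		out, _ = io.ReadAll(zr)
		fmt.Println(string(out))
	}
	zr.Close()
}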
1283 gateway/vendor/github.com/klauspost/compress/flate/inflate_gen.go (generated, vendored, new file)
File diff suppressed because it is too large
241 gateway/vendor/github.com/klauspost/compress/flate/level1.go (generated, vendored, new file)
@@ -0,0 +1,241 @@
|
||||
package flate
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// fastGen maintains the table for matches,
|
||||
// and the previous byte block for level 2.
|
||||
// This is the generic implementation.
|
||||
type fastEncL1 struct {
|
||||
fastGen
|
||||
table [tableSize]tableEntry
|
||||
}
|
||||
|
||||
// Encode encodes src into dst using the level 1 (fastest) strategy.
|
||||
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
hashBytes = 5
|
||||
)
|
||||
if debugDeflate && e.cur < 0 {
|
||||
panic(fmt.Sprint("e.cur < 0: ", e.cur))
|
||||
}
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i].offset
|
||||
if v <= minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + maxMatchOffset
|
||||
}
|
||||
e.table[i].offset = v
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
nextEmit := s
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load6432(src, s)
|
||||
|
||||
for {
|
||||
const skipLog = 5
|
||||
const doEvery = 2
|
||||
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
for {
|
||||
nextHash := hashLen(cv, tableBits, hashBytes)
|
||||
candidate = e.table[nextHash]
|
||||
nextS = s + doEvery + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
now := load6432(src, nextS)
|
||||
e.table[nextHash] = tableEntry{offset: s + e.cur}
|
||||
nextHash = hashLen(now, tableBits, hashBytes)
|
||||
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
|
||||
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
|
||||
break
|
||||
}
|
||||
|
||||
// Do one right away...
|
||||
cv = now
|
||||
s = nextS
|
||||
nextS++
|
||||
candidate = e.table[nextHash]
|
||||
now >>= 8
|
||||
e.table[nextHash] = tableEntry{offset: s + e.cur}
|
||||
|
||||
offset = s - (candidate.offset - e.cur)
|
||||
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
|
||||
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
|
||||
break
|
||||
}
|
||||
cv = now
|
||||
s = nextS
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
t := candidate.offset - e.cur
|
||||
var l = int32(4)
|
||||
if false {
|
||||
l = e.matchlenLong(s+4, t+4, src) + 4
|
||||
} else {
|
||||
// inlined:
|
||||
a := src[s+4:]
|
||||
b := src[t+4:]
|
||||
for len(a) >= 8 {
|
||||
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
|
||||
l += int32(bits.TrailingZeros64(diff) >> 3)
|
||||
break
|
||||
}
|
||||
l += 8
|
||||
a = a[8:]
|
||||
b = b[8:]
|
||||
}
|
||||
if len(a) < 8 {
|
||||
b = b[:len(a)]
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
break
|
||||
}
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save the match found
|
||||
if false {
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
} else {
|
||||
// Inlined...
|
||||
xoffset := uint32(s - t - baseMatchOffset)
|
||||
xlength := l
|
||||
oc := offsetCode(xoffset)
|
||||
xoffset |= oc << 16
|
||||
for xlength > 0 {
|
||||
xl := xlength
|
||||
if xl > 258 {
|
||||
if xl > 258+baseMatchLength {
|
||||
xl = 258
|
||||
} else {
|
||||
xl = 258 - baseMatchLength
|
||||
}
|
||||
}
|
||||
xlength -= xl
|
||||
xl -= baseMatchLength
|
||||
dst.extraHist[lengthCodes1[uint8(xl)]]++
|
||||
dst.offHist[oc]++
|
||||
dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
if s >= sLimit {
|
||||
// Index first pair after match end.
|
||||
if int(s+l+8) < len(src) {
|
||||
cv := load6432(src, s)
|
||||
e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-2 and at s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load6432(src, s-2)
|
||||
o := e.cur + s - 2
|
||||
prevHash := hashLen(x, tableBits, hashBytes)
|
||||
e.table[prevHash] = tableEntry{offset: o}
|
||||
x >>= 16
|
||||
currHash := hashLen(x, tableBits, hashBytes)
|
||||
candidate = e.table[currHash]
|
||||
e.table[currHash] = tableEntry{offset: o + 2}
|
||||
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
|
||||
cv = x >> 8
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
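The inlined match-extension loop above compares eight bytes at a time and uses the trailing zero count of the XOR to find the first differing byte. Below is a standalone version of that trick; the helper is written only for illustration and is not the package's matchLen.

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen reports how many leading bytes of a and b are equal, using the same
// 8-bytes-at-a-time XOR plus TrailingZeros64 trick as the inlined loop above.
func matchLen(a, b []byte) int {
	if len(b) < len(a) {
		a, b = b, a // ensure a is the shorter slice
	}
	var n int
	for len(a) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			// The lowest set bit marks the first mismatching byte.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("abcdefgh12345"), []byte("abcdefgh12399"))) // 11
}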
214 gateway/vendor/github.com/klauspost/compress/flate/level2.go (generated, vendored, new file)
@@ -0,0 +1,214 @@
|
||||
package flate
|
||||
|
||||
import "fmt"
|
||||
|
||||
// fastGen maintains the table for matches,
|
||||
// and the previous byte block for level 2.
|
||||
// This is the generic implementation.
|
||||
type fastEncL2 struct {
|
||||
fastGen
|
||||
table [bTableSize]tableEntry
|
||||
}
|
||||
|
||||
// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks, giving better compression at a small slowdown.
|
||||
func (e *fastEncL2) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
hashBytes = 5
|
||||
)
|
||||
|
||||
if debugDeflate && e.cur < 0 {
|
||||
panic(fmt.Sprint("e.cur < 0: ", e.cur))
|
||||
}
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i].offset
|
||||
if v <= minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + maxMatchOffset
|
||||
}
|
||||
e.table[i].offset = v
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
nextEmit := s
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load6432(src, s)
|
||||
for {
|
||||
// When should we start skipping if we haven't found matches in a long while.
|
||||
const skipLog = 5
|
||||
const doEvery = 2
|
||||
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
for {
|
||||
nextHash := hashLen(cv, bTableBits, hashBytes)
|
||||
s = nextS
|
||||
nextS = s + doEvery + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidate = e.table[nextHash]
|
||||
now := load6432(src, nextS)
|
||||
e.table[nextHash] = tableEntry{offset: s + e.cur}
|
||||
nextHash = hashLen(now, bTableBits, hashBytes)
|
||||
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
|
||||
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
|
||||
break
|
||||
}
|
||||
|
||||
// Do one right away...
|
||||
cv = now
|
||||
s = nextS
|
||||
nextS++
|
||||
candidate = e.table[nextHash]
|
||||
now >>= 8
|
||||
e.table[nextHash] = tableEntry{offset: s + e.cur}
|
||||
|
||||
offset = s - (candidate.offset - e.cur)
|
||||
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
|
||||
break
|
||||
}
|
||||
cv = now
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
t := candidate.offset - e.cur
|
||||
l := e.matchlenLong(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
|
||||
if s >= sLimit {
|
||||
// Index first pair after match end.
|
||||
if int(s+l+8) < len(src) {
|
||||
cv := load6432(src, s)
|
||||
e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// Store every second hash in-between, but offset by 1.
|
||||
for i := s - l + 2; i < s-5; i += 7 {
|
||||
x := load6432(src, i)
|
||||
nextHash := hashLen(x, bTableBits, hashBytes)
|
||||
e.table[nextHash] = tableEntry{offset: e.cur + i}
|
||||
// Skip one
|
||||
x >>= 16
|
||||
nextHash = hashLen(x, bTableBits, hashBytes)
|
||||
e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
|
||||
// Skip one
|
||||
x >>= 16
|
||||
nextHash = hashLen(x, bTableBits, hashBytes)
|
||||
e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-2 to s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load6432(src, s-2)
|
||||
o := e.cur + s - 2
|
||||
prevHash := hashLen(x, bTableBits, hashBytes)
|
||||
prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
|
||||
e.table[prevHash] = tableEntry{offset: o}
|
||||
e.table[prevHash2] = tableEntry{offset: o + 1}
|
||||
currHash := hashLen(x>>16, bTableBits, hashBytes)
|
||||
candidate = e.table[currHash]
|
||||
e.table[currHash] = tableEntry{offset: o + 2}
|
||||
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
|
||||
cv = x >> 24
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
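Levels 1 and 2 follow the same find-a-candidate loop: hash the next few bytes, look up the last position with that hash, and confirm the candidate with a cheap 32-bit comparison before extending the match. The toy standalone version below shows that loop shape; the multiplicative constant and table size are arbitrary illustrations, not the package's values.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	tableBits = 15
	tableSize = 1 << tableBits
	// hashMul is an arbitrary odd 64-bit constant for multiplicative hashing;
	// the vendored code uses its own prime, this one is only for illustration.
	hashMul = 0x9E3779B185EBCA87
)

// hash5 hashes the low 5 bytes of v into a table index, in the spirit of the
// hashLen(cv, tableBits, hashBytes) calls in the encoders above.
func hash5(v uint64) uint32 {
	return uint32(((v << (64 - 40)) * hashMul) >> (64 - tableBits))
}

func main() {
	src := []byte("abcdeXYZWabcdefgh")
	var table [tableSize]int32 // last position seen for each hash, stored +1 so zero means empty
	for i := 0; i+8 <= len(src); i++ {
		cv := binary.LittleEndian.Uint64(src[i:])
		h := hash5(cv)
		// Verify the candidate with a 32-bit load before trusting the hash.
		if cand := table[h]; cand != 0 &&
			binary.LittleEndian.Uint32(src[cand-1:]) == uint32(cv) {
			fmt.Printf("src[%d:] repeats the bytes at src[%d:]\n", i, cand-1)
		}
		table[h] = int32(i) + 1
	}
}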
241 gateway/vendor/github.com/klauspost/compress/flate/level3.go (generated, vendored, new file)
@@ -0,0 +1,241 @@
|
||||
package flate
|
||||
|
||||
import "fmt"
|
||||
|
||||
// fastEncL3
|
||||
type fastEncL3 struct {
|
||||
fastGen
|
||||
table [1 << 16]tableEntryPrev
|
||||
}
|
||||
|
||||
// Encode uses a similar algorithm to level 2, will check up to two candidates.
|
||||
func (e *fastEncL3) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
tableBits = 16
|
||||
tableSize = 1 << tableBits
|
||||
hashBytes = 5
|
||||
)
|
||||
|
||||
if debugDeflate && e.cur < 0 {
|
||||
panic(fmt.Sprint("e.cur < 0: ", e.cur))
|
||||
}
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntryPrev{}
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i]
|
||||
if v.Cur.offset <= minOff {
|
||||
v.Cur.offset = 0
|
||||
} else {
|
||||
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
|
||||
}
|
||||
if v.Prev.offset <= minOff {
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
|
||||
}
|
||||
e.table[i] = v
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
|
||||
// Skip if too small.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
nextEmit := s
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load6432(src, s)
|
||||
for {
|
||||
const skipLog = 7
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
for {
|
||||
nextHash := hashLen(cv, tableBits, hashBytes)
|
||||
s = nextS
|
||||
nextS = s + 1 + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidates := e.table[nextHash]
|
||||
now := load6432(src, nextS)
|
||||
|
||||
// Safe offset distance until s + 4...
|
||||
minOffset := e.cur + s - (maxMatchOffset - 4)
|
||||
e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
|
||||
|
||||
// Check both candidates
|
||||
candidate = candidates.Cur
|
||||
if candidate.offset < minOffset {
|
||||
cv = now
|
||||
// Previous will also be invalid, we have nothing.
|
||||
continue
|
||||
}
|
||||
|
||||
if uint32(cv) == load3232(src, candidate.offset-e.cur) {
|
||||
if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
|
||||
break
|
||||
}
|
||||
// Both match and are valid, pick longest.
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
o2 := s - (candidates.Prev.offset - e.cur)
|
||||
l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
|
||||
if l2 > l1 {
|
||||
candidate = candidates.Prev
|
||||
}
|
||||
break
|
||||
} else {
|
||||
// We only check if value mismatches.
|
||||
// Offset will always be invalid in other cases.
|
||||
candidate = candidates.Prev
|
||||
if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
|
||||
break
|
||||
}
|
||||
}
|
||||
cv = now
|
||||
}
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//
|
||||
t := candidate.offset - e.cur
|
||||
l := e.matchlenLong(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
|
||||
if s >= sLimit {
|
||||
t += l
|
||||
// Index first pair after match end.
|
||||
if int(t+8) < len(src) && t > 0 {
|
||||
cv = load6432(src, t)
|
||||
nextHash := hashLen(cv, tableBits, hashBytes)
|
||||
e.table[nextHash] = tableEntryPrev{
|
||||
Prev: e.table[nextHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + t},
|
||||
}
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// Store every 5th hash in-between.
|
||||
for i := s - l + 2; i < s-5; i += 6 {
|
||||
nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
|
||||
e.table[nextHash] = tableEntryPrev{
|
||||
Prev: e.table[nextHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + i}}
|
||||
}
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-2 to s.
|
||||
x := load6432(src, s-2)
|
||||
prevHash := hashLen(x, tableBits, hashBytes)
|
||||
|
||||
e.table[prevHash] = tableEntryPrev{
|
||||
Prev: e.table[prevHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 2},
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hashLen(x, tableBits, hashBytes)
|
||||
|
||||
e.table[prevHash] = tableEntryPrev{
|
||||
Prev: e.table[prevHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 1},
|
||||
}
|
||||
x >>= 8
|
||||
currHash := hashLen(x, tableBits, hashBytes)
|
||||
candidates := e.table[currHash]
|
||||
cv = x
|
||||
e.table[currHash] = tableEntryPrev{
|
||||
Prev: candidates.Cur,
|
||||
Cur: tableEntry{offset: s + e.cur},
|
||||
}
|
||||
|
||||
// Check both candidates
|
||||
candidate = candidates.Cur
|
||||
minOffset := e.cur + s - (maxMatchOffset - 4)
|
||||
|
||||
if candidate.offset > minOffset {
|
||||
if uint32(cv) == load3232(src, candidate.offset-e.cur) {
|
||||
// Found a match...
|
||||
continue
|
||||
}
|
||||
candidate = candidates.Prev
|
||||
if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
|
||||
// Match at prev...
|
||||
continue
|
||||
}
|
||||
}
|
||||
cv = x >> 8
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
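Level 3 widens each hash bucket to remember the two most recent positions (Cur and Prev), so a second candidate is available when the first one is stale or gives a shorter match. A compact sketch of just that bookkeeping, using assumed stand-in types rather than the vendored ones:

package main

import "fmt"

type tableEntry struct{ offset int32 } // assumed shape, for illustration
type tableEntryPrev struct{ Cur, Prev tableEntry }

// insert pushes the current entry into Prev, so the last two positions that
// hashed to this bucket both remain available as match candidates.
func insert(bucket *tableEntryPrev, off int32) {
	bucket.Prev = bucket.Cur
	bucket.Cur = tableEntry{offset: off}
}

func main() {
	var b tableEntryPrev
	for _, off := range []int32{10, 42, 99} {
		insert(&b, off)
	}
	fmt.Printf("candidates to try: cur=%d prev=%d\n", b.Cur.offset, b.Prev.offset) // 99 and 42
}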
221 gateway/vendor/github.com/klauspost/compress/flate/level4.go (generated, vendored, new file)
@@ -0,0 +1,221 @@
|
||||
package flate
|
||||
|
||||
import "fmt"
|
||||
|
||||
type fastEncL4 struct {
|
||||
fastGen
|
||||
table [tableSize]tableEntry
|
||||
bTable [tableSize]tableEntry
|
||||
}
|
||||
|
||||
func (e *fastEncL4) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
hashShortBytes = 4
|
||||
)
|
||||
if debugDeflate && e.cur < 0 {
|
||||
panic(fmt.Sprint("e.cur < 0: ", e.cur))
|
||||
}
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
e.bTable[i] = tableEntry{}
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i].offset
|
||||
if v <= minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + maxMatchOffset
|
||||
}
|
||||
e.table[i].offset = v
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
v := e.bTable[i].offset
|
||||
if v <= minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + maxMatchOffset
|
||||
}
|
||||
e.bTable[i].offset = v
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
nextEmit := s
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load6432(src, s)
|
||||
for {
|
||||
const skipLog = 6
|
||||
const doEvery = 1
|
||||
|
||||
nextS := s
|
||||
var t int32
|
||||
for {
|
||||
nextHashS := hashLen(cv, tableBits, hashShortBytes)
|
||||
nextHashL := hash7(cv, tableBits)
|
||||
|
||||
s = nextS
|
||||
nextS = s + doEvery + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
// Fetch a short+long candidate
|
||||
sCandidate := e.table[nextHashS]
|
||||
lCandidate := e.bTable[nextHashL]
|
||||
next := load6432(src, nextS)
|
||||
entry := tableEntry{offset: s + e.cur}
|
||||
e.table[nextHashS] = entry
|
||||
e.bTable[nextHashL] = entry
|
||||
|
||||
t = lCandidate.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
|
||||
// We got a long match. Use that.
|
||||
break
|
||||
}
|
||||
|
||||
t = sCandidate.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
|
||||
// Found a 4-byte match...
|
||||
lCandidate = e.bTable[hash7(next, tableBits)]
|
||||
|
||||
// If the next long is a candidate, check if we should use that instead...
|
||||
lOff := nextS - (lCandidate.offset - e.cur)
|
||||
if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
|
||||
l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
|
||||
if l2 > l1 {
|
||||
s = nextS
|
||||
t = lCandidate.offset - e.cur
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
cv = next
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
l := e.matchlenLong(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
if debugDeflate {
|
||||
if t >= s {
|
||||
panic("s-t")
|
||||
}
|
||||
if (s - t) > maxMatchOffset {
|
||||
panic(fmt.Sprintln("mmo", t))
|
||||
}
|
||||
if l < baseMatchLength {
|
||||
panic("bml")
|
||||
}
|
||||
}
|
||||
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
|
||||
if s >= sLimit {
|
||||
// Index first pair after match end.
|
||||
if int(s+8) < len(src) {
|
||||
cv := load6432(src, s)
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
|
||||
e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// Store every 3rd hash in-between
|
||||
if true {
|
||||
i := nextS
|
||||
if i < s-1 {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
t2 := tableEntry{offset: t.offset + 1}
|
||||
e.bTable[hash7(cv, tableBits)] = t
|
||||
e.bTable[hash7(cv>>8, tableBits)] = t2
|
||||
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
|
||||
|
||||
i += 3
|
||||
for ; i < s-1; i += 3 {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
t2 := tableEntry{offset: t.offset + 1}
|
||||
e.bTable[hash7(cv, tableBits)] = t
|
||||
e.bTable[hash7(cv>>8, tableBits)] = t2
|
||||
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s.
|
||||
x := load6432(src, s-1)
|
||||
o := e.cur + s - 1
|
||||
prevHashS := hashLen(x, tableBits, hashShortBytes)
|
||||
prevHashL := hash7(x, tableBits)
|
||||
e.table[prevHashS] = tableEntry{offset: o}
|
||||
e.bTable[prevHashL] = tableEntry{offset: o}
|
||||
cv = x >> 8
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
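The encoder whose Encode method ends above, like the level-5 and level-6 encoders added below, is never constructed directly by callers; it is selected by the flate writer when a compression level in the 1-6 range is requested. A minimal usage sketch follows, assuming the upstream import path github.com/klauspost/compress/flate and that level 5 maps to the fastEncL5 encoder shown in the next file (the payload string is illustrative only).

// Minimal sketch: selecting one of the fast encoders indirectly via the
// compression level passed to flate.NewWriter.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// Level 5 is assumed to route writes through fastEncL5.Encode internally.
	zw, err := flate.NewWriter(&buf, 5)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write(bytes.Repeat([]byte("openfaas gateway payload "), 64)); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("compressed bytes:", buf.Len())
}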
gateway/vendor/github.com/klauspost/compress/flate/level5.go (generated, vendored, new file: 708 lines)
@@ -0,0 +1,708 @@
|
||||
package flate
|
||||
|
||||
import "fmt"
|
||||
|
||||
type fastEncL5 struct {
|
||||
fastGen
|
||||
table [tableSize]tableEntry
|
||||
bTable [tableSize]tableEntryPrev
|
||||
}
|
||||
|
||||
func (e *fastEncL5) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
hashShortBytes = 4
|
||||
)
|
||||
if debugDeflate && e.cur < 0 {
|
||||
panic(fmt.Sprint("e.cur < 0: ", e.cur))
|
||||
}
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
e.bTable[i] = tableEntryPrev{}
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i].offset
|
||||
if v <= minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + maxMatchOffset
|
||||
}
|
||||
e.table[i].offset = v
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
v := e.bTable[i]
|
||||
if v.Cur.offset <= minOff {
|
||||
v.Cur.offset = 0
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
|
||||
if v.Prev.offset <= minOff {
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
|
||||
}
|
||||
}
|
||||
e.bTable[i] = v
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
nextEmit := s
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load6432(src, s)
|
||||
for {
|
||||
const skipLog = 6
|
||||
const doEvery = 1
|
||||
|
||||
nextS := s
|
||||
var l int32
|
||||
var t int32
|
||||
for {
|
||||
nextHashS := hashLen(cv, tableBits, hashShortBytes)
|
||||
nextHashL := hash7(cv, tableBits)
|
||||
|
||||
s = nextS
|
||||
nextS = s + doEvery + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
// Fetch a short+long candidate
|
||||
sCandidate := e.table[nextHashS]
|
||||
lCandidate := e.bTable[nextHashL]
|
||||
next := load6432(src, nextS)
|
||||
entry := tableEntry{offset: s + e.cur}
|
||||
e.table[nextHashS] = entry
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = entry, eLong.Cur
|
||||
|
||||
nextHashS = hashLen(next, tableBits, hashShortBytes)
|
||||
nextHashL = hash7(next, tableBits)
|
||||
|
||||
t = lCandidate.Cur.offset - e.cur
|
||||
if s-t < maxMatchOffset {
|
||||
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
|
||||
t2 := lCandidate.Prev.offset - e.cur
|
||||
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
|
||||
l = e.matchlen(s+4, t+4, src) + 4
|
||||
ml1 := e.matchlen(s+4, t2+4, src) + 4
|
||||
if ml1 > l {
|
||||
t = t2
|
||||
l = ml1
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
t = lCandidate.Prev.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
t = sCandidate.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
|
||||
// Found a 4 match...
|
||||
l = e.matchlen(s+4, t+4, src) + 4
|
||||
lCandidate = e.bTable[nextHashL]
|
||||
// Store the next match
|
||||
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
|
||||
// If the next long is a candidate, use that...
|
||||
t2 := lCandidate.Cur.offset - e.cur
|
||||
if nextS-t2 < maxMatchOffset {
|
||||
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
|
||||
ml := e.matchlen(nextS+4, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
s = nextS
|
||||
l = ml
|
||||
break
|
||||
}
|
||||
}
|
||||
// If the previous long is a candidate, use that...
|
||||
t2 = lCandidate.Prev.offset - e.cur
|
||||
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
|
||||
ml := e.matchlen(nextS+4, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
s = nextS
|
||||
l = ml
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
cv = next
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
|
||||
if l == 0 {
|
||||
// Extend the 4-byte match as long as possible.
|
||||
l = e.matchlenLong(s+4, t+4, src) + 4
|
||||
} else if l == maxMatchLength {
|
||||
l += e.matchlenLong(s+l, t+l, src)
|
||||
}
|
||||
|
||||
// Try to locate a better match by checking the end of best match...
|
||||
if sAt := s + l; l < 30 && sAt < sLimit {
|
||||
// Allow some bytes at the beginning to mismatch.
|
||||
// Sweet spot is 2/3 bytes depending on input.
|
||||
// 3 is only a little better when it matches, but can sometimes be a lot worse.
|
||||
// The skipped bytes are tested in Extend backwards,
|
||||
// and still picked up as part of the match if they do.
|
||||
const skipBeginning = 2
|
||||
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
|
||||
t2 := eLong - e.cur - l + skipBeginning
|
||||
s2 := s + skipBeginning
|
||||
off := s2 - t2
|
||||
if t2 >= 0 && off < maxMatchOffset && off > 0 {
|
||||
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
|
||||
t = t2
|
||||
l = l2
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
if debugDeflate {
|
||||
if t >= s {
|
||||
panic(fmt.Sprintln("s-t", s, t))
|
||||
}
|
||||
if (s - t) > maxMatchOffset {
|
||||
panic(fmt.Sprintln("mmo", s-t))
|
||||
}
|
||||
if l < baseMatchLength {
|
||||
panic("bml")
|
||||
}
|
||||
}
|
||||
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// Store every 3rd hash in-between.
|
||||
if true {
|
||||
const hashEvery = 3
|
||||
i := s - l + 1
|
||||
if i < s-1 {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
|
||||
eLong := &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
|
||||
// Do a long entry at i+1
|
||||
cv >>= 8
|
||||
t = tableEntry{offset: t.offset + 1}
|
||||
eLong = &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
|
||||
// We only have enough bits for a short entry at i+2
|
||||
cv >>= 8
|
||||
t = tableEntry{offset: t.offset + 1}
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
|
||||
|
||||
// Skip one - otherwise we risk hitting 's'
|
||||
i += 4
|
||||
for ; i < s-1; i += hashEvery {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
t2 := tableEntry{offset: t.offset + 1}
|
||||
eLong := &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s.
|
||||
x := load6432(src, s-1)
|
||||
o := e.cur + s - 1
|
||||
prevHashS := hashLen(x, tableBits, hashShortBytes)
|
||||
prevHashL := hash7(x, tableBits)
|
||||
e.table[prevHashS] = tableEntry{offset: o}
|
||||
eLong := &e.bTable[prevHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
|
||||
cv = x >> 8
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
||||
|
||||
// fastEncL5Window is a level 5 encoder,
|
||||
// but with a custom window size.
|
||||
type fastEncL5Window struct {
|
||||
hist []byte
|
||||
cur int32
|
||||
maxOffset int32
|
||||
table [tableSize]tableEntry
|
||||
bTable [tableSize]tableEntryPrev
|
||||
}
|
||||
|
||||
func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
hashShortBytes = 4
|
||||
)
|
||||
maxMatchOffset := e.maxOffset
|
||||
if debugDeflate && e.cur < 0 {
|
||||
panic(fmt.Sprint("e.cur < 0: ", e.cur))
|
||||
}
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
e.bTable[i] = tableEntryPrev{}
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i].offset
|
||||
if v <= minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + maxMatchOffset
|
||||
}
|
||||
e.table[i].offset = v
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
v := e.bTable[i]
|
||||
if v.Cur.offset <= minOff {
|
||||
v.Cur.offset = 0
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
|
||||
if v.Prev.offset <= minOff {
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
|
||||
}
|
||||
}
|
||||
e.bTable[i] = v
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
nextEmit := s
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load6432(src, s)
|
||||
for {
|
||||
const skipLog = 6
|
||||
const doEvery = 1
|
||||
|
||||
nextS := s
|
||||
var l int32
|
||||
var t int32
|
||||
for {
|
||||
nextHashS := hashLen(cv, tableBits, hashShortBytes)
|
||||
nextHashL := hash7(cv, tableBits)
|
||||
|
||||
s = nextS
|
||||
nextS = s + doEvery + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
// Fetch a short+long candidate
|
||||
sCandidate := e.table[nextHashS]
|
||||
lCandidate := e.bTable[nextHashL]
|
||||
next := load6432(src, nextS)
|
||||
entry := tableEntry{offset: s + e.cur}
|
||||
e.table[nextHashS] = entry
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = entry, eLong.Cur
|
||||
|
||||
nextHashS = hashLen(next, tableBits, hashShortBytes)
|
||||
nextHashL = hash7(next, tableBits)
|
||||
|
||||
t = lCandidate.Cur.offset - e.cur
|
||||
if s-t < maxMatchOffset {
|
||||
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
|
||||
t2 := lCandidate.Prev.offset - e.cur
|
||||
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
|
||||
l = e.matchlen(s+4, t+4, src) + 4
|
||||
ml1 := e.matchlen(s+4, t2+4, src) + 4
|
||||
if ml1 > l {
|
||||
t = t2
|
||||
l = ml1
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
t = lCandidate.Prev.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
t = sCandidate.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
|
||||
// Found a 4 match...
|
||||
l = e.matchlen(s+4, t+4, src) + 4
|
||||
lCandidate = e.bTable[nextHashL]
|
||||
// Store the next match
|
||||
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
|
||||
// If the next long is a candidate, use that...
|
||||
t2 := lCandidate.Cur.offset - e.cur
|
||||
if nextS-t2 < maxMatchOffset {
|
||||
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
|
||||
ml := e.matchlen(nextS+4, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
s = nextS
|
||||
l = ml
|
||||
break
|
||||
}
|
||||
}
|
||||
// If the previous long is a candidate, use that...
|
||||
t2 = lCandidate.Prev.offset - e.cur
|
||||
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
|
||||
ml := e.matchlen(nextS+4, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
s = nextS
|
||||
l = ml
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
cv = next
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
|
||||
if l == 0 {
|
||||
// Extend the 4-byte match as long as possible.
|
||||
l = e.matchlenLong(s+4, t+4, src) + 4
|
||||
} else if l == maxMatchLength {
|
||||
l += e.matchlenLong(s+l, t+l, src)
|
||||
}
|
||||
|
||||
// Try to locate a better match by checking the end of best match...
|
||||
if sAt := s + l; l < 30 && sAt < sLimit {
|
||||
// Allow some bytes at the beginning to mismatch.
|
||||
// Sweet spot is 2/3 bytes depending on input.
|
||||
// 3 is only a little better when it matches, but can sometimes be a lot worse.
|
||||
// The skipped bytes are tested in Extend backwards,
|
||||
// and still picked up as part of the match if they do.
|
||||
const skipBeginning = 2
|
||||
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
|
||||
t2 := eLong - e.cur - l + skipBeginning
|
||||
s2 := s + skipBeginning
|
||||
off := s2 - t2
|
||||
if t2 >= 0 && off < maxMatchOffset && off > 0 {
|
||||
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
|
||||
t = t2
|
||||
l = l2
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
if debugDeflate {
|
||||
if t >= s {
|
||||
panic(fmt.Sprintln("s-t", s, t))
|
||||
}
|
||||
if (s - t) > maxMatchOffset {
|
||||
panic(fmt.Sprintln("mmo", s-t))
|
||||
}
|
||||
if l < baseMatchLength {
|
||||
panic("bml")
|
||||
}
|
||||
}
|
||||
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// Store every 3rd hash in-between.
|
||||
if true {
|
||||
const hashEvery = 3
|
||||
i := s - l + 1
|
||||
if i < s-1 {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
|
||||
eLong := &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
|
||||
// Do a long entry at i+1
|
||||
cv >>= 8
|
||||
t = tableEntry{offset: t.offset + 1}
|
||||
eLong = &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
|
||||
// We only have enough bits for a short entry at i+2
|
||||
cv >>= 8
|
||||
t = tableEntry{offset: t.offset + 1}
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
|
||||
|
||||
// Skip one - otherwise we risk hitting 's'
|
||||
i += 4
|
||||
for ; i < s-1; i += hashEvery {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
t2 := tableEntry{offset: t.offset + 1}
|
||||
eLong := &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s.
|
||||
x := load6432(src, s-1)
|
||||
o := e.cur + s - 1
|
||||
prevHashS := hashLen(x, tableBits, hashShortBytes)
|
||||
prevHashL := hash7(x, tableBits)
|
||||
e.table[prevHashS] = tableEntry{offset: o}
|
||||
eLong := &e.bTable[prevHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
|
||||
cv = x >> 8
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the encoding table.
|
||||
func (e *fastEncL5Window) Reset() {
|
||||
// We keep the same allocs, since we are compressing the same block sizes.
|
||||
if cap(e.hist) < allocHistory {
|
||||
e.hist = make([]byte, 0, allocHistory)
|
||||
}
|
||||
|
||||
// We offset current position so everything will be out of reach.
|
||||
// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
|
||||
if e.cur <= int32(bufferReset) {
|
||||
e.cur += e.maxOffset + int32(len(e.hist))
|
||||
}
|
||||
e.hist = e.hist[:0]
|
||||
}
|
||||
|
||||
func (e *fastEncL5Window) addBlock(src []byte) int32 {
|
||||
// check if we have space already
|
||||
maxMatchOffset := e.maxOffset
|
||||
|
||||
if len(e.hist)+len(src) > cap(e.hist) {
|
||||
if cap(e.hist) == 0 {
|
||||
e.hist = make([]byte, 0, allocHistory)
|
||||
} else {
|
||||
if cap(e.hist) < int(maxMatchOffset*2) {
|
||||
panic("unexpected buffer size")
|
||||
}
|
||||
// Move down
|
||||
offset := int32(len(e.hist)) - maxMatchOffset
|
||||
copy(e.hist[0:maxMatchOffset], e.hist[offset:])
|
||||
e.cur += offset
|
||||
e.hist = e.hist[:maxMatchOffset]
|
||||
}
|
||||
}
|
||||
s := int32(len(e.hist))
|
||||
e.hist = append(e.hist, src...)
|
||||
return s
|
||||
}
|
||||
|
||||
// matchlen will return the match length between offsets s and t in src.
|
||||
// The maximum length returned is maxMatchLength - 4.
|
||||
// It is assumed that s > t, that t >=0 and s < len(src).
|
||||
func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
|
||||
if debugDecode {
|
||||
if t >= s {
|
||||
panic(fmt.Sprint("t >=s:", t, s))
|
||||
}
|
||||
if int(s) >= len(src) {
|
||||
panic(fmt.Sprint("s >= len(src):", s, len(src)))
|
||||
}
|
||||
if t < 0 {
|
||||
panic(fmt.Sprint("t < 0:", t))
|
||||
}
|
||||
if s-t > e.maxOffset {
|
||||
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
|
||||
}
|
||||
}
|
||||
s1 := int(s) + maxMatchLength - 4
|
||||
if s1 > len(src) {
|
||||
s1 = len(src)
|
||||
}
|
||||
|
||||
// Extend the match to be as long as possible.
|
||||
return int32(matchLen(src[s:s1], src[t:]))
|
||||
}
|
||||
|
||||
// matchlenLong will return the match length between offsets s and t in src.
|
||||
// It is assumed that s > t, that t >=0 and s < len(src).
|
||||
func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
|
||||
if debugDeflate {
|
||||
if t >= s {
|
||||
panic(fmt.Sprint("t >=s:", t, s))
|
||||
}
|
||||
if int(s) >= len(src) {
|
||||
panic(fmt.Sprint("s >= len(src):", s, len(src)))
|
||||
}
|
||||
if t < 0 {
|
||||
panic(fmt.Sprint("t < 0:", t))
|
||||
}
|
||||
if s-t > e.maxOffset {
|
||||
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
|
||||
}
|
||||
}
|
||||
// Extend the match to be as long as possible.
|
||||
return int32(matchLen(src[s:], src[t:]))
|
||||
}
|
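Every encoder in these files repeats the same table-rebasing step inside its "Protect against e.cur wraparound" loop: entries that can no longer be reached within the match window are zeroed, and all others are shifted into a coordinate system where the current position becomes maxMatchOffset. A standalone sketch of that rule follows; the helper name rebase is hypothetical, and the constant is assumed to mirror the package's 32 KiB window.

// Standalone sketch of the offset-rebasing rule used in the wraparound
// protection loops above. Only the arithmetic mirrors the vendored code.
package main

import "fmt"

const maxMatchOffset = 1 << 15 // assumed 32 KiB match window

// rebase maps a stored table offset into the new coordinate system after the
// position counter is reset to maxMatchOffset. Entries already out of
// matching range are zeroed instead of shifted.
func rebase(offset, cur, histLen int32) int32 {
	minOff := cur + histLen - maxMatchOffset
	if offset <= minOff {
		return 0 // too far back to ever match again
	}
	return offset - cur + maxMatchOffset
}

func main() {
	fmt.Println(rebase(100, 1<<30, 0))        // ancient entry -> 0
	fmt.Println(rebase((1<<30)-10, 1<<30, 0)) // recent entry -> 32758
}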
gateway/vendor/github.com/klauspost/compress/flate/level6.go (generated, vendored, new file: 325 lines)
@@ -0,0 +1,325 @@
|
||||
package flate
|
||||
|
||||
import "fmt"
|
||||
|
||||
type fastEncL6 struct {
|
||||
fastGen
|
||||
table [tableSize]tableEntry
|
||||
bTable [tableSize]tableEntryPrev
|
||||
}
|
||||
|
||||
func (e *fastEncL6) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
hashShortBytes = 4
|
||||
)
|
||||
if debugDeflate && e.cur < 0 {
|
||||
panic(fmt.Sprint("e.cur < 0: ", e.cur))
|
||||
}
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
e.bTable[i] = tableEntryPrev{}
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i].offset
|
||||
if v <= minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + maxMatchOffset
|
||||
}
|
||||
e.table[i].offset = v
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
v := e.bTable[i]
|
||||
if v.Cur.offset <= minOff {
|
||||
v.Cur.offset = 0
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
|
||||
if v.Prev.offset <= minOff {
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
|
||||
}
|
||||
}
|
||||
e.bTable[i] = v
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
nextEmit := s
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load6432(src, s)
|
||||
// Repeat MUST be > 1 and within range
|
||||
repeat := int32(1)
|
||||
for {
|
||||
const skipLog = 7
|
||||
const doEvery = 1
|
||||
|
||||
nextS := s
|
||||
var l int32
|
||||
var t int32
|
||||
for {
|
||||
nextHashS := hashLen(cv, tableBits, hashShortBytes)
|
||||
nextHashL := hash7(cv, tableBits)
|
||||
s = nextS
|
||||
nextS = s + doEvery + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
// Fetch a short+long candidate
|
||||
sCandidate := e.table[nextHashS]
|
||||
lCandidate := e.bTable[nextHashL]
|
||||
next := load6432(src, nextS)
|
||||
entry := tableEntry{offset: s + e.cur}
|
||||
e.table[nextHashS] = entry
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = entry, eLong.Cur
|
||||
|
||||
// Calculate hashes of 'next'
|
||||
nextHashS = hashLen(next, tableBits, hashShortBytes)
|
||||
nextHashL = hash7(next, tableBits)
|
||||
|
||||
t = lCandidate.Cur.offset - e.cur
|
||||
if s-t < maxMatchOffset {
|
||||
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
|
||||
// Long candidate matches at least 4 bytes.
|
||||
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
|
||||
// Check the previous long candidate as well.
|
||||
t2 := lCandidate.Prev.offset - e.cur
|
||||
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
|
||||
l = e.matchlen(s+4, t+4, src) + 4
|
||||
ml1 := e.matchlen(s+4, t2+4, src) + 4
|
||||
if ml1 > l {
|
||||
t = t2
|
||||
l = ml1
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
// Current value did not match, but check if previous long value does.
|
||||
t = lCandidate.Prev.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
t = sCandidate.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
|
||||
// Found a 4 match...
|
||||
l = e.matchlen(s+4, t+4, src) + 4
|
||||
|
||||
// Look up next long candidate (at nextS)
|
||||
lCandidate = e.bTable[nextHashL]
|
||||
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
|
||||
// Check repeat at s + repOff
|
||||
const repOff = 1
|
||||
t2 := s - repeat + repOff
|
||||
if load3232(src, t2) == uint32(cv>>(8*repOff)) {
|
||||
ml := e.matchlen(s+4+repOff, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
l = ml
|
||||
s += repOff
|
||||
// Not worth checking more.
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If the next long is a candidate, use that...
|
||||
t2 = lCandidate.Cur.offset - e.cur
|
||||
if nextS-t2 < maxMatchOffset {
|
||||
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
|
||||
ml := e.matchlen(nextS+4, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
s = nextS
|
||||
l = ml
|
||||
// This is ok, but check previous as well.
|
||||
}
|
||||
}
|
||||
// If the previous long is a candidate, use that...
|
||||
t2 = lCandidate.Prev.offset - e.cur
|
||||
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
|
||||
ml := e.matchlen(nextS+4, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
s = nextS
|
||||
l = ml
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
cv = next
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
if l == 0 {
|
||||
l = e.matchlenLong(s+4, t+4, src) + 4
|
||||
} else if l == maxMatchLength {
|
||||
l += e.matchlenLong(s+l, t+l, src)
|
||||
}
|
||||
|
||||
// Try to locate a better match by checking the end-of-match...
|
||||
if sAt := s + l; sAt < sLimit {
|
||||
// Allow some bytes at the beginning to mismatch.
|
||||
// Sweet spot is 2/3 bytes depending on input.
|
||||
// 3 is only a little better when it matches, but can sometimes be a lot worse.
|
||||
// The skipped bytes are tested in Extend backwards,
|
||||
// and still picked up as part of the match if they do.
|
||||
const skipBeginning = 2
|
||||
eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
|
||||
// Test current
|
||||
t2 := eLong.Cur.offset - e.cur - l + skipBeginning
|
||||
s2 := s + skipBeginning
|
||||
off := s2 - t2
|
||||
if off < maxMatchOffset {
|
||||
if off > 0 && t2 >= 0 {
|
||||
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
|
||||
t = t2
|
||||
l = l2
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
// Test next:
|
||||
t2 = eLong.Prev.offset - e.cur - l + skipBeginning
|
||||
off := s2 - t2
|
||||
if off > 0 && off < maxMatchOffset && t2 >= 0 {
|
||||
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
|
||||
t = t2
|
||||
l = l2
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
if false {
|
||||
if t >= s {
|
||||
panic(fmt.Sprintln("s-t", s, t))
|
||||
}
|
||||
if (s - t) > maxMatchOffset {
|
||||
panic(fmt.Sprintln("mmo", s-t))
|
||||
}
|
||||
if l < baseMatchLength {
|
||||
panic("bml")
|
||||
}
|
||||
}
|
||||
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
repeat = s - t
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
|
||||
if s >= sLimit {
|
||||
// Index after match end.
|
||||
for i := nextS + 1; i < int32(len(src))-8; i += 2 {
|
||||
cv := load6432(src, i)
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
|
||||
eLong := &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// Store every long hash in-between and every second short.
|
||||
if true {
|
||||
for i := nextS + 1; i < s-1; i += 2 {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
t2 := tableEntry{offset: t.offset + 1}
|
||||
eLong := &e.bTable[hash7(cv, tableBits)]
|
||||
eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
|
||||
}
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s.
|
||||
cv = load6432(src, s)
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
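The level-6 encoder above differs from level 5 mainly in that it remembers the distance of the last emitted match (repeat) and probes that distance before doing further table lookups, which is cheap and often hits on repetitive data. A toy sketch of that probe follows, independent of the encoder state; the helper name repeatMatchLen is hypothetical.

// Toy sketch of the repeat-offset probe used by fastEncL6 above: check
// whether the data at distance `repeat` behind the current position already
// matches before considering other candidates.
package main

import (
	"bytes"
	"fmt"
)

// repeatMatchLen returns how long the match at distance `repeat` behind
// position s is, or 0 if it does not cover at least 4 bytes.
func repeatMatchLen(src []byte, s, repeat int) int {
	t := s - repeat
	if t < 0 || s+4 > len(src) {
		return 0
	}
	if !bytes.Equal(src[s:s+4], src[t:t+4]) {
		return 0
	}
	n := 4
	for s+n < len(src) && src[t+n] == src[s+n] {
		n++
	}
	return n
}

func main() {
	src := []byte("abcabcabcabcabcabc")
	fmt.Println(repeatMatchLen(src, 6, 3)) // 12: the repeat distance of 3 keeps matching
}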
gateway/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go (generated, vendored, new file: 16 lines)
@@ -0,0 +1,16 @@
|
||||
//go:build amd64 && !appengine && !noasm && gc
|
||||
// +build amd64,!appengine,!noasm,gc
|
||||
|
||||
// Copyright 2019+ Klaus Post. All rights reserved.
|
||||
// License information can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// matchLen returns how many bytes match in a and b
|
||||
//
|
||||
// It assumes that:
|
||||
//
|
||||
// len(a) <= len(b) and len(a) > 0
|
||||
//
|
||||
//go:noescape
|
||||
func matchLen(a []byte, b []byte) int
|
gateway/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s (generated, vendored, new file: 68 lines)
@@ -0,0 +1,68 @@
|
||||
// Copied from S2 implementation.
|
||||
|
||||
//go:build !appengine && !noasm && gc && !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func matchLen(a []byte, b []byte) int
|
||||
// Requires: BMI
|
||||
TEXT ·matchLen(SB), NOSPLIT, $0-56
|
||||
MOVQ a_base+0(FP), AX
|
||||
MOVQ b_base+24(FP), CX
|
||||
MOVQ a_len+8(FP), DX
|
||||
|
||||
// matchLen
|
||||
XORL SI, SI
|
||||
CMPL DX, $0x08
|
||||
JB matchlen_match4_standalone
|
||||
|
||||
matchlen_loopback_standalone:
|
||||
MOVQ (AX)(SI*1), BX
|
||||
XORQ (CX)(SI*1), BX
|
||||
TESTQ BX, BX
|
||||
JZ matchlen_loop_standalone
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
TZCNTQ BX, BX
|
||||
#else
|
||||
BSFQ BX, BX
|
||||
#endif
|
||||
SARQ $0x03, BX
|
||||
LEAL (SI)(BX*1), SI
|
||||
JMP gen_match_len_end
|
||||
|
||||
matchlen_loop_standalone:
|
||||
LEAL -8(DX), DX
|
||||
LEAL 8(SI), SI
|
||||
CMPL DX, $0x08
|
||||
JAE matchlen_loopback_standalone
|
||||
|
||||
matchlen_match4_standalone:
|
||||
CMPL DX, $0x04
|
||||
JB matchlen_match2_standalone
|
||||
MOVL (AX)(SI*1), BX
|
||||
CMPL (CX)(SI*1), BX
|
||||
JNE matchlen_match2_standalone
|
||||
LEAL -4(DX), DX
|
||||
LEAL 4(SI), SI
|
||||
|
||||
matchlen_match2_standalone:
|
||||
CMPL DX, $0x02
|
||||
JB matchlen_match1_standalone
|
||||
MOVW (AX)(SI*1), BX
|
||||
CMPW (CX)(SI*1), BX
|
||||
JNE matchlen_match1_standalone
|
||||
LEAL -2(DX), DX
|
||||
LEAL 2(SI), SI
|
||||
|
||||
matchlen_match1_standalone:
|
||||
CMPL DX, $0x01
|
||||
JB gen_match_len_end
|
||||
MOVB (AX)(SI*1), BL
|
||||
CMPB (CX)(SI*1), BL
|
||||
JNE gen_match_len_end
|
||||
INCL SI
|
||||
|
||||
gen_match_len_end:
|
||||
MOVQ SI, ret+48(FP)
|
||||
RET
|
gateway/vendor/github.com/klauspost/compress/flate/matchlen_generic.go (generated, vendored, new file: 33 lines)
@@ -0,0 +1,33 @@
|
||||
//go:build !amd64 || appengine || !gc || noasm
|
||||
// +build !amd64 appengine !gc noasm
|
||||
|
||||
// Copyright 2019+ Klaus Post. All rights reserved.
|
||||
// License information can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// matchLen returns the maximum common prefix length of a and b.
|
||||
// a must be the shorter of the two.
|
||||
func matchLen(a, b []byte) (n int) {
|
||||
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
|
||||
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
|
||||
if diff != 0 {
|
||||
return n + bits.TrailingZeros64(diff)>>3
|
||||
}
|
||||
n += 8
|
||||
}
|
||||
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
break
|
||||
}
|
||||
n++
|
||||
}
|
||||
return n
|
||||
|
||||
}
|
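Both the assembler and generic matchLen implementations above follow the same contract: return the length of the common prefix, with the first argument being the shorter slice. Because the function is unexported in the vendored package, the runnable example below uses a local copy of the generic version just to illustrate that contract.

// Local copy of the generic matchLen above, used only to demonstrate its
// common-prefix contract outside the package.
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func matchLen(a, b []byte) (n int) {
	// Compare 8 bytes at a time; the lowest differing bit locates the first
	// mismatching byte.
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	// Byte-by-byte tail.
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("deflate "), []byte("deflate stream"))) // 8
	fmt.Println(matchLen([]byte("abcx"), []byte("abcy")))               // 3
}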
gateway/vendor/github.com/klauspost/compress/flate/regmask_amd64.go (generated, vendored, new file: 37 lines)
@@ -0,0 +1,37 @@
|
||||
package flate
|
||||
|
||||
const (
|
||||
// Masks for shifts with register sizes of the shift value.
|
||||
// This can be used to work around the x86 design of shifting by mod register size.
|
||||
// It can be used when a variable shift is always smaller than the register size.
|
||||
|
||||
// reg8SizeMaskX - shift value is 8 bits, shifted is X
|
||||
reg8SizeMask8 = 7
|
||||
reg8SizeMask16 = 15
|
||||
reg8SizeMask32 = 31
|
||||
reg8SizeMask64 = 63
|
||||
|
||||
// reg16SizeMaskX - shift value is 16 bits, shifted is X
|
||||
reg16SizeMask8 = reg8SizeMask8
|
||||
reg16SizeMask16 = reg8SizeMask16
|
||||
reg16SizeMask32 = reg8SizeMask32
|
||||
reg16SizeMask64 = reg8SizeMask64
|
||||
|
||||
// reg32SizeMaskX - shift value is 32 bits, shifted is X
|
||||
reg32SizeMask8 = reg8SizeMask8
|
||||
reg32SizeMask16 = reg8SizeMask16
|
||||
reg32SizeMask32 = reg8SizeMask32
|
||||
reg32SizeMask64 = reg8SizeMask64
|
||||
|
||||
// reg64SizeMaskX - shift value is 64 bits, shifted is X
|
||||
reg64SizeMask8 = reg8SizeMask8
|
||||
reg64SizeMask16 = reg8SizeMask16
|
||||
reg64SizeMask32 = reg8SizeMask32
|
||||
reg64SizeMask64 = reg8SizeMask64
|
||||
|
||||
// regSizeMaskUintX - shift value is uint, shifted is X
|
||||
regSizeMaskUint8 = reg8SizeMask8
|
||||
regSizeMaskUint16 = reg8SizeMask16
|
||||
regSizeMaskUint32 = reg8SizeMask32
|
||||
regSizeMaskUint64 = reg8SizeMask64
|
||||
)
|
gateway/vendor/github.com/klauspost/compress/flate/regmask_other.go (generated, vendored, new file: 40 lines)
@@ -0,0 +1,40 @@
|
||||
//go:build !amd64
|
||||
// +build !amd64
|
||||
|
||||
package flate
|
||||
|
||||
const (
|
||||
// Masks for shifts with register sizes of the shift value.
|
||||
// This can be used to work around the x86 design of shifting by mod register size.
|
||||
// It can be used when a variable shift is always smaller than the register size.
|
||||
|
||||
// reg8SizeMaskX - shift value is 8 bits, shifted is X
|
||||
reg8SizeMask8 = 0xff
|
||||
reg8SizeMask16 = 0xff
|
||||
reg8SizeMask32 = 0xff
|
||||
reg8SizeMask64 = 0xff
|
||||
|
||||
// reg16SizeMaskX - shift value is 16 bits, shifted is X
|
||||
reg16SizeMask8 = 0xffff
|
||||
reg16SizeMask16 = 0xffff
|
||||
reg16SizeMask32 = 0xffff
|
||||
reg16SizeMask64 = 0xffff
|
||||
|
||||
// reg32SizeMaskX - shift value is 32 bits, shifted is X
|
||||
reg32SizeMask8 = 0xffffffff
|
||||
reg32SizeMask16 = 0xffffffff
|
||||
reg32SizeMask32 = 0xffffffff
|
||||
reg32SizeMask64 = 0xffffffff
|
||||
|
||||
// reg64SizeMaskX - shift value is 64 bits, shifted is X
|
||||
reg64SizeMask8 = 0xffffffffffffffff
|
||||
reg64SizeMask16 = 0xffffffffffffffff
|
||||
reg64SizeMask32 = 0xffffffffffffffff
|
||||
reg64SizeMask64 = 0xffffffffffffffff
|
||||
|
||||
// regSizeMaskUintX - shift value is uint, shifted is X
|
||||
regSizeMaskUint8 = ^uint(0)
|
||||
regSizeMaskUint16 = ^uint(0)
|
||||
regSizeMaskUint32 = ^uint(0)
|
||||
regSizeMaskUint64 = ^uint(0)
|
||||
)
|
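The regmask constants above let hot loops write shifts as x >> (s & regSizeMaskUint64): on amd64 the mask matches the hardware's implicit shift-count masking, so the AND can help the compiler drop the extra handling Go otherwise needs for shift counts at or above the register width, while on other architectures the mask is a full-width no-op. A hedged illustration follows; the constant is copied locally and the compiler-behaviour claim is an assumption, not something stated in the vendored code.

// Illustration of how the shift masks above are intended to be used.
package main

import "fmt"

const regSizeMaskUint64 = 63 // amd64 value from regmask_amd64.go

func shiftRight(x uint64, s uint) uint64 {
	// The mask asserts the shift count is below the register width, which is
	// assumed to let the compiler avoid the "shift >= width yields zero" path.
	return x >> (s & regSizeMaskUint64)
}

func main() {
	fmt.Println(shiftRight(1<<40, 8)) // 4294967296
}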
gateway/vendor/github.com/klauspost/compress/flate/stateless.go (generated, vendored, new file: 318 lines)
@@ -0,0 +1,318 @@
|
||||
package flate
|
||||
|
||||
import (
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
maxStatelessBlock = math.MaxInt16
|
||||
// The dictionary is carried over from within a maxStatelessBlock-sized chunk, so limit its size.
|
||||
maxStatelessDict = 8 << 10
|
||||
|
||||
slTableBits = 13
|
||||
slTableSize = 1 << slTableBits
|
||||
slTableShift = 32 - slTableBits
|
||||
)
|
||||
|
||||
type statelessWriter struct {
|
||||
dst io.Writer
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (s *statelessWriter) Close() error {
|
||||
if s.closed {
|
||||
return nil
|
||||
}
|
||||
s.closed = true
|
||||
// Emit EOF block
|
||||
return StatelessDeflate(s.dst, nil, true, nil)
|
||||
}
|
||||
|
||||
func (s *statelessWriter) Write(p []byte) (n int, err error) {
|
||||
err = StatelessDeflate(s.dst, p, false, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (s *statelessWriter) Reset(w io.Writer) {
|
||||
s.dst = w
|
||||
s.closed = false
|
||||
}
|
||||
|
||||
// NewStatelessWriter will compress data without maintaining any state
|
||||
// between Write calls.
|
||||
// There will be no memory kept between Write calls,
|
||||
// but compression and speed will be suboptimal.
|
||||
// Because of this, the size of actual Write calls will affect output size.
|
||||
func NewStatelessWriter(dst io.Writer) io.WriteCloser {
|
||||
return &statelessWriter{dst: dst}
|
||||
}
|
||||
|
||||
// bitWriterPool contains bit writers that can be reused.
|
||||
var bitWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return newHuffmanBitWriter(nil)
|
||||
},
|
||||
}
|
||||
|
||||
// StatelessDeflate allows compressing directly to a Writer without retaining state.
|
||||
// When it returns, everything will have been flushed.
|
||||
// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
|
||||
// Longer dictionaries will be truncated and will still produce valid output.
|
||||
// Sending nil dictionary is perfectly fine.
|
||||
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
|
||||
var dst tokens
|
||||
bw := bitWriterPool.Get().(*huffmanBitWriter)
|
||||
bw.reset(out)
|
||||
defer func() {
|
||||
// don't keep a reference to our output
|
||||
bw.reset(nil)
|
||||
bitWriterPool.Put(bw)
|
||||
}()
|
||||
if eof && len(in) == 0 {
|
||||
// Just write an EOF block.
|
||||
// Could be faster...
|
||||
bw.writeStoredHeader(0, true)
|
||||
bw.flush()
|
||||
return bw.err
|
||||
}
|
||||
|
||||
// Truncate dict
|
||||
if len(dict) > maxStatelessDict {
|
||||
dict = dict[len(dict)-maxStatelessDict:]
|
||||
}
|
||||
|
||||
// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
|
||||
var inDict []byte
|
||||
|
||||
for len(in) > 0 {
|
||||
todo := in
|
||||
if len(inDict) > 0 {
|
||||
if len(todo) > maxStatelessBlock-maxStatelessDict {
|
||||
todo = todo[:maxStatelessBlock-maxStatelessDict]
|
||||
}
|
||||
} else if len(todo) > maxStatelessBlock-len(dict) {
|
||||
todo = todo[:maxStatelessBlock-len(dict)]
|
||||
}
|
||||
inOrg := in
|
||||
in = in[len(todo):]
|
||||
uncompressed := todo
|
||||
if len(dict) > 0 {
|
||||
// combine dict and source
|
||||
bufLen := len(todo) + len(dict)
|
||||
combined := make([]byte, bufLen)
|
||||
copy(combined, dict)
|
||||
copy(combined[len(dict):], todo)
|
||||
todo = combined
|
||||
}
|
||||
// Compress
|
||||
if len(inDict) == 0 {
|
||||
statelessEnc(&dst, todo, int16(len(dict)))
|
||||
} else {
|
||||
statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
|
||||
}
|
||||
isEof := eof && len(in) == 0
|
||||
|
||||
if dst.n == 0 {
|
||||
bw.writeStoredHeader(len(uncompressed), isEof)
|
||||
if bw.err != nil {
|
||||
return bw.err
|
||||
}
|
||||
bw.writeBytes(uncompressed)
|
||||
} else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
|
||||
// If we removed less than 1/16th, huffman compress the block.
|
||||
bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
|
||||
} else {
|
||||
bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
|
||||
}
|
||||
if len(in) > 0 {
|
||||
// Retain a dict if we have more
|
||||
inDict = inOrg[len(uncompressed)-maxStatelessDict:]
|
||||
dict = nil
|
||||
dst.Reset()
|
||||
}
|
||||
if bw.err != nil {
|
||||
return bw.err
|
||||
}
|
||||
}
|
||||
if !eof {
|
||||
// Align, only a stored block can do that.
|
||||
bw.writeStoredHeader(0, false)
|
||||
}
|
||||
bw.flush()
|
||||
return bw.err
|
||||
}
|
||||
|
||||
func hashSL(u uint32) uint32 {
|
||||
return (u * 0x1e35a7bd) >> slTableShift
|
||||
}
|
||||
|
||||
func load3216(b []byte, i int16) uint32 {
|
||||
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
|
||||
b = b[i:]
|
||||
b = b[:4]
|
||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||
}
|
||||
|
||||
func load6416(b []byte, i int16) uint64 {
|
||||
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
|
||||
b = b[i:]
|
||||
b = b[:8]
|
||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||
}
|
||||
|
||||
func statelessEnc(dst *tokens, src []byte, startAt int16) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
)
|
||||
|
||||
type tableEntry struct {
|
||||
offset int16
|
||||
}
|
||||
|
||||
var table [slTableSize]tableEntry
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src)-int(startAt) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = 0
|
||||
return
|
||||
}
|
||||
// Index until startAt
|
||||
if startAt > 0 {
|
||||
cv := load3232(src, 0)
|
||||
for i := int16(0); i < startAt; i++ {
|
||||
table[hashSL(cv)] = tableEntry{offset: i}
|
||||
cv = (cv >> 8) | (uint32(src[i+4]) << 24)
|
||||
}
|
||||
}
|
||||
|
||||
s := startAt + 1
|
||||
nextEmit := startAt
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int16(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load3216(src, s)
|
||||
|
||||
for {
|
||||
const skipLog = 5
|
||||
const doEvery = 2
|
||||
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
for {
|
||||
nextHash := hashSL(cv)
|
||||
candidate = table[nextHash]
|
||||
nextS = s + doEvery + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit || nextS <= 0 {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
now := load6416(src, nextS)
|
||||
table[nextHash] = tableEntry{offset: s}
|
||||
nextHash = hashSL(uint32(now))
|
||||
|
||||
if cv == load3216(src, candidate.offset) {
|
||||
table[nextHash] = tableEntry{offset: nextS}
|
||||
break
|
||||
}
|
||||
|
||||
// Do one right away...
|
||||
cv = uint32(now)
|
||||
s = nextS
|
||||
nextS++
|
||||
candidate = table[nextHash]
|
||||
now >>= 8
|
||||
table[nextHash] = tableEntry{offset: s}
|
||||
|
||||
if cv == load3216(src, candidate.offset) {
|
||||
table[nextHash] = tableEntry{offset: nextS}
|
||||
break
|
||||
}
|
||||
cv = uint32(now)
|
||||
s = nextS
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
t := candidate.offset
|
||||
l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save the match found
|
||||
dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-2 and at s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load6416(src, s-2)
|
||||
o := s - 2
|
||||
prevHash := hashSL(uint32(x))
|
||||
table[prevHash] = tableEntry{offset: o}
|
||||
x >>= 16
|
||||
currHash := hashSL(uint32(x))
|
||||
candidate = table[currHash]
|
||||
table[currHash] = tableEntry{offset: o + 2}
|
||||
|
||||
if uint32(x) != load3216(src, candidate.offset) {
|
||||
cv = uint32(x >> 8)
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
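The stateless API added above is exported, so it can be exercised directly. The sketch below uses the two entry points documented in this file, StatelessDeflate and NewStatelessWriter, and assumes the upstream import path github.com/klauspost/compress/flate (inside the gateway the same code lives under the vendored path).

// Minimal usage sketch for the stateless deflate API added above.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	payload := bytes.Repeat([]byte("stateless deflate keeps no state between writes. "), 20)

	// One-shot: compress a single block, flushing everything, no dictionary.
	var oneShot bytes.Buffer
	if err := flate.StatelessDeflate(&oneShot, payload, true, nil); err != nil {
		log.Fatal(err)
	}

	// Writer form: each Write is compressed independently, so fewer, larger
	// writes compress better than many small ones.
	var streamed bytes.Buffer
	w := flate.NewStatelessWriter(&streamed)
	if _, err := w.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	fmt.Println("one-shot:", oneShot.Len(), "bytes, streamed:", streamed.Len(), "bytes")
}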