Mirror of https://github.com/openfaas/faas.git (synced 2025-06-21 11:56:29 +00:00)

Compare commits: master...dependabot

1 commit: af936a14d8
.github/CODEOWNERS (vendored): 1 change

@@ -1,2 +1 @@
@alexellis
@welteki
.github/workflows/build.yml (vendored): 8 changes

@@ -16,9 +16,9 @@ jobs:
with:
fetch-depth: 1
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2

- name: Get git commit
id: get_git_commit

@@ -31,12 +31,12 @@ jobs:
run: echo "REPO_OWNER=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" > $GITHUB_ENV

- name: Build ${{ matrix.svc }}
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
context: ./gateway
file: ./gateway/Dockerfile
outputs: "type=image,push=false"
platforms: linux/amd64,linux/arm64
platforms: linux/amd64,linux/arm/v7,linux/arm64
build-args: |
VERSION=${{ env.TAG }}
GIT_COMMIT=${{ github.sha }}
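The step above builds the gateway image for multiple architectures with docker/build-push-action. As a rough local equivalent (a sketch only: the tag and build-arg values are placeholders, and the platform list matches one of the two variants shown), you could run:

```bash
# Multi-arch build of the gateway image without pushing, mirroring the
# context, Dockerfile, platforms and outputs settings from the workflow step
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --output type=image,push=false \
  --build-arg VERSION=dev-local \
  --build-arg GIT_COMMIT=$(git rev-parse HEAD) \
  -f ./gateway/Dockerfile \
  -t openfaas/gateway:dev \
  ./gateway
```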
.github/workflows/publish.yml (vendored): 10 changes

@@ -22,11 +22,11 @@ jobs:
with:
fetch-depth: 1
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Login to Docker Registry
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}

@@ -47,12 +47,12 @@ jobs:
run: echo "REPO_OWNER=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" > $GITHUB_ENV

- name: Publish ${{ matrix.svc }}
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
context: ./gateway
file: ./gateway/Dockerfile
outputs: "type=registry,push=true"
platforms: linux/amd64,linux/arm64
platforms: linux/amd64,linux/arm/v7,linux/arm64
build-args: |
VERSION=${{ env.TAG }}
GIT_COMMIT=${{ github.sha }}
ADOPTERS.md: 51 changes

@@ -36,8 +36,6 @@ Tell us more:

* [911 Security](https://www.911security.com/) - "We migrated our Python functions from AWS Lambda using automation, and now run them in airgapped environments for customers using OpenFaaS and arkade" - Scott Creager

* [Altair Engineering](https://altair.com/) - OpenFaaS powers the customer functions capability of the IoT SaaS platform, and separately each private on-prem installation of the product.

* [Axa France](https://www.axa.fr) - Axa uses OpenFaaS for inference and predictions at scale using ML models - Pierre-Henri Gache

* [Baidu](https://baidu.com) - A team within Baidu provides ML models to customers which are hosted on OpenFaaS - He Sun.

@@ -46,26 +44,26 @@ Tell us more:

* [Black.ai](https://black.ai) - video encoding, transcoding - object detection for CCTV using AI.

* [Corva.ai](https://corva.ai) - "Corva is an information-sharing, collaboration-driving, and productivity-powering solution for your Drilling, Completions, Geoscience, and Sustainability teams."

* [3fs](https://3fs.si) - 3fs is using OpenFaaS for automating repetitive development tasks like automatic rebasing, vendoring of dependencies on merge requests and many other things that make our developers lives easier

* [Baidu](https://baidu.com) - A team within Baidu provides ML models to customers which are hosted on OpenFaaS - He Sun.

* [Breu](https://breu.io) - Breu is using OpenFaaS to build an end user monitoring solution for hybrid cloud.

* [BT](https://www.bt.com) - BT are using OpenFaaS to enable collaboration between data-scientists and developers. The teams are going from 3-years to build and deliver a PoC, to 3 months. See: [KubeCon video](https://www.youtube.com/watch?v=y77HlN2Fa-w)

* [BulletProof](https://www.bulletproof.co.uk/) - Bulletproof are using OpenFaaS to build an on demand and scalable Vulnerability Scanning (VA) engine. Using OpenFaaS allows us to use compute resource efficiently yet maintain the ability to grow to meet customer scanning demands. We also like the ability to use pure docker containers to compose multiple scanning tools with different technologies into a single, coherent interface. This has reduced the time need to add new tools to the platform.

* [CDATA](https://cdata.com) - Used for background jobs and tasks such as backing up and exchanging data between systems.

* [Citrix](https://www.citrix.com/en-gb/) - Citrix built out a closed-source multi-tenant functions platform and UI using OpenFaaS. It is used for testing hardware devices and for automated QA testing.

* [Civo](https://www.civo.com) - Civo Cloud provide a 1-click Kubernetes marketplace application for OpenFaaS

* [Cloud Initiatives](https://cloudinitiatives.com) - Used for customer installations for custom functionality, and for main product providing educational course metrics.

* [Cognite](https://www.cognite.com) - Cognite targets heavy asset industries such as oil and gas, shipping and energy sector. They provide data integration tools that help you extract, import, and transform data from siloed source systems, and OpenFaaS is used to provide a cloud function service for heavy tasks.

* [Contiamo](https://www.contiamo.com) - data-science platform hosting jupyter notebooks and functions for multiple tenants.

* [Corva.ai](https://corva.ai) - "Corva is an information-sharing, collaboration-driving, and productivity-powering solution for your Drilling, Completions, Geoscience, and Sustainability teams."

* [DB2 Limited](https://db2.io) - mobile and web development company in Ukraine. Our internal projects using OpenFaaS functions to run customers code in Kubernetes cluster.

* [DigitalOcean](https://www.digitalocean.com) - DigitalOcean provide a one-click droplet and a 1-click Kubernetes marketplace application for OpenFaaS

@@ -74,7 +72,7 @@ Tell us more:

* [Dragonchain](https://dragonchain.com/) - "At Dragonchain, we focus on creating a hybrid blockchain-as-a-service product, with integrations of OpenFaaS as our 'smart contract' platform, to be able to automatically run customer code based on interactions that occur on the blockchain. This allows us to be extremely flexible, as customers only have to create a docker container and give it to us in order to create a 'smart contract' which can have deep integrations with our blockchain itself.". Blog: [Dragonchain & OpenFaaS](https://dragonchain.com/blog/blockchain-as-a-service-at-scale-for-enterprise)

* [Edge Delta](https://www.edgedelta.com/) - "OpenFaaS powers parts of our "edge observability platform""
* [Edge Delta](https://www.edgedelta.com/) - "OpenFaaS powers parts of the "edge observability platform"

* [First Baptist Church Carrollton](https://www.fbcc.us) - "We use faasd as the backend for a Slack bot connected to our internal Slack workspace. The bot was initially created to facilitate remote question and answer sessions at our church by allowing viewers of our live stream to text or email questions in, have a staff member ask their question in the room, and then allow the staff member to send a response back to the sender. The texting is facilitated by Twilio while the email is done by interacting with a Gmail account via IMAP and SMTP."

@@ -84,8 +82,6 @@ Tell us more:

* [GalaxyCard](https://www.galaxycard.in/) - "GalaxyCard is a happy user of OpenFaaS"

* [GH Electronic GmbH](https://gselectronic.com/) - "We've been using OpenFaaS in production for over 5 years and have 30 C# functions which are used in our manufacturing process."

* [GMO Internet](https://www.gmo.jp/en/)

* [HelloSafe](https://hellosafe.ca/en/) - "HelloSafe is one of the leading website of financial products comparison in Canada. We're using OpenFaas on our production applications."

@@ -96,10 +92,10 @@ Tell us more:

* [Iconscout](https://iconscout.com) - e-commerce site for stock photography and icons. OpenFaaS is used to resize images and to bundle assets for customers.

* [Infotechpartners](www.infotechpartners.be)

* [Ingrooves](https://ingrooves.com) - Ingrooves is a global music distribution, tech & marketing company, and OpenFaaS is a key component in its finance system for report generation, event publishing, and data ingestion.

* [Infotechpartners](www.infotechpartners.be)

* [Intel.com](https://intel.com) - OpenFaaS is used within a commercial service and within the Open Source group for AI model serving.

* [Intraffic](https://www.intraffic.nl/) - "Using OpenFaaS for integration and callable AI/ML models for asset management."

@@ -112,17 +108,15 @@ Tell us more:

* [Live Time Value (LTV) Co.](https://www.ltvco.com) - "Data is at the heart of what we do" - the data-science team at LTV use OpenFaaS to provide a scalable and cost-effective way to run their models in production.

* [Mercedes Benz Tech Innovation](https://mercedes-benz.com) - "We are currently using OpenFaaS as a communicative API between the vehicle app and the backend. In the future we plan to offer a service in the company, which every app developer can host an API on the OpenFaaS platform at short notice"

* [metaspan](https://metaspan.com) - "End-to-end blockchain solutions". metaspan ported all api endpoints from monolith express.js/sails.js to openfaas micro-functions.

* [MoneyLion](https://www.moneylion.com/)

* [Naamio](https://naamio.cloud/) - "Naamio are providing an event-based serverless API to developers to enable rapid development of decentralized applications on the cloud. By providing progressive enhancement within the developer tools, OpenFaaS has enabled Naamio to go from clustered Docker container deployments with REST APIs using Kubernetes, to load balanced deployable functions over an open event queue interface. It was key to enabling a standard multilingual development kit across cloud providers."

* [Neoskop](https://www.neoskop.de) - Neoskop is using OpenFaaS in production to provide our developers with a self-service platform for backend functionality and thereby our customers agile and rapid feature development.
* [Neoskop](https://www.neoskop.de) - Neoskop is using OpenFAAS in production to provide our developers with a self-service platform for backend functionality and thereby our customers agile and rapid feature development.

* [Nexylan](nexylan.com/) - "We are a French professional host that use OpenFaaS in dev and production inside our private extranet. We use OpenFaaS to split our historic monolith project and then simplify development/maintainability and speed up development times."
* [Nexylan](nexylan.com/) - "We are a French professional hoster that use OpenFaaS in dev and production inside our private extranet. We use OpenFaaS to split our historic monolith project and then simplify development/maintainability and speed up development times."

* [NGC](https://www.ngcsoftware.com/)

@@ -148,11 +142,9 @@ Tell us more:

* [Pypestream](https://www.pypestream.com) - "We have just migrated 50 of our customers from Kubeless, which is now deprecated to OpenFaaS" - Antoine Hamon

* [Rapid Circle](https://www.rapidcircle.com) is using OpenFaaS within a Azure Kubernetes cluster to host a large amount of micro-services aiming at automating core activities of their Microsoft 365 Cloud Managed Services offering. Robustness, speed, scalable and simplicity have been major reasons to favor OpenFaaS over Azure Functions.

* [Ratehub](https://www.ratehub.ca) - Ratehub is Canada's leading personal finance comparison site. We're breaking apart our monolithic PHP and Java codebases into Node, PHP and Java OpenFaaS functions; there's not much that we don't plan on moving to FaaS!

* [skyslope.com](https://skyslope.com) - "We process millions of documents per day and moved from AWS Lambda to Kubernetes. We estimate that OpenFaaS has saved us 60,000 USD each year over the past three years that we've been running it in our business" - Derrick Martinez
* [Rapid Circle](https://www.rapidcircle.com) is using OpenFaaS within a Azure Kubernetes cluster to host a large amount of micro-services aiming at automating core activities of their Microsoft 365 Cloud Managed Services offering. Robustness, speed, scalable and simplicity have been major reasons to favor OpenFaaS over Azure Functions.

* [smashHit](https://smashhit.eu) - smashHit is a project funded by the European Union's Horizon 2020 research and innovation programme under grant agreement No. 871477. The objective of smashHit is to assure trusted and secure sharing of data streams from both personal and industrial platforms, needed to build sectorial and cross-sectorial services, by establishing a Framework for processing of data owner consent and legal rules (GDPR) and effective contracting, as well as joint security and privacy-preserving mechanisms. We are utilising OpenFaaS to support the need for scalable processing through the use of functions.

@@ -164,7 +156,7 @@ Tell us more:

* [Surge](https://www.workwithsurge.com) - Lending Platform and Salesforce integrations

* [TeamViewer.com](https://teamviewer.com) - "TeamViewer uses OpenFaaS across several clusters"
* [skyslope.com](https://skyslope.com) - "We process millions of documents per day and moved from AWS Lambda to Kubernetes. We estimate that OpenFaaS has saved us 60,000 USD each year over the past three years that we've been running it in our business" - Derrick Martinez

* [T-Mobile](https://www.t-mobile.com/) - T-Mobile is a global mobile network that provides mobile data, voice and text services to consumers and businesses.

@@ -191,25 +183,8 @@ Tell us more:

* [Wireline.io](https://wireline.io) - portable functions that can run on any hardware, indexed through blockchain.

* [WorldQuant](https://worldquant.com) - Using OpenFaaS as part of WorldQuant's solutions.

* [Yokogawa Electric](https://en.wikipedia.org/wiki/Yokogawa_Electric)

* [Ytel](https://www.ytel.com) - Ytel are a Google Cloud customer and deployed OpenFaaS vs. the vendor alternative due to its wide range of templates, Dockerfile support and easier access to services within the VPC. The Dockerfile template allowed for easy migration of existing code. The latency of transactions for customers during purchase process was reduced by offloading synchronous code to NATS which is built into OpenFaaS. OpenFaaS also allowed "hot path" code to be refactored from large services into multiple functions, to take advantage of horizontal scaling.

See the top of the file for how to participate.

## Appendix

### Sorting sections

The adopters list should be sorted after it's been updated; here's how you can do that with bash.

```bash
cat | sort --ignore-case
# Paste the entries to sort

# Then press Control + D to end input and print the sorted list
```

Please note that for a few vendors, multiple use-cases are listed under the same entry, so the sorted output needs to be compared with the existing content before updating the document. A non-interactive variant is sketched below.
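As a non-interactive alternative to the copy-and-paste approach above (a minimal sketch, assuming the list lives in ADOPTERS.md at the repository root and that every entry starts with `* [`):

```bash
# Print the bullet entries in case-insensitive sorted order for comparison
grep '^\* \[' ADOPTERS.md | sort --ignore-case
```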
@@ -1,14 +1,12 @@
# Contributing guidelines
# Contributing

These are the guidelines for contributing to OpenFaaS Community Edition (CE) and faasd components.
## Guidelines

OpenFaaS Standard and OpenFaaS For Enterprises are commercial software, and maintained solely by employees of OpenFaaS Ltd.
Guidelines for contributing.

Customers can provide feedback via the [openfaas/customers](https://github.com/openfaas/customers) repository
### First impressions - introducing yourself and your use-case

## First impressions - introducing yourself and your use-case

One of the best ways to participate within a new open source community is to introduce yourself and your use-case. This builds goodwill, but also means the community can start to understand your needs and how best to help you.
One of the best ways to participate within a new open source communities is to introduce yourself and your use-case. This builds goodwill, but also means the community can start to understand your needs and how best to help you.

Given that the community is made up of volunteers, making a good first impression is important to getting their ear and attention.

@@ -33,7 +31,7 @@ The primary ways to engage with the community are via GitHub Issues and [Enterpr

See also: [The no-excuses guide to introducing yourself to a new open source project](https://opensource.com/education/13/7/introduce-yourself-open-source-project)

## How can I get involved?
### How can I get involved?

There are a number of areas where contributions can be accepted:

@@ -51,19 +49,17 @@ There are a number of areas where contributions can be accepted:

This is just a short list of ideas, if you have other ideas for contributing please make a suggestion.

If you'd like help getting involved, [join our weekly community call on Zoom](https://docs.openfaas.com/community).
### I want to contribute on GitHub

## I want to contribute on GitHub
#### I've found a security issue

### I've found a security issue
Please follow [responsible disclosure practices](https://en.wikipedia.org/wiki/Responsible_disclosure) and send an email to support@openfaas.com. Bear in mind that instructions on how to reproduce the issue are key to proving an issue exists, and getting it resolved. Suggested solutions are also welcome.

Please follow [responsible disclosure practices](https://en.wikipedia.org/wiki/Responsible_disclosure) and send an email to support@openfaas.com. Bear in mind that instructions on how to reproduce the issue are key to proving an issue exists, and getting it resolved.

### I've found a typo
#### I've found a typo

* A Pull Request is not necessary. Raise an [Issue](https://github.com/openfaas/faas/issues) and we'll fix it as soon as we can.

### I have a (great) idea
#### I have a (great) idea

The OpenFaaS maintainers would like to make OpenFaaS the best it can be and welcome new contributions that align with the project's goals. Our time is limited so we'd like to make sure we agree on the proposed work before you spend time doing it. Saying "no" is hard which is why we'd rather say "yes" ahead of time. You need to raise a proposal.

@@ -88,7 +84,7 @@ If you are proposing a new tool or service please do due diligence. Does this to

Every effort will be made to work with contributors who do not follow the process. Your PR may be closed or marked as `invalid` if it is left inactive, or the proposal cannot move into a `design/approved` status.

### Paperwork for Pull Requests
#### Paperwork for Pull Requests

Please read this whole guide and make sure you agree to the Developer Certificate of Origin (DCO) agreement (included below):

@@ -99,7 +95,7 @@ Please read this whole guide and make sure you agree to the Developer Certificat
* Always give instructions for testing
* Provide us CLI commands and output or screenshots where you can

#### Commit messages
##### Commit messages

The first line of the commit message is the *subject*, this should be followed by a blank line and then a message describing the intent and purpose of the commit. These guidelines are based upon a [post by Chris Beams](https://chris.beams.io/posts/git-commit/).
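As a minimal illustration of that format (the subject and body below are made up for the example; the -s flag adds the Signed-off-by line required by the DCO mentioned earlier):

```bash
# Subject line, then a separate paragraph describing intent; -s appends Signed-off-by
git commit -s \
  -m "Retry gateway health probe on startup" \
  -m "Explain why the change is needed, how it was tested, and any follow-up work."
```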
@@ -182,44 +178,47 @@ defer goleak.VerifyNoLeaks(t)

at the very beginning of the test, and it will fail the test if it detects goroutines that were opened but never cleaned up at the end of the test.

#### I need to add a dependency

All projects use [Go modules](https://github.com/golang/go/wiki/Modules) and vendoring. The concept of `vendoring` is still broadly used in projects written in Go. This means that a copy of the source-code of dependencies is stored within each repository in the `vendor` folder. It allows for a repeatable build and isolates change.

Components must be licensed with an MIT, BSD, or Apache 2.0 license. We may ask you to write your own code when dependencies are trivial, or unmaintained by their authors.
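A minimal sketch of that workflow, using a hypothetical module path and version:

```bash
# Add or update a dependency, then refresh the vendor folder
go get github.com/example/somelib@v1.2.3
go mod tidy
go mod vendor
```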
### I have a question, a suggestion or need help
#### I have a question, a suggestion or need help

If you have a deeply technical request or need help debugging your application then you should prepare a simple, public GitHub repository with the minimum amount of code required to reproduce the issue.

If you feel there is an issue with OpenFaaS or were unable to get the help you needed from the GitHub, [then send us an email](https://openfaas.com/support/)

#### Setting expectations, support and SLAs

* What kind of support can I expect?
* What kind of support can I expect for free?

OpenFaaS Standard customers have self-service support, and can directly contact the OpenFaaS Ltd team via the [openfaas/customers](https://github.com/openfaas/customers) repository using Discussions.
OpenFaaS is licensed in a way that enables you to use the source code in or with your project or product.

Support is only offered to free users to fix bugs and issues in the codebase, where the full Issue Template is filled out with sufficient instructions to reproduce the issue. We will not debug your application, or comment on your architecture on GitHub.
If you are using one of the Open Source projects within the openfaas or openfaas-incubator repository, then help may be offered on a limited, good-will basis by volunteers, but if you are a commercial user, you will need to purchase support for timely help.

Please be respectful of any time given to you and your needs. The person you are requesting help from may not reside in your timezone and contacting them via direct message is inappropriate.

* Can we talk to you in person?
Enterprise support is the best place to ask questions, suggest features, and to get help. The GitHub issue tracker can be used for suspected issues with the codebase or deployment artifacts. The whole template must be filled out in detail.

There is a weekly Zoom call for any free user or customer to attend, topics are taken at the beginning of the call, and we will strive to give everyone time to talk.
* Doesn't Open Source mean that everything is free?

The OpenFaaS projects are licensed as MIT which means that you are free to use, modify and distribute the software within the terms of the license.

Contributions, suggestions and feedback is welcomed in the appropriate channels as outlined in this guide. The MIT license does not cover support for PRs, Issues, Technical
Support questions, feature requests and technical support/professional services which you may require; the preceding are not free and have a cost to those providing the services. Where possible, this time may be volunteered for free, but it is not unlimited.

* What is the SLA for my Issue?

Issues are examined, triaged and answered on a best effort basis by volunteers and community contributors. This means that you may receive an initial response within any time period such as: 1 minute, 1 hour, 1 day, or 1 week. There is no implicit meaning to the time between you raising an issue and it being answered or resolved.

If you see an issue which does not have a response or does not have a resolution, it does not mean that it is not important, or that it is being ignored. It simply means it has not been worked on yet, or may have been missed.
If you see an issue which does not have a response or does not have a resolution, it does not mean that it is not important, or that it is being ignored. It simply means it has not been worked on by a volunteer yet.

Please take responsibility for following up on your Issues if you feel further action is required.

If you're an OpenFaaS customer, then you will have a direct line of communication with the OpenFaaS Ltd team, feel free to reach out for an update.
If you are a business using OpenFaaS and need timely and attentive responses, then you should purchase Enterprise Support from OpenFaaS Ltd.

* What is the SLA for my Pull Request?

In a similar way to Issues, Pull Requests are triaged, reviewed, and considered by a team of volunteers - the Core Team, Members Team and the Project Lead. There are dozens of components that make up the OpenFaaS project and a limited amount of people. Sometimes PRs may become blocked or require further action.

Please take responsibility for following up on your Pull Requests if you feel further action is required.

* Why may your PR be delayed?

* The contributing guide was not followed in some way

@@ -230,19 +229,25 @@ If you have a deeply technical request or need help debugging your application t

* Changes have been requested

* The PR is low priority or low impact

In addition, more information, a use-case, or context may be required for the change to be accepted.
More information, a use-case, or context may be required for the change to be accepted.

* What if I am a GitHub Sponsor?

If you [sponsor OpenFaaS on GitHub](https://github.com/sponsors/openfaas), then you will show up as a Sponsor on your issues and PRs which is one way to show your support for the community and project. Thank you for your contribution.

Most sponsors are individuals, not corporations, but your organisation can also take up a GitHub Sponsorship using their GitHub organisation's existing billing relationship.
If you [sponsor OpenFaaS on GitHub](https://github.com/sponsors/openfaas), then you will show up as a Sponsor on your issues and PRs which is one way to show your support for the community and project. Whilst the entry-level sponsorship is only 25 USD / mo, you will benefit from access to regular updates on project development via the [Treasure Trove portal](https://faasd.exit.openfaas.pro/function/trove/). Your company can also take up a GitHub Sponsorship using their GitHub organisation's existing billing relationship.

* What if I need more?
* What if I need more than that?

[Check out the options for self-service and enterprise support](https://openfaas.com/pricing/).
If you're a company using any of these projects, you can get the following through an [Enterprise Support agreement with OpenFaaS Ltd](https://openfaas.com/support/) so that the time and resources required to support your business are paid for.

A support agreement can be tailored to your needs, you may benefit from support, if you need any of the following:

* security issues patched in a timely manner for all 40 +/- open source components
* priority responses to issues/PRs
* immediate help and access to experts

#### I need to add a dependency

All projects use [Go modules](https://github.com/golang/go/wiki/Modules) and vendoring. The concept of `vendoring` is still broadly used in projects written in Go. This means that a copy of the source-code of dependencies is stored within each repository in the `vendor` folder. It allows for a repeatable build and isolates change.

### How are releases made?

@@ -297,10 +302,8 @@ Core Team attend all project meetings and calls. Allowances will be made for tim

The Core Team includes:

- Alex Ellis (@alexellis) - Founder, OpenFaaS Ltd
- Han Verstraete (@welteki) - Junior Software Developer, OpenFaaS Ltd
- Lucas Roesler (@LucasRoesler) - SME for logs, provider model and secrets. Lead Developer @ Contiamo
- Nitishkumar Singh (@nitishkumar71) - Senior Engineer, CTO.ai
- Alex Ellis (@alexellis) - Lead
- Lucas Roesler (@LucasRoesler) - SME for logs, provider model and secrets

#### Members Team

@@ -386,9 +389,7 @@ See also: [OpenFaaS Pro](https://docs.openfaas.com/openfaas-pro/introduction/)

## License

All third-party contributions are licensed under the MIT license, all OpenFaaS Ltd contributions are licensed under the [OpenFaaS CE EULA](https://github.com/openfaas/faas/blob/master/EULA.md).

OpenFaaS Standard and OpenFaaS for Enterprises are proprietary and binaries are licensed under the commercial [OpenFaaS Pro EULA](https://github.com/openfaas/faas/blob/master/pro/EULA.md).
This project is licensed under the MIT License.

### Copyright notice

@@ -397,7 +398,7 @@ It is important to state that you retain copyright for your contributions, but a

Please add a Copyright notice to new files you add where this is not already present.

```
// Copyright (c) OpenFaaS Author(s) 2023. All rights reserved.
// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
```
EULA.md: 65 changes

@@ -1,65 +0,0 @@

End User License Agreement (EULA) for OpenFaaS Community Edition

Licensed Software. OpenFaaS Community Edition is provided as free software under this End User License Agreement (EULA). This EULA applies to all source code, tooling, documentation, configuration, binaries, and container images produced by OpenFaaS Ltd 2017, 2019-2024. Any third-party contributions included in OpenFaaS Community Edition (CE) are licensed under the MIT License.

1.1 Software Ownership. All available artifacts that comprise "OpenFaaS" (gateway, faasd, faas-netes, queue-worker, etc) including code, documentation, configuration, designs, binaries and container images are the sole property and copyright of OpenFaaS Ltd.

1.2 Use Restrictions. Commercial use of the OpenFaaS Community Edition is limited to one installation per company and to a period not exceeding 60 days. The software cannot be resold, distributed to, or installed for a client for commercial purposes.

Your Agreement. By accessing, executing, or otherwise using the OpenFaaS Community Edition, you ("Customer") acknowledge that you have read this Agreement, understand it, and agree to be bound by its terms and conditions. If you are not willing to be bound by the terms of this Agreement, do not access or use the OpenFaaS Community Edition.

2.1 Entity Representation. If you are using the OpenFaaS Community Edition in your capacity as an employee or agent of a company or organization, any references to "you" in this Agreement shall refer to such entity and not to you in your personal capacity. You warrant that you are authorized to legally bind the company or organization on whose behalf you are accessing the OpenFaaS Community Edition.

2.2 Agreement Parties. This Agreement is between You ("Customer") and OpenFaaS Ltd. ("Supplier").

2.3 Governing Law. This Agreement shall be governed by, and construed in accordance with, the laws of England and Wales.

2.4 Entire Agreement and Supersession.

This Agreement, together with any referenced Order Form, forms the entire agreement between the Customer and OpenFaaS Ltd. regarding the Licensed Software, superseding all prior or existing agreements, including any purchase order (PO) terms submitted by the Customer. Any PO T&Cs are void and replaced by this Agreement unless OpenFaaS Ltd. agrees otherwise in writing.

License Grant; Ownership.

3.1 License Grant. Subject to the terms and conditions of this Agreement, OpenFaaS Ltd hereby grants to the Customer a limited, non-exclusive, non-transferable, revocable license to use the OpenFaaS Community Edition solely for internal business purposes and in accordance with the restrictions set forth herein.

3.2 Ownership and Intellectual Property Rights. OpenFaaS Ltd retains all rights, title, and interest in the OpenFaaS Community Edition, including any and all intellectual property rights.

Restrictions and Responsibilities.

4.1 Usage Restrictions. The Customer shall not distribute, sublicense, rent, lease, modify, translate, reverse engineer, decompile, disassemble, create derivative works based on, or copy the OpenFaaS Community Edition, except as expressly permitted by applicable law.

4.2 Feedback. Customer may provide feedback to OpenFaaS Ltd, which OpenFaaS Ltd may use to improve the software without obligation to the Customer.

Termination. This Agreement is effective from the first date you install the OpenFaaS Community Edition. You may terminate this Agreement at any time by deleting all copies of the software. OpenFaaS Ltd may terminate this Agreement at any time if you fail to comply with the terms.

Limitation of Liability.

6.1 Warranty Disclaimer. The OpenFaaS Community Edition is provided "as is" without warranty of any kind. You use the software at your own risk.

6.2 Liability Limitations. OpenFaaS Ltd shall not be liable for any indirect, special, incidental, or consequential damages arising out of the use of the OpenFaaS Community Edition.

General Provisions.

7. Co-Marketing

7.1 At the request of Supplier, Customer agrees to participate in other reasonable marketing activities that promote the benefits of the Services to other potential customers, including providing testimonials, case studies, and references.

7.2 Customer grants use of the Customer's name and logo on the Supplier's websites and in the Supplier's promotional materials.

7.3 Customer agrees that Supplier may disclose Customer as a customer of the Products.

8. Installation and Usage Restrictions

8.1 Single Installation Limit for Commercial Use: The License granted under this Agreement for the OpenFaaS Community Edition permits commercial use of the software for a period not exceeding 60 days. This period is intended to allow for evaluation of the software's capabilities in a commercial environment. Commercial use is strictly limited to one (1) installation per company for the 60-day evaluation period.

8.2 Prohibition on Circumvention: To ensure fair use of the OpenFaaS Community Edition, the Customer agrees not to engage in any action with the intent to circumvent the 60-day evaluation period limitation. This includes, but is not limited to, uninstalling and reinstalling the software on the same or different systems within the company to restart the evaluation period.

8.3 Enforcement and Verification: OpenFaaS Ltd reserves the right to implement reasonable measures to verify compliance with the terms of this Agreement, including the installation and usage restrictions set forth herein. The Customer agrees to cooperate with OpenFaaS Ltd in any such compliance verification efforts.

8.4 Consequences of Violation: Any attempt to bypass or violate the installation and usage restrictions, as described in Sections 8.1 and 8.2, may result in immediate termination of this EULA and the License granted hereunder. Further, the Customer may be subject to legal action and liable for damages resulting from any such violation.

9.1 Entire Agreement. This EULA constitutes the entire agreement between the parties concerning the subject matter hereof.

9.2 Sections 1-8 will remain effective after the termination of the Agreement.

9.3 Contact Information. For questions about these terms, contact OpenFaaS Ltd at: contact@openfaas.com.
LICENSE: 11 changes

@@ -1,14 +1,3 @@
All contributions from Alex Ellis & OpenFaaS Ltd are licensed under the
OpenFaaS Community Edition (CE) EULA between the years 2017,2019-2024.

Contributions from third-parties are licensed under the MIT license.

A license is required for commercial use of OpenFaaS CE:
https://github.com/openfaas/faas/blob/master/EULA.md

A separate commercial license covering all contributions can be purchased
from OpenFaaS Ltd, with details available at: https://openfaas.com/pricing

MIT License

Copyright (c) 2016-2018 Alex Ellis
Makefile: 6 changes

@@ -5,12 +5,6 @@ NS?=openfaas
build-gateway:
	(cd gateway; docker buildx build --platform linux/amd64 -t ${NS}/gateway:latest-dev .)

# generate Go models from the OpenAPI spec using https://github.com/contiamo/openapi-generator-go
generate:
	rm gateway/models/model_*.go || true
	openapi-generator-go generate models -s api-docs/spec.openapi.yml -o gateway/models --package-name models

# .PHONY: test-ci
# test-ci:
#	./contrib/ci.sh
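The Makefile defaults the image namespace to openfaas via `NS?=openfaas`, so the dev gateway image can be built under a different registry account by overriding that variable (a sketch; `myorg` is a placeholder):

```bash
# Builds myorg/gateway:latest-dev using the build-gateway target above
NS=myorg make build-gateway
```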
README.md: 17 changes

@@ -2,6 +2,7 @@

[](https://github.com/openfaas/faas/actions/workflows/build.yml)
[](https://pkg.go.dev/github.com/openfaas/faas)
[](https://opensource.org/licenses/MIT)
[](https://www.openfaas.com)
[](https://github.com/alexellis/derek/)

@@ -154,7 +155,7 @@ Have you written a blog about OpenFaaS? Do you have a speaking event? Send a Pul

### Contributing

OpenFaaS Community Edition is written in Golang. All third-party contributions to the source code are made under the MIT license, additional restrictions apply to OpenFaaS CE as a whole, where contributions from OpenFaaS Ltd are licensed under the [OpenFaaS CE EULA](EULA.md). Various types of contributions are welcomed whether that means providing feedback, testing existing and new features or hacking on the source code.
OpenFaaS Community Edition is written in Golang and is MIT licensed. Various types of contributions are welcomed whether that means providing feedback, testing existing and new features or hacking on the source code.

#### How do I become a contributor?

@@ -173,13 +174,21 @@ An alternative community dashboard is [available here](https://grafana.com/dashb

* Individual Sponsorships 🍻

Users and contributors are encouraged to join their peers in supporting the OpenFaaS project through [GitHub Sponsors](https://github.com/sponsors/openfaas).
The source code for OpenFaaS shared in public repositories on GitHub is free to use and open source under the terms of the MIT license.

OpenFaaS Ltd offers [commercial support and enterprise add-ons](https://www.openfaas.com/support) for end-users and [training and consulting services for Cloud and Kubernetes](https://www.openfaas.com/consulting).

Users and contributors are encouraged to join their peers in supporting the project through [GitHub Sponsors](https://github.com/sponsors/openfaas).

* OpenFaaS Pro for Production

OpenFaaS Pro (Standard and For Enterprises) is built for production, the [Community Edition (CE)](EULA.md) is suitable for a Proof of Concept (PoC), for experimentation, and some limited internal use.
OpenFaaS Pro is built for production, the Community Edition (CE) is suitable for open-source developers.

[Learn more about OpenFaaS editions](https://openfaas.com/pricing/)
Upgrade to our commercial distribution with finely-tuned auto-scaling, scale to zero and event connectors for Kafka and AWS SQS.

We also offer Enterprise Support where you get to work directly with the founders of the project.

[Contact us about OpenFaaS Pro & Enterprise Support](https://openfaas.com/support/)

* Website Sponsorship 🌎
@@ -1,11 +1,11 @@
## Exploring or editing the Swagger API documentation

The `spec.openapi.yml` file can be viewed and edited in the Swagger UI.
The `swagger.yml` file can be viewed and edited in the Swagger UI.

* Head over to the [Swagger editor](http://editor.swagger.io/)

* Now click File -> Import URL

* Type in `https://raw.githubusercontent.com/openfaas/faas/master/api-docs/spec.openapi.yml` and click OK
* Type in `https://raw.githubusercontent.com/openfaas/faas/master/api-docs/swagger.yml` and click OK

You can now view and edit the Swagger, copy back to your fork before pushing changes.
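If you prefer to edit the spec locally before importing it, you could fetch it first (a sketch using the raw URL quoted in the steps above):

```bash
# Downloads spec.openapi.yml into the current directory
curl -sSLO https://raw.githubusercontent.com/openfaas/faas/master/api-docs/spec.openapi.yml
```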
@@ -1,996 +0,0 @@
openapi: 3.0.1
|
||||
info:
|
||||
title: OpenFaaS API Gateway
|
||||
description: OpenFaaS API documentation
|
||||
license:
|
||||
name: MIT
|
||||
version: 0.8.12
|
||||
contact:
|
||||
name: OpenFaaS Ltd
|
||||
url: https://www.openfaas.com/support/
|
||||
servers:
|
||||
- url: "http://localhost:8080"
|
||||
description: Local server
|
||||
tags:
|
||||
- name: internal
|
||||
description: Internal use only
|
||||
- name: system
|
||||
description: System endpoints for managing functions and related objects
|
||||
- name: function
|
||||
description: Endpoints for invoking functions
|
||||
paths:
|
||||
"/healthz":
|
||||
get:
|
||||
summary: Healthcheck
|
||||
operationId: healthcheck
|
||||
description: Healthcheck for the gateway, indicates if the gateway is running and available
|
||||
tags:
|
||||
- internal
|
||||
responses:
|
||||
'200':
|
||||
description: Healthy
|
||||
'500':
|
||||
description: Not healthy
|
||||
"/metrics":
|
||||
get:
|
||||
summary: Prometheus metrics
|
||||
operationId: metrics
|
||||
description: Prometheus metrics for the gateway
|
||||
tags:
|
||||
- internal
|
||||
responses:
|
||||
'200':
|
||||
description: Prometheus metrics in text format
|
||||
"/system/info":
|
||||
get:
|
||||
operationId: GetSystemInfo
|
||||
description: Get system provider information
|
||||
summary: Get info such as provider version number and provider orchestrator
|
||||
tags:
|
||||
- system
|
||||
responses:
|
||||
'200':
|
||||
description: Info result
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/GatewayInfo"
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
"/system/alert":
|
||||
post:
|
||||
operationId: ScaleAlert
|
||||
description: Scale a function based on an alert
|
||||
summary: |
|
||||
Event-sink for AlertManager, for auto-scaling
|
||||
|
||||
Internal use for AlertManager, requires valid AlertManager alert
|
||||
JSON
|
||||
tags:
|
||||
- internal
|
||||
requestBody:
|
||||
description: Incoming alert
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PrometheusAlert'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Alert handled successfully
|
||||
'500':
|
||||
description: Internal error with swarm or request JSON invalid
|
||||
"/system/functions":
|
||||
get:
|
||||
operationId: GetFunctions
|
||||
description: Get a list of deployed functions
|
||||
summary: 'Get a list of deployed functions with: stats and image digest'
|
||||
tags:
|
||||
- system
|
||||
responses:
|
||||
'200':
|
||||
description: List of deployed functions.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
"$ref": "#/components/schemas/FunctionStatus"
|
||||
put:
|
||||
operationId: UpdateFunction
|
||||
description: update a function spec
|
||||
summary: Update a function.
|
||||
tags:
|
||||
- system
|
||||
requestBody:
|
||||
description: Function to update
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/FunctionDeployment"
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
description: Accepted
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
post:
|
||||
operationId: DeployFunction
|
||||
description: Deploy a new function.
|
||||
summary: Deploy a new function.
|
||||
tags:
|
||||
- system
|
||||
requestBody:
|
||||
description: Function to deploy
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/FunctionDeployment"
|
||||
required: true
|
||||
responses:
|
||||
'202':
|
||||
description: Accepted
|
||||
'400':
|
||||
description: Bad Request
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
delete:
|
||||
operationId: DeleteFunction
|
||||
description: Remove a deployed function.
|
||||
summary: Remove a deployed function.
|
||||
tags:
|
||||
- system
|
||||
requestBody:
|
||||
description: Function to delete
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/DeleteFunctionRequest"
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
"/system/scale-function/{functionName}":
|
||||
post:
|
||||
operationId: ScaleFunction
|
||||
description: Scale a function
|
||||
summary: Scale a function to a specific replica count
|
||||
tags:
|
||||
- system
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ScaleServiceRequest'
|
||||
responses:
|
||||
'200':
|
||||
description: Scaling OK
|
||||
'202':
|
||||
description: Scaling OK
|
||||
'404':
|
||||
description: Function not found
|
||||
'500':
|
||||
description: Error scaling function
|
||||
|
||||
"/system/function/{functionName}":
|
||||
get:
|
||||
operationId: GetFunctionStatus
|
||||
description: Get the status of a function by name
|
||||
tags:
|
||||
- system
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: namespace
|
||||
in: query
|
||||
description: Namespace of the function
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
description: Function Summary
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
"$ref": "#/components/schemas/FunctionStatus"
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
"/system/secrets":
|
||||
get:
|
||||
operationId: ListSecrets
|
||||
description: Get a list of secret names and metadata from the provider
|
||||
summary: Get a list of secret names and metadata from the provider
|
||||
tags:
|
||||
- system
|
||||
responses:
|
||||
'200':
|
||||
description: List of submitted secrets.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/SecretDescription"
|
||||
put:
|
||||
operationId: UpdateSecret
|
||||
description: Update a secret.
|
||||
summary: Update a secret, the value is replaced.
|
||||
tags:
|
||||
- system
|
||||
requestBody:
|
||||
description: Secret to update
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/Secret"
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
description: Ok
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'405':
|
||||
description: Method Not Allowed. Secret update is not allowed in faas-swarm.
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
post:
|
||||
operationId: CreateSecret
|
||||
description: Create a new secret.
|
||||
tags:
|
||||
- system
|
||||
requestBody:
|
||||
description: A new secret to create
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/Secret"
|
||||
required: true
|
||||
responses:
|
||||
'201':
|
||||
description: Created
|
||||
'400':
|
||||
description: Bad Request
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
delete:
|
||||
operationId: DeleteSecret
|
||||
description: Remove a secret.
|
||||
tags:
|
||||
- system
|
||||
requestBody:
|
||||
description: Secret to delete
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/SecretDescription"
|
||||
required: true
|
||||
responses:
|
||||
'204':
|
||||
description: OK
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
"/system/logs":
|
||||
get:
|
||||
operationId: GetFunctionLogs
|
||||
description: Get a stream of the logs for a specific function
|
||||
tags:
|
||||
- system
|
||||
parameters:
|
||||
- name: name
|
||||
in: query
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: namespace
|
||||
in: query
|
||||
description: Namespace of the function
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
- name: instance
|
||||
in: query
|
||||
description: Instance of the function
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
- name: tail
|
||||
in: query
|
||||
description: Sets the maximum number of log messages to return, <=0 means
|
||||
unlimited
|
||||
schema:
|
||||
type: integer
|
||||
- name: follow
|
||||
in: query
|
||||
description: When true, the request will stream logs until the request timeout
|
||||
schema:
|
||||
type: boolean
|
||||
- name: since
|
||||
in: query
|
||||
description: Only return logs after a specific date (RFC3339)
|
||||
schema:
|
||||
type: string
|
||||
format: date-time
|
||||
responses:
|
||||
'200':
|
||||
description: Newline delimited stream of log messages
|
||||
content:
|
||||
application/x-ndjson:
|
||||
schema:
|
||||
"$ref": "#/components/schemas/LogEntry"
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
|
||||
"/system/namespaces":
|
||||
get:
|
||||
operationId: ListNamespaces
|
||||
description: Get a list of namespaces
|
||||
tags:
|
||||
- system
|
||||
responses:
|
||||
'200':
|
||||
description: List of namespaces
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ListNamespaceResponse'
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
|
||||
"/async-function/{functionName}":
|
||||
post:
|
||||
operationId: InvokeAsync
|
||||
description: Invoke a function asynchronously
|
||||
summary: |
|
||||
Invoke a function asynchronously in the default OpenFaaS namespace
|
||||
|
||||
Any additional path segments and query parameters will be passed to the function as is.
|
||||
|
||||
See https://docs.openfaas.com/reference/async/.
|
||||
tags:
|
||||
- function
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
description: "(Optional) data to pass to function"
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example: '{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'202':
|
||||
description: Request accepted and queued
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
|
||||
"/async-function/{functionName}.{namespace}":
|
||||
post:
|
||||
operationId: InvokeAsyncNamespaced
|
||||
description: Invoke a function asynchronously in an OpenFaaS namespace.
|
||||
summary: |
|
||||
Invoke a function asynchronously in an OpenFaaS namespace.
|
||||
|
||||
Any additional path segments and query parameters will be passed to the function as is.
|
||||
|
||||
See https://docs.openfaas.com/reference/async/.
|
||||
tags:
|
||||
- function
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: namespace
|
||||
in: path
|
||||
description: Namespace of the function
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
description: "(Optional) data to pass to function"
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example: '{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'202':
|
||||
description: Request accepted and queued
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
|
||||
"/function/{functionName}":
|
||||
post:
|
||||
operationId: InvokeFunction
|
||||
description: Invoke a function in the default OpenFaaS namespace.
|
||||
summary: |
|
||||
Synchronously invoke a function defined in the default OpenFaaS namespace.
|
||||
|
||||
Any additional path segments and query parameters will be passed to the function as is.
|
||||
tags:
|
||||
- function
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
description: "(Optional) data to pass to function"
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example: '{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Value returned from function
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal server error
|
||||
|
||||
"/function/{functionName}.{namespace}":
|
||||
post:
|
||||
operationId: InvokeFunctionNamespaced
|
||||
description: Invoke a function in an OpenFaaS namespace.
|
||||
summary: |
|
||||
Synchronously invoke a function defined in the specified namespace.
|
||||
|
||||
Any additional path segments and query parameters will be passed to the function as is.
|
||||
tags:
|
||||
- function
|
||||
parameters:
|
||||
- name: functionName
|
||||
in: path
|
||||
description: Function name
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: namespace
|
||||
in: path
|
||||
description: Namespace of the function
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
requestBody:
|
||||
description: "(Optional) data to pass to function"
|
||||
content:
|
||||
"*/*":
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example: '{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Value returned from function
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal server error
|
||||
components:
|
||||
securitySchemes:
|
||||
basicAuth:
|
||||
type: http
|
||||
scheme: basic
|
||||
|
||||
schemas:
|
||||
GatewayInfo:
|
||||
required:
|
||||
- provider
|
||||
- version
|
||||
- arch
|
||||
type: object
|
||||
properties:
|
||||
provider:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/ProviderInfo"
|
||||
version:
|
||||
nullable: true
|
||||
description: version of the gateway
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/VersionInfo"
|
||||
arch:
|
||||
type: string
|
||||
description: Platform architecture
|
||||
example: x86_64
|
||||
VersionInfo:
|
||||
type: object
|
||||
required:
|
||||
- sha
|
||||
- release
|
||||
properties:
|
||||
commit_message:
|
||||
type: string
|
||||
example: Sample Message
|
||||
sha:
|
||||
type: string
|
||||
example: 7108418d9dd6b329ddff40e7393b3166f8160a88
|
||||
release:
|
||||
type: string
|
||||
format: semver
|
||||
example: 0.8.9
|
||||
ProviderInfo:
|
||||
type: object
|
||||
required:
|
||||
- provider
|
||||
- orchestration
|
||||
- version
|
||||
properties:
|
||||
provider:
|
||||
type: string
|
||||
description: The orchestration provider / implementation
|
||||
example: faas-netes
|
||||
orchestration:
|
||||
type: string
|
||||
example: kubernetes
|
||||
version:
|
||||
description: The version of the provider
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/VersionInfo"
|
||||
|
||||
PrometheusAlert:
|
||||
type: object
|
||||
description: Prometheus alert produced by AlertManager. This is only a subset of the full alert payload.
|
||||
required:
|
||||
- status
|
||||
- receiver
|
||||
- alerts
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
description: The status of the alert
|
||||
example: resolved
|
||||
receiver:
|
||||
type: string
|
||||
description: The name of the receiver
|
||||
example: webhook
|
||||
alerts:
|
||||
type: array
|
||||
description: The list of alerts
|
||||
items:
|
||||
$ref: "#/components/schemas/PrometheusInnerAlert"
|
||||
example:
|
||||
{
|
||||
"receiver": "scale-up",
|
||||
"status": "firing",
|
||||
"alerts": [{
|
||||
"status": "firing",
|
||||
"labels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"code": "200",
|
||||
"function_name": "func_nodeinfo",
|
||||
"instance": "gateway:8080",
|
||||
"job": "gateway",
|
||||
"monitor": "faas-monitor",
|
||||
"service": "gateway",
|
||||
"severity": "major",
|
||||
"value": "8.998200359928017"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "High invocation total on gateway:8080",
|
||||
"summary": "High invocation total on gateway:8080"
|
||||
},
|
||||
"startsAt": "2017-03-15T15:52:57.805Z",
|
||||
"endsAt": "0001-01-01T00:00:00Z",
|
||||
"generatorURL": "http://4156cb797423:9090/graph?g0.expr=rate%28gateway_function_invocation_total%5B10s%5D%29+%3E+5\u0026g0.tab=0"
|
||||
}],
|
||||
"groupLabels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"service": "gateway"
|
||||
},
|
||||
"commonLabels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"code": "200",
|
||||
"function_name": "func_nodeinfo",
|
||||
"instance": "gateway:8080",
|
||||
"job": "gateway",
|
||||
"monitor": "faas-monitor",
|
||||
"service": "gateway",
|
||||
"severity": "major",
|
||||
"value": "8.998200359928017"
|
||||
},
|
||||
"commonAnnotations": {
|
||||
"description": "High invocation total on gateway:8080",
|
||||
"summary": "High invocation total on gateway:8080"
|
||||
},
|
||||
"externalURL": "http://f054879d97db:9093",
|
||||
"version": "3",
|
||||
"groupKey": 18195285354214864953
|
||||
}
|
||||
|
||||
PrometheusInnerAlert:
|
||||
type: object
|
||||
description: A single alert produced by Prometheus
|
||||
required:
|
||||
- status
|
||||
- labels
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
description: The status of the alert
|
||||
example: resolved
|
||||
labels:
|
||||
$ref: "#/components/schemas/PrometheusInnerAlertLabel"
|
||||
|
||||
PrometheusInnerAlertLabel:
|
||||
type: object
|
||||
description: A single label of a Prometheus alert
|
||||
required:
|
||||
- alertname
|
||||
- function_name
|
||||
properties:
|
||||
alertname:
|
||||
type: string
|
||||
description: The name of the alert
|
||||
function_name:
|
||||
type: string
|
||||
description: The name of the function
|
||||
example: nodeinfo
|
||||
|
||||
FunctionDeployment:
|
||||
required:
|
||||
- service
|
||||
- image
|
||||
type: object
|
||||
properties:
|
||||
service:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
image:
|
||||
type: string
|
||||
description: Docker image in accessible registry
|
||||
example: functions/nodeinfo:latest
|
||||
namespace:
|
||||
type: string
|
||||
description: Namespace to deploy function to. When omitted, the default namespace
|
||||
is used; typically this is `openfaas-fn`, but it is configured by the provider.
|
||||
example: openfaas-fn
|
||||
envProcess:
|
||||
type: string
|
||||
description: |
|
||||
Process for watchdog to fork, i.e. the command to start the function process.
|
||||
|
||||
This value configures the `fprocess` env variable.
|
||||
example: node main.js
|
||||
constraints:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Constraints are specific to OpenFaaS Provider
|
||||
example: node.platform.os == linux
|
||||
envVars:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Overrides to environmental variables
|
||||
secrets:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: An array of names of secrets that are required to be loaded
|
||||
from the Docker Swarm.
|
||||
example: secret-name-1
|
||||
labels:
|
||||
type: object
|
||||
nullable: true
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: A map of labels for making scheduling or routing decisions
|
||||
example:
|
||||
foo: bar
|
||||
annotations:
|
||||
type: object
|
||||
nullable: true
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: A map of annotations for management, orchestration, events
|
||||
and build tasks
|
||||
example:
|
||||
topics: awesome-kafka-topic
|
||||
foo: bar
|
||||
limits:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionResources"
|
||||
requests:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionResources"
|
||||
readOnlyRootFilesystem:
|
||||
type: boolean
|
||||
description: Make the root filesystem of the function read-only
|
||||
|
||||
# DEPRECATED FIELDS, these fields are ignored in all current providers
|
||||
registryAuth:
|
||||
type: string
|
||||
description: |
|
||||
Deprecated: Private registry base64-encoded basic auth (as present in ~/.docker/config.json)
|
||||
|
||||
Use a Kubernetes Secret with registry-auth secret type to provide this value instead.
|
||||
|
||||
This value is completely ignored.
|
||||
example: dXNlcjpwYXNzd29yZA==
|
||||
deprecated: true
|
||||
network:
|
||||
type: string
|
||||
description: |
|
||||
Deprecated: Network, usually func_functions for Swarm.
|
||||
|
||||
This value is completely ignored.
|
||||
deprecated: true
|
||||
example: func_functions
|
||||
|
||||
FunctionStatus:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
- image
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: The name of the function
|
||||
example: nodeinfo
|
||||
image:
|
||||
type: string
|
||||
description: The fully qualified docker image name of the function
|
||||
example: functions/nodeinfo:latest
|
||||
namespace:
|
||||
type: string
|
||||
description: The namespace of the function
|
||||
example: openfaas-fn
|
||||
envProcess:
|
||||
type: string
|
||||
description: Process for watchdog to fork
|
||||
example: node main.js
|
||||
envVars:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: environment variables for the function runtime
|
||||
constraints:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Constraints are specific to OpenFaaS Provider
|
||||
example: node.platform.os == linux
|
||||
secrets:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: An array of names of secrets that are made available to the function
|
||||
labels:
|
||||
type: object
|
||||
nullable: true
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: A map of labels for making scheduling or routing decisions
|
||||
example:
|
||||
foo: bar
|
||||
annotations:
|
||||
type: object
|
||||
nullable: true
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: A map of annotations for management, orchestration, events
|
||||
and build tasks
|
||||
example:
|
||||
topics: awesome-kafka-topic
|
||||
foo: bar
|
||||
limits:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionResources"
|
||||
requests:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionResources"
|
||||
readOnlyRootFilesystem:
|
||||
type: boolean
|
||||
description: removes write-access from the root filesystem mount-point.
|
||||
invocationCount:
|
||||
type: number
|
||||
description: The number of invocations for the specified function
|
||||
format: integer
|
||||
example: 1337
|
||||
replicas:
|
||||
type: number
|
||||
description: Desired number of replicas
|
||||
format: integer
|
||||
example: 2
|
||||
availableReplicas:
|
||||
type: number
|
||||
description: The current number of available replicas
|
||||
format: integer
|
||||
example: 2
|
||||
createdAt:
|
||||
type: string
|
||||
description: |
|
||||
is the time read back from the faas backend's
|
||||
data store for when the function or its container was created.
|
||||
format: date-time
|
||||
usage:
|
||||
nullable: true
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/FunctionUsage"
|
||||
|
||||
FunctionResources:
|
||||
type: object
|
||||
properties:
|
||||
memory:
|
||||
type: string
|
||||
description: The amount of memory that is allocated for the function
|
||||
example: 128M
|
||||
cpu:
|
||||
type: string
|
||||
description: The amount of cpu that is allocated for the function
|
||||
example: '0.01'
|
||||
|
||||
FunctionUsage:
|
||||
type: object
|
||||
properties:
|
||||
cpu:
|
||||
type: number
|
||||
description: |
|
||||
is the increase in CPU usage since the last measurement,
|
||||
equivalent to Kubernetes' concept of millicores.
|
||||
format: double
|
||||
example: 0.01
|
||||
totalMemoryBytes:
|
||||
type: number
|
||||
description: is the total memory usage in bytes.
|
||||
format: double
|
||||
example: 1337
|
||||
|
||||
DeleteFunctionRequest:
|
||||
required:
|
||||
- functionName
|
||||
type: object
|
||||
properties:
|
||||
functionName:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
|
||||
ScaleServiceRequest:
|
||||
required:
|
||||
- serviceName
|
||||
- namespace
|
||||
- replicas
|
||||
type: object
|
||||
properties:
|
||||
serviceName:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
namespace:
|
||||
type: string
|
||||
description: Namespace the function is deployed to.
|
||||
example: openfaas-fn
|
||||
replicas:
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
description: Number of replicas to scale to
|
||||
example: 2
|
||||
|
||||
SecretDescription:
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of secret
|
||||
example: aws-key
|
||||
namespace:
|
||||
type: string
|
||||
description: Namespace of secret
|
||||
example: openfaas-fn
|
||||
SecretValues:
|
||||
type: object
|
||||
properties:
|
||||
value:
|
||||
type: string
|
||||
description: Value of secret in plain-text
|
||||
example: changeme
|
||||
rawValue:
|
||||
type: string
|
||||
format: byte
|
||||
description: |
|
||||
Value of secret in base64.
|
||||
|
||||
This can be used to provide raw binary data when the `value` field is omitted.
|
||||
example: Y2hhbmdlbWU=
|
||||
|
||||
Secret:
|
||||
type: object
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/SecretDescription"
|
||||
- $ref: "#/components/schemas/SecretValues"
|
||||
|
||||
LogEntry:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
- namespace
|
||||
- instance
|
||||
- timestamp
|
||||
- text
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: the function name
|
||||
namespace:
|
||||
type: string
|
||||
description: the namespace of the function
|
||||
instance:
|
||||
type: string
|
||||
description: the name/id of the specific function instance
|
||||
timestamp:
|
||||
type: string
|
||||
description: the timestamp of when the log message was recorded
|
||||
format: date-time
|
||||
text:
|
||||
type: string
|
||||
description: raw log message content
|
||||
|
||||
ListNamespaceResponse:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Namespace name
|
||||
example: openfaas-fn
|
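
For orientation, here is a minimal Go sketch of the two invocation paths defined in the spec above, `/function/{functionName}` and `/async-function/{functionName}`. The gateway address and the function name (`nodeinfo`) are assumptions for illustration only; the synchronous call returns the function's response body, while the asynchronous call is queued and should only return `202 Accepted`.

```go
// Minimal sketch of invoking a function via the gateway API described above.
// The gateway URL and function name are placeholders, not values from this repo.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	gateway := "http://127.0.0.1:8080" // assumed local gateway address

	// Synchronous call: POST /function/{functionName} returns the function's output.
	resp, err := http.Post(gateway+"/function/nodeinfo", "application/json",
		bytes.NewBufferString(`{"hello": "world"}`))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("sync: %d %s\n", resp.StatusCode, body)

	// Asynchronous call: POST /async-function/{functionName} is queued;
	// expect 202 Accepted rather than the function's result.
	asyncResp, err := http.Post(gateway+"/async-function/nodeinfo", "application/json",
		bytes.NewBufferString(`{"hello": "world"}`))
	if err != nil {
		panic(err)
	}
	asyncResp.Body.Close()
	fmt.Printf("async: %d\n", asyncResp.StatusCode)
}
```

Any additional path segments or query parameters appended to these URLs are passed through to the function unchanged, as noted in the path descriptions above.
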
628
api-docs/swagger.yml
Normal file
628
api-docs/swagger.yml
Normal file
@ -0,0 +1,628 @@
|
||||
swagger: '2.0'
|
||||
info:
|
||||
description: OpenFaaS API documentation
|
||||
version: 0.8.12
|
||||
title: OpenFaaS API Gateway
|
||||
license:
|
||||
name: MIT
|
||||
basePath: /
|
||||
schemes:
|
||||
- http
|
||||
paths:
|
||||
'/system/functions':
|
||||
get:
|
||||
summary: 'Get a list of deployed functions with: stats and image digest'
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
'200':
|
||||
description: List of deployed functions.
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/FunctionListEntry'
|
||||
post:
|
||||
summary: Deploy a new function.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Function to deploy
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/FunctionDefinition'
|
||||
responses:
|
||||
'202':
|
||||
description: Accepted
|
||||
'400':
|
||||
description: Bad Request
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
put:
|
||||
summary: Update a function.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Function to update
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/FunctionDefinition'
|
||||
responses:
|
||||
'200':
|
||||
description: Accepted
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
delete:
|
||||
summary: Remove a deployed function.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Function to delete
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/DeleteFunctionRequest'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/system/alert':
|
||||
post:
|
||||
summary: 'Event-sink for AlertManager, for auto-scaling'
|
||||
description: 'Internal use for AlertManager, requires valid AlertManager alert JSON'
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Incoming alert
|
||||
schema:
|
||||
type: object
|
||||
example: |-
|
||||
{"receiver": "scale-up",
|
||||
"status": "firing",
|
||||
"alerts": [{
|
||||
"status": "firing",
|
||||
"labels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"code": "200",
|
||||
"function_name": "func_nodeinfo",
|
||||
"instance": "gateway:8080",
|
||||
"job": "gateway",
|
||||
"monitor": "faas-monitor",
|
||||
"service": "gateway",
|
||||
"severity": "major",
|
||||
"value": "8.998200359928017"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "High invocation total on gateway:8080",
|
||||
"summary": "High invocation total on gateway:8080"
|
||||
},
|
||||
"startsAt": "2017-03-15T15:52:57.805Z",
|
||||
"endsAt": "0001-01-01T00:00:00Z",
|
||||
"generatorURL": "http://4156cb797423:9090/graph?g0.expr=rate%28gateway_function_invocation_total%5B10s%5D%29+%3E+5\u0026g0.tab=0"
|
||||
}],
|
||||
"groupLabels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"service": "gateway"
|
||||
},
|
||||
"commonLabels": {
|
||||
"alertname": "APIHighInvocationRate",
|
||||
"code": "200",
|
||||
"function_name": "func_nodeinfo",
|
||||
"instance": "gateway:8080",
|
||||
"job": "gateway",
|
||||
"monitor": "faas-monitor",
|
||||
"service": "gateway",
|
||||
"severity": "major",
|
||||
"value": "8.998200359928017"
|
||||
},
|
||||
"commonAnnotations": {
|
||||
"description": "High invocation total on gateway:8080",
|
||||
"summary": "High invocation total on gateway:8080"
|
||||
},
|
||||
"externalURL": "http://f054879d97db:9093",
|
||||
"version": "3",
|
||||
"groupKey": 18195285354214864953
|
||||
}
|
||||
responses:
|
||||
'200':
|
||||
description: Alert handled successfully
|
||||
'500':
|
||||
description: Internal error with Swarm, or the request JSON was invalid
|
||||
'/async-function/{functionName}':
|
||||
post:
|
||||
summary: 'Invoke a function asynchronously in OpenFaaS'
|
||||
description: >-
|
||||
See https://docs.openfaas.com/reference/async/.
|
||||
parameters:
|
||||
- in: path
|
||||
name: functionName
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
- in: body
|
||||
name: input
|
||||
description: (Optional) data to pass to function
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example:
|
||||
'{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'202':
|
||||
description: Request accepted and queued
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/function/{functionName}':
|
||||
post:
|
||||
summary: Invoke a function defined in OpenFaaS
|
||||
parameters:
|
||||
- in: path
|
||||
name: functionName
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
- in: body
|
||||
name: input
|
||||
description: (Optional) data to pass to function
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example:
|
||||
'{"hello": "world"}'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Value returned from function
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal server error
|
||||
'/system/scale-function/{functionName}':
|
||||
post:
|
||||
summary: Scale a function
|
||||
parameters:
|
||||
- in: path
|
||||
name: functionName
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
- in: body
|
||||
name: input
|
||||
description: Function to scale plus replica count
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
example:
|
||||
'{"service": "hello-world", "replicas": 10}'
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Scaling OK
|
||||
'202':
|
||||
description: Scaling OK
|
||||
'404':
|
||||
description: Function not found
|
||||
'500':
|
||||
description: Error scaling function
|
||||
'/system/function/{functionName}':
|
||||
get:
|
||||
summary: Get a summary of an OpenFaaS function
|
||||
parameters:
|
||||
- in: path
|
||||
name: functionName
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
description: Function Summary
|
||||
schema:
|
||||
$ref: '#/definitions/FunctionListEntry'
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/system/secrets':
|
||||
get:
|
||||
summary: 'Get a list of secret names and metadata from the provider'
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
'200':
|
||||
description: List of submitted secrets.
|
||||
schema:
|
||||
$ref: '#/definitions/SecretName'
|
||||
post:
|
||||
summary: Create a new secret.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: A new secret to create
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/Secret'
|
||||
responses:
|
||||
'201':
|
||||
description: Created
|
||||
'400':
|
||||
description: Bad Request
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
put:
|
||||
summary: Update a secret.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Secret to update
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/Secret'
|
||||
responses:
|
||||
'200':
|
||||
description: Ok
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'405':
|
||||
description: Method Not Allowed. Secret update is not allowed in faas-swarm.
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
delete:
|
||||
summary: Remove a secret.
|
||||
description: ''
|
||||
consumes:
|
||||
- application/json
|
||||
produces:
|
||||
- application/json
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Secret to delete
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/SecretName'
|
||||
responses:
|
||||
'204':
|
||||
description: OK
|
||||
'400':
|
||||
description: Bad Request
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/system/logs':
|
||||
get:
|
||||
summary: Get a stream of the logs for a specific function
|
||||
produces:
|
||||
- application/x-ndjson
|
||||
parameters:
|
||||
- in: query
|
||||
name: name
|
||||
description: Function name
|
||||
type: string
|
||||
required: true
|
||||
- in: query
|
||||
name: since
|
||||
description: Only return logs after a specific date (RFC3339)
|
||||
type: string
|
||||
required: false
|
||||
- in: query
|
||||
name: tail
|
||||
description: Sets the maximum number of log messages to return; <=0 means unlimited
|
||||
type: integer
|
||||
required: false
|
||||
- in: query
|
||||
name: follow
|
||||
description: When true, the request will stream logs until the request timeout
|
||||
type: boolean
|
||||
required: false
|
||||
responses:
|
||||
'200':
|
||||
description: Newline delimited stream of log messages
|
||||
schema:
|
||||
$ref: '#/definitions/LogEntry'
|
||||
'404':
|
||||
description: Not Found
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/system/info':
|
||||
get:
|
||||
summary: Get info such as provider version number and provider orchestrator
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
'200':
|
||||
description: Info result
|
||||
schema:
|
||||
$ref: '#/definitions/Info'
|
||||
'404':
|
||||
description: Provider does not support info endpoint
|
||||
'500':
|
||||
description: Internal Server Error
|
||||
'/healthz':
|
||||
get:
|
||||
summary: Healthcheck
|
||||
responses:
|
||||
'200':
|
||||
description: Healthy
|
||||
'500':
|
||||
description: Not healthy
|
||||
securityDefinitions:
|
||||
basicAuth:
|
||||
type: basic
|
||||
definitions:
|
||||
Info:
|
||||
type: object
|
||||
properties:
|
||||
provider:
|
||||
type: object
|
||||
description: The OpenFaaS Provider
|
||||
properties:
|
||||
provider:
|
||||
type: string
|
||||
example: faas-netes
|
||||
orchestration:
|
||||
type: string
|
||||
example: kubernetes
|
||||
version:
|
||||
type: object
|
||||
description: Version of the OpenFaaS Provider
|
||||
properties:
|
||||
commit_message:
|
||||
type: string
|
||||
example: Sample Message
|
||||
sha:
|
||||
type: string
|
||||
example: 7108418d9dd6b329ddff40e7393b3166f8160a88
|
||||
release:
|
||||
type: string
|
||||
format: semver
|
||||
example: 0.2.6
|
||||
version:
|
||||
type: object
|
||||
description: Version of the Gateway
|
||||
properties:
|
||||
commit_message:
|
||||
type: string
|
||||
example: Sample Message
|
||||
sha:
|
||||
type: string
|
||||
example: 7108418d9dd6b329ddff40e7393b3166f8160a88
|
||||
release:
|
||||
type: string
|
||||
format: semver
|
||||
example: 0.8.9
|
||||
arch:
|
||||
type: string
|
||||
description: "Platform architecture"
|
||||
example: "x86_64"
|
||||
required:
|
||||
- provider
|
||||
- version
|
||||
DeleteFunctionRequest:
|
||||
type: object
|
||||
properties:
|
||||
functionName:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
required:
|
||||
- functionName
|
||||
FunctionDefinition:
|
||||
type: object
|
||||
properties:
|
||||
service:
|
||||
type: string
|
||||
description: Name of deployed function
|
||||
example: nodeinfo
|
||||
network:
|
||||
type: string
|
||||
description: Network, usually func_functions for Swarm (deprecated)
|
||||
example: func_functions
|
||||
image:
|
||||
type: string
|
||||
description: Docker image in accessible registry
|
||||
example: functions/nodeinfo:latest
|
||||
envProcess:
|
||||
type: string
|
||||
description: Process for watchdog to fork
|
||||
example: node main.js
|
||||
envVars:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Overrides to environmental variables
|
||||
constraints:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Constraints are specific to OpenFaaS Provider
|
||||
example: "node.platform.os == linux"
|
||||
labels:
|
||||
description: A map of labels for making scheduling or routing decisions
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
foo: bar
|
||||
annotations:
|
||||
description: A map of annotations for management, orchestration, events and build tasks
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
topics: awesome-kafka-topic
|
||||
foo: bar
|
||||
secrets:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: An array of names of secrets that are required to be loaded from the Docker Swarm.
|
||||
example: "secret-name-1"
|
||||
registryAuth:
|
||||
type: string
|
||||
description: >-
|
||||
Private registry base64-encoded basic auth (as present in
|
||||
~/.docker/config.json)
|
||||
example: dXNlcjpwYXNzd29yZA==
|
||||
limits:
|
||||
type: object
|
||||
properties:
|
||||
memory:
|
||||
type: string
|
||||
example: "128M"
|
||||
cpu:
|
||||
type: string
|
||||
example: "0.01"
|
||||
requests:
|
||||
type: object
|
||||
properties:
|
||||
memory:
|
||||
type: string
|
||||
example: "128M"
|
||||
cpu:
|
||||
type: string
|
||||
example: "0.01"
|
||||
readOnlyRootFilesystem:
|
||||
type: boolean
|
||||
description: Make the root filesystem of the function read-only
|
||||
required:
|
||||
- service
|
||||
- image
|
||||
- envProcess
|
||||
FunctionListEntry:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
description: The name of the function
|
||||
type: string
|
||||
example: nodeinfo
|
||||
image:
|
||||
description: The fully qualified docker image name of the function
|
||||
type: string
|
||||
example: functions/nodeinfo:latest
|
||||
invocationCount:
|
||||
description: The number of invocations for the specified function
|
||||
type: number
|
||||
format: integer
|
||||
example: 1337
|
||||
replicas:
|
||||
description: The current minimum number of replicas
|
||||
type: number
|
||||
format: integer
|
||||
example: 2
|
||||
availableReplicas:
|
||||
description: The current number of available replicas
|
||||
type: number
|
||||
format: integer
|
||||
example: 2
|
||||
envProcess:
|
||||
description: Process for watchdog to fork
|
||||
type: string
|
||||
example: node main.js
|
||||
labels:
|
||||
description: A map of labels for making scheduling or routing decisions
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
foo: bar
|
||||
annotations:
|
||||
description: A map of annotations for management, orchestration, events and build tasks
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
topics: awesome-kafka-topic
|
||||
foo: bar
|
||||
required:
|
||||
- name
|
||||
- image
|
||||
- invocationCount
|
||||
- replicas
|
||||
- availableReplicas
|
||||
- envProcess
|
||||
- labels
|
||||
Secret:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of secret
|
||||
example: aws-key
|
||||
value:
|
||||
type: string
|
||||
description: Value of secret in plain-text
|
||||
example: changeme
|
||||
required:
|
||||
- name
|
||||
LogEntry:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: the function name
|
||||
instance:
|
||||
type: string
|
||||
description: the name/id of the specific function instance
|
||||
timestamp:
|
||||
type: string
|
||||
format: date-time
|
||||
description: the timestamp of when the log message was recorded
|
||||
text:
|
||||
type: string
|
||||
description: raw log message content
|
||||
SecretName:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of secret
|
||||
example: aws-key
|
||||
externalDocs:
|
||||
description: More documentation available on Github
|
||||
url: 'https://github.com/openfaas/faas'
|
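
As a usage sketch for the `/system/scale-function/{functionName}` endpoint documented above: the request body mirrors the example given in the spec, and the system routes sit behind the gateway's basic auth (see `securityDefinitions`). The gateway address and credentials below are placeholders, not values from this repository.

```go
// Minimal sketch of calling the scale-function endpoint described above.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	gateway := "http://127.0.0.1:8080" // assumed gateway address
	payload := []byte(`{"service": "hello-world", "replicas": 10}`)

	req, err := http.NewRequest(http.MethodPost,
		gateway+"/system/scale-function/hello-world", bytes.NewBuffer(payload))
	if err != nil {
		panic(err)
	}
	// System endpoints are protected by basic auth; these credentials are placeholders.
	req.SetBasicAuth("admin", "password")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// 200 or 202 indicates the scaling request was accepted.
	fmt.Println("status:", resp.StatusCode)
}
```
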
@ -1,6 +1,6 @@
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} ghcr.io/openfaas/license-check:0.4.1 AS license-check
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} ghcr.io/openfaas/license-check:0.4.1 as license-check
|
||||
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.23 AS build
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.19 as build
|
||||
|
||||
ENV GO111MODULE=on
|
||||
ENV CGO_ENABLED=0
|
||||
@ -42,11 +42,11 @@ RUN CGO_ENABLED=${CGO_ENABLED} GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build --
|
||||
-X \"github.com/openfaas/faas/gateway/version.GitCommitSHA=${GIT_COMMIT}\" \
|
||||
-X \"github.com/openfaas/faas/gateway/version.Version=${VERSION}\" \
|
||||
-X github.com/openfaas/faas/gateway/types.Arch=${TARGETARCH}" \
|
||||
-o gateway .
|
||||
-a -installsuffix cgo -o gateway .
|
||||
|
||||
FROM --platform=${TARGETPLATFORM:-linux/amd64} alpine:3.21.0 AS ship
|
||||
FROM --platform=${TARGETPLATFORM:-linux/amd64} alpine:3.17 as ship
|
||||
|
||||
LABEL org.label-schema.license="OpenFaaS CE EULA - non-commercial" \
|
||||
LABEL org.label-schema.license="MIT" \
|
||||
org.label-schema.vcs-url="https://github.com/openfaas/faas" \
|
||||
org.label-schema.vcs-type="Git" \
|
||||
org.label-schema.name="openfaas/faas" \
|
||||
@ -63,8 +63,8 @@ WORKDIR /home/app
|
||||
|
||||
EXPOSE 8080
|
||||
EXPOSE 8082
|
||||
ENV http_proxy=""
|
||||
ENV https_proxy=""
|
||||
ENV http_proxy ""
|
||||
ENV https_proxy ""
|
||||
|
||||
COPY --from=build /go/src/github.com/openfaas/faas/gateway/gateway .
|
||||
COPY assets assets
|
||||
|
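
The `-X` linker flags in the Dockerfile above work by overwriting package-level string variables at build time. The sketch below shows the general pattern for such a version package; the variable names `Version` and `GitCommitSHA` come from the ldflags shown in the Dockerfile, while the `BuildVersion` helper is hypothetical and included only for illustration.

```go
// Illustrative sketch of the pattern the Dockerfile's -X ldflags rely on:
// package-level string variables that the linker replaces at build time.
// The actual gateway/version package may differ.
package version

import "fmt"

// Defaults are overridden via, for example:
//   go build -ldflags "-X .../gateway/version.Version=0.27.0 -X .../gateway/version.GitCommitSHA=abc123"
var (
	Version      = "dev"
	GitCommitSHA = "unknown"
)

// BuildVersion (hypothetical helper) returns a printable version string.
func BuildVersion() string {
	return fmt.Sprintf("%s (commit: %s)", Version, GitCommitSHA)
}
```
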
@ -1,14 +1,14 @@
|
||||
export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
|
||||
PLATFORM?="linux/amd64,linux/arm/v7,linux/arm64"
|
||||
PLATFORM := "linux/amd64,linux/arm/v7,linux/arm64"
|
||||
|
||||
TAG?=dev
|
||||
SERVER?=ttl.sh
|
||||
OWNER?=openfaas
|
||||
TAG?=latest
|
||||
SERVER?=docker.io
|
||||
OWNER?=alexellis2
|
||||
NAME=gateway
|
||||
|
||||
.PHONY: buildx-local
|
||||
buildx-local:
|
||||
.PHONY: local-docker
|
||||
build-local:
|
||||
@echo $(SERVER)/$(OWNER)/$(NAME):$(TAG) \
|
||||
&& docker buildx create --use --name=multiarch --node multiarch \
|
||||
&& docker buildx build \
|
||||
@ -17,18 +17,8 @@ buildx-local:
|
||||
--output "type=docker,push=false" \
|
||||
--tag $(SERVER)/$(OWNER)/$(NAME):$(TAG) .
|
||||
|
||||
.PHONY: buildx-push
|
||||
buildx-push:
|
||||
@echo $(SERVER)/$(OWNER)/$(NAME):$(TAG) \
|
||||
&& docker buildx create --use --name=multiarch --node multiarch \
|
||||
&& docker buildx build \
|
||||
--progress=plain \
|
||||
--platform linux/amd64 \
|
||||
--output "type=image,push=true" \
|
||||
--tag $(SERVER)/$(OWNER)/$(NAME):$(TAG) .
|
||||
|
||||
.PHONY: buildx-push-all
|
||||
buildx-push-all:
|
||||
.PHONY: push-docker
|
||||
push-docker:
|
||||
@echo $(SERVER)/$(OWNER)/$(NAME):$(TAG) \
|
||||
&& docker buildx create --use --name=multiarch --node multiarch \
|
||||
&& docker buildx build \
|
||||
|
@ -1,40 +1,34 @@
|
||||
module github.com/openfaas/faas/gateway
|
||||
|
||||
go 1.23
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/docker/distribution v2.8.3+incompatible
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/openfaas/faas-provider v0.25.4
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20231219105451-b94918cb8a24
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/prometheus/client_model v0.6.1
|
||||
go.uber.org/goleak v1.3.0
|
||||
golang.org/x/sync v0.10.0
|
||||
github.com/docker/distribution v2.8.2+incompatible
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/openfaas/faas-provider v0.19.1
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20230117214128-3615ccb286cc
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
go.uber.org/goleak v1.1.12
|
||||
golang.org/x/sync v0.1.0
|
||||
)
|
||||
|
||||
// replace github.com/openfaas/faas-provider => ../../faas-provider
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/hashicorp/go-hclog v1.6.3 // indirect
|
||||
github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
|
||||
github.com/hashicorp/raft v1.7.1 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/minio/highwayhash v1.0.3 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nats-io/jwt/v2 v2.7.2 // indirect
|
||||
github.com/nats-io/nats.go v1.37.0 // indirect
|
||||
github.com/nats-io/nkeys v0.4.8 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/nats-io/nats-server/v2 v2.9.11 // indirect
|
||||
github.com/nats-io/nats-streaming-server v0.25.3 // indirect
|
||||
github.com/nats-io/nats.go v1.22.1 // indirect
|
||||
github.com/nats-io/nkeys v0.3.0 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/nats-io/stan.go v0.10.4 // indirect
|
||||
github.com/prometheus/common v0.61.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
go.etcd.io/bbolt v1.3.11 // indirect
|
||||
golang.org/x/crypto v0.30.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/time v0.8.0 // indirect
|
||||
google.golang.org/protobuf v1.35.2 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/stretchr/testify v1.7.1 // indirect
|
||||
golang.org/x/crypto v0.5.0 // indirect
|
||||
golang.org/x/sys v0.4.1-0.20230105183443-b8be2fde2a9e // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
)
|
||||
|
605
gateway/go.sum
605
gateway/go.sum
@ -1,149 +1,596 @@
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
|
||||
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v1.1.0 h1:QsGcniKx5/LuX2eYoeL+Np3UKYPNaN7YKpTh29h8rbw=
|
||||
github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0=
|
||||
github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
|
||||
github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/raft v1.7.1 h1:ytxsNx4baHsRZrhUcbt3+79zc4ly8qm7pi0393pSchY=
|
||||
github.com/hashicorp/raft v1.7.1/go.mod h1:hUeiEwQQR/Nk2iKDD0dkEhklSsu3jcAcqvPzPoZSAEM=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/raft v1.3.11 h1:p3v6gf6l3S797NnK5av3HcczOC1T5CLoaRvg0g9ys4A=
|
||||
github.com/hashicorp/raft v1.3.11/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
|
||||
github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/nats-io/jwt/v2 v2.7.2 h1:SCRjfDLJ2q8naXp8YlGJJS5/yj3wGSODFYVi4nnwVMw=
|
||||
github.com/nats-io/jwt/v2 v2.7.2/go.mod h1:kB6QUmqHG6Wdrzj0KP2L+OX4xiTPBeV+NHVstFaATXU=
|
||||
github.com/nats-io/nats-server/v2 v2.10.3 h1:nk2QVLpJUh3/AhZCJlQdTfj2oeLDvWnn1Z6XzGlNFm0=
|
||||
github.com/nats-io/nats-server/v2 v2.10.3/go.mod h1:lzrskZ/4gyMAh+/66cCd+q74c6v7muBypzfWhP/MAaM=
|
||||
github.com/nats-io/nats-streaming-server v0.25.5 h1:DX6xaPhKvVLhdpNsuEmmD+O9LfWSnw8cvxQU/H9LRy8=
|
||||
github.com/nats-io/nats-streaming-server v0.25.5/go.mod h1:dSBVdHGsT/tV91lT4MWFfE6+yjRCNhRIYJpBaTHFdAo=
|
||||
github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
|
||||
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
|
||||
github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
|
||||
github.com/nats-io/nats-server/v2 v2.9.11 h1:4y5SwWvWI59V5mcqtuoqKq6L9NDUydOP3Ekwuwl8cZI=
|
||||
github.com/nats-io/nats-server/v2 v2.9.11/go.mod h1:b0oVuxSlkvS3ZjMkncFeACGyZohbO4XhSqW1Lt7iRRY=
|
||||
github.com/nats-io/nats-streaming-server v0.25.3 h1:A9dmf4fMIxFPBGqgwePljWsBePMEjl3ugsIwK6F2wow=
|
||||
github.com/nats-io/nats-streaming-server v0.25.3/go.mod h1:0Cyht7y75el3if3Qdq31OPc/TNjVwzglMPz04Hn77kk=
|
||||
github.com/nats-io/nats.go v1.19.0/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA=
|
||||
github.com/nats-io/nats.go v1.22.1 h1:XzfqDspY0RNufzdrB8c4hFR+R3dahkxlpWe5+IWJzbE=
|
||||
github.com/nats-io/nats.go v1.22.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA=
|
||||
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
|
||||
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
|
||||
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
|
||||
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
|
||||
github.com/nats-io/nkeys v0.4.8 h1:+wee30071y3vCZAYRsnrmIPaOe47A/SkK/UBDPdIV70=
|
||||
github.com/nats-io/nkeys v0.4.8/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nats-io/stan.go v0.10.4 h1:19GS/eD1SeQJaVkeM9EkvEYattnvnWrZ3wkSWSw4uXw=
|
||||
github.com/nats-io/stan.go v0.10.4/go.mod h1:3XJXH8GagrGqajoO/9+HgPyKV5MWsv7S5ccdda+pc6k=
|
||||
github.com/openfaas/faas-provider v0.25.4 h1:Cly/M8/Q+OOn8qFxxeaZGyC5B2x4f+RSU28hej+1WcM=
|
||||
github.com/openfaas/faas-provider v0.25.4/go.mod h1:t6RSPCvNfiqYEzf/CtdIj+0OItdyK6IzrOca1EOLNSg=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20231219105451-b94918cb8a24 h1:IeFWHV+vpviPvmAVrh6SsW19PkU+7+Pu/CLrFIe1gtc=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20231219105451-b94918cb8a24/go.mod h1:SR1bzVXQaZoZS+wWmvO7bXZE6V9S9bukR9J5Kynr/vc=
|
||||
github.com/openfaas/faas-provider v0.19.1 h1:xH8lTWabfDZwzIvC0u1AO48ghD3BNw6Vo231DLqTeI0=
|
||||
github.com/openfaas/faas-provider v0.19.1/go.mod h1:Farrp+9Med8LeK3aoYpqplMP8f5ebTILbCSLg2LPLZk=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20230117214128-3615ccb286cc h1:qMcVTwdbRAqZtQrYS9rvUxnbQY6nmqf7CIsjp7A+cOo=
|
||||
github.com/openfaas/nats-queue-worker v0.0.0-20230117214128-3615ccb286cc/go.mod h1:s86POyW6C8S4CALFRhO8ax5sR2uaQUJQ0HaQGvbTpTc=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
|
||||
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
|
||||
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.4.1-0.20230105183443-b8be2fde2a9e h1:Lw2b7QX5zDuEsD5ZkJNRUGEGkLuho3UAKsO25Ucv140=
|
||||
golang.org/x/sys v0.4.1-0.20230105183443-b8be2fde2a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
|
||||
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
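Each line in the go.sum block above has three columns: a module path, a version (suffixed with /go.mod when the hash covers only that module's go.mod file), and an h1: checksum. The short Go sketch below only illustrates that layout; it is not part of the gateway code.

package main

import (
	"fmt"
	"strings"
)

// parseGoSumLine splits one go.sum entry into its three columns:
// module path, version (possibly ending in "/go.mod"), and h1: hash.
func parseGoSumLine(line string) (module, version, hash string, ok bool) {
	fields := strings.Fields(line)
	if len(fields) != 3 {
		return "", "", "", false
	}
	return fields[0], fields[1], fields[2], true
}

func main() {
	m, v, h, ok := parseGoSumLine("github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=")
	if ok {
		fmt.Println(m, v, h)
	}
}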
@ -1,14 +1,12 @@
|
||||
// License: OpenFaaS Community Edition (CE) EULA
|
||||
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)
|
||||
|
||||
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
@ -29,7 +27,7 @@ func MakeAlertHandler(service scaling.ServiceQuery, defaultNamespace string) htt
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(r.Body)
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
w.Write([]byte("Unable to read alert."))
|
||||
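The hunk above replaces ioutil.ReadAll with io.ReadAll; since Go 1.16, ioutil.ReadAll is a deprecated wrapper around io.ReadAll. A minimal sketch of the modern call, independent of the gateway types; the URL is a placeholder.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

// readBody drains an HTTP request body with io.ReadAll, the
// replacement for the deprecated ioutil.ReadAll.
func readBody(r *http.Request) ([]byte, error) {
	defer r.Body.Close()
	return io.ReadAll(r.Body)
}

func main() {
	// Placeholder URL; only the body-reading pattern matters here.
	r, err := http.NewRequest(http.MethodPost, "http://example.local/alert", strings.NewReader(`{"status":"firing"}`))
	if err != nil {
		log.Fatal(err)
	}
	body, err := readBody(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}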
@ -46,7 +44,7 @@ func MakeAlertHandler(service scaling.ServiceQuery, defaultNamespace string) htt
|
||||
return
|
||||
}
|
||||
|
||||
errors := handleAlerts(req, service, defaultNamespace)
|
||||
errors := handleAlerts(&req, service, defaultNamespace)
|
||||
if len(errors) > 0 {
|
||||
log.Println(errors)
|
||||
var errorOutput string
|
||||
@ -62,7 +60,7 @@ func MakeAlertHandler(service scaling.ServiceQuery, defaultNamespace string) htt
|
||||
}
|
||||
}
|
||||
|
||||
func handleAlerts(req requests.PrometheusAlert, service scaling.ServiceQuery, defaultNamespace string) []error {
|
||||
func handleAlerts(req *requests.PrometheusAlert, service scaling.ServiceQuery, defaultNamespace string) []error {
|
||||
var errors []error
|
||||
for _, alert := range req.Alerts {
|
||||
if err := scaleService(alert, service, defaultNamespace); err != nil {
|
||||
|
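MakeAlertHandler decodes an Alertmanager-style webhook body and asks the ServiceQuery to scale the named function. The sketch below shows one way such a payload could be posted to a locally running gateway; the field names follow Alertmanager's webhook format and the address and path are assumptions for a local test setup, not values taken from this diff.

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Illustrative Alertmanager-style webhook body; field names follow the
	// generic Alertmanager webhook format, not necessarily requests.PrometheusAlert.
	payload := []byte(`{
	  "receiver": "scale-up",
	  "status": "firing",
	  "alerts": [
	    {"status": "firing", "labels": {"alertname": "APIHighInvocationRate", "function_name": "figlet"}}
	  ]
	}`)

	// Gateway address and path are placeholders for a local test environment.
	resp, err := http.Post("http://127.0.0.1:8080/system/alert", "application/json", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}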
@ -1,7 +1,5 @@
|
||||
// License: OpenFaaS Community Edition (CE) EULA
|
||||
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)
|
||||
|
||||
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||
// Copyright (c) Alex Ellis 1017. All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package handlers
|
||||
|
||||
|
@ -1,7 +1,5 @@
|
||||
// License: OpenFaaS Community Edition (CE) EULA
|
||||
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)
|
||||
|
||||
// Copyright (c) OpenFaaS Author(s). All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package handlers
|
||||
|
||||
@ -11,14 +9,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/openfaas/faas/gateway/version"
|
||||
)
|
||||
|
||||
// MakeCallIDMiddleware middleware tags a request with a uid
|
||||
func MakeCallIDMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
|
||||
version := version.Version
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
if len(r.Header.Get("X-Call-Id")) == 0 {
|
||||
@ -30,8 +24,6 @@ func MakeCallIDMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
r.Header.Add("X-Start-Time", fmt.Sprintf("%d", start.UTC().UnixNano()))
|
||||
w.Header().Add("X-Start-Time", fmt.Sprintf("%d", start.UTC().UnixNano()))
|
||||
|
||||
w.Header().Add("X-Served-By", fmt.Sprintf("openfaas-ce/%s", version))
|
||||
|
||||
next(w, r)
|
||||
}
|
||||
}
|
||||
|
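MakeCallIDMiddleware follows the common func(http.HandlerFunc) http.HandlerFunc shape. A minimal sketch of a middleware of the same shape is shown below; the random hex ID stands in for the UUID generator used by the gateway, and the port is arbitrary.

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"net/http"
)

// withCallID sets an X-Call-Id header on the request and response
// when the caller has not supplied one, then invokes the next handler.
func withCallID(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-Call-Id") == "" {
			buf := make([]byte, 16)
			if _, err := rand.Read(buf); err == nil {
				id := hex.EncodeToString(buf)
				r.Header.Set("X-Call-Id", id)
				w.Header().Set("X-Call-Id", id)
			}
		}
		next(w, r)
	}
}

func main() {
	hello := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "call id:", r.Header.Get("X-Call-Id"))
	}
	http.HandleFunc("/", withCallID(hello))
	log.Fatal(http.ListenAndServe(":8081", nil))
}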
@ -1,7 +1,5 @@
|
||||
// License: OpenFaaS Community Edition (CE) EULA
|
||||
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)
|
||||
|
||||
// Copyright (c) OpenFaaS Author(s). All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package handlers
|
||||
|
||||
|
@ -1,7 +1,5 @@
|
||||
// License: OpenFaaS Community Edition (CE) EULA
|
||||
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)
|
||||
|
||||
// Copyright (c) OpenFaaS Author(s). All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package handlers
|
||||
|
||||
|
@ -1,24 +1,17 @@
|
||||
// License: OpenFaaS Community Edition (CE) EULA
|
||||
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)
|
||||
|
||||
// Copyright (c) OpenFaaS Author(s). All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
fhttputil "github.com/openfaas/faas-provider/httputil"
|
||||
"github.com/openfaas/faas/gateway/pkg/middleware"
|
||||
"github.com/openfaas/faas/gateway/types"
|
||||
)
|
||||
@ -35,10 +28,7 @@ func MakeForwardingProxyHandler(proxy *types.HTTPClientReverseProxy,
|
||||
writeRequestURI = exists
|
||||
}
|
||||
|
||||
reverseProxy := makeRewriteProxy(baseURLResolver, urlPathTransformer)
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
baseURL := baseURLResolver.Resolve(r)
|
||||
originalURL := r.URL.String()
|
||||
requestURL := urlPathTransformer.Transform(r)
|
||||
@ -49,13 +39,13 @@ func MakeForwardingProxyHandler(proxy *types.HTTPClientReverseProxy,
|
||||
|
||||
start := time.Now()
|
||||
|
||||
statusCode, err := forwardRequest(w, r, proxy.Client, baseURL, requestURL, proxy.Timeout, writeRequestURI, serviceAuthInjector, reverseProxy)
|
||||
statusCode, err := forwardRequest(w, r, proxy.Client, baseURL, requestURL, proxy.Timeout, writeRequestURI, serviceAuthInjector)
|
||||
|
||||
seconds := time.Since(start)
|
||||
if err != nil {
|
||||
log.Printf("error with upstream request to: %s, %s\n", requestURL, err.Error())
|
||||
}
|
||||
|
||||
seconds := time.Since(start)
|
||||
|
||||
for _, notifier := range notifiers {
|
||||
notifier.Notify(r.Method, requestURL, originalURL, statusCode, "completed", seconds)
|
||||
}
|
||||
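Once the proxied call completes, each notifier receives the method, transformed URL, original URL, status code, an event label and the elapsed duration. The sketch below implements a logging notifier whose Notify signature is inferred from that call site; the type name and parameter names are assumptions, not copied from the gateway source.

package main

import (
	"log"
	"time"
)

// LoggingNotifier is an illustrative observer; its Notify signature mirrors
// the call notifier.Notify(r.Method, requestURL, originalURL, statusCode, "completed", seconds).
type LoggingNotifier struct{}

func (LoggingNotifier) Notify(method string, URL string, originalURL string, statusCode int, event string, duration time.Duration) {
	log.Printf("%s %s (original: %s) => %d [%s] in %.4fs", method, URL, originalURL, statusCode, event, duration.Seconds())
}

func main() {
	var n LoggingNotifier
	n.Notify("GET", "/function/figlet", "/function/figlet", 200, "completed", 25*time.Millisecond)
}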
@ -96,14 +86,12 @@ func forwardRequest(w http.ResponseWriter,
|
||||
requestURL string,
|
||||
timeout time.Duration,
|
||||
writeRequestURI bool,
|
||||
serviceAuthInjector middleware.AuthInjector,
|
||||
reverseProxy *httputil.ReverseProxy) (int, error) {
|
||||
|
||||
if r.Body != nil {
|
||||
defer r.Body.Close()
|
||||
}
|
||||
serviceAuthInjector middleware.AuthInjector) (int, error) {
|
||||
|
||||
upstreamReq := buildUpstreamRequest(r, baseURL, requestURL)
|
||||
if upstreamReq.Body != nil {
|
||||
defer upstreamReq.Body.Close()
|
||||
}
|
||||
|
||||
if serviceAuthInjector != nil {
|
||||
serviceAuthInjector.Inject(upstreamReq)
|
||||
@ -113,19 +101,14 @@ func forwardRequest(w http.ResponseWriter,
|
||||
log.Printf("forwardRequest: %s %s\n", upstreamReq.Host, upstreamReq.URL.String())
|
||||
}
|
||||
|
||||
if strings.HasPrefix(r.Header.Get("Accept"), "text/event-stream") {
|
||||
|
||||
return handleEventStream(w, r, reverseProxy, upstreamReq, timeout)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(r.Context(), timeout)
|
||||
defer cancel()
|
||||
|
||||
res, err := proxyClient.Do(upstreamReq.WithContext(ctx))
|
||||
if err != nil {
|
||||
res, resErr := proxyClient.Do(upstreamReq.WithContext(ctx))
|
||||
if resErr != nil {
|
||||
badStatus := http.StatusBadGateway
|
||||
w.WriteHeader(badStatus)
|
||||
return badStatus, err
|
||||
return badStatus, resErr
|
||||
}
|
||||
|
||||
if res.Body != nil {
|
||||
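On the non-streaming path, forwardRequest bounds the upstream call with context.WithTimeout before handing the request to the shared HTTP client. A stripped-down sketch of that pattern using only the standard library follows; the upstream URL and timeout are placeholders.

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"
)

func main() {
	// Placeholder upstream URL; in the gateway this is the resolved function URL.
	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/healthz", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Cancel the upstream call if it exceeds the timeout.
	ctx, cancel := context.WithTimeout(req.Context(), 3*time.Second)
	defer cancel()

	res, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		log.Fatal(err) // a timeout surfaces here as a context deadline error
	}
	defer res.Body.Close()

	body, _ := io.ReadAll(res.Body)
	fmt.Println(res.StatusCode, len(body))
}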
@ -134,45 +117,17 @@ func forwardRequest(w http.ResponseWriter,
|
||||
|
||||
copyHeaders(w.Header(), &res.Header)
|
||||
|
||||
// Write status code
|
||||
w.WriteHeader(res.StatusCode)
|
||||
|
||||
if res.Body != nil {
|
||||
io.Copy(w, res.Body)
|
||||
// Copy the body over
|
||||
io.CopyBuffer(w, res.Body, nil)
|
||||
}
|
||||
|
||||
return res.StatusCode, nil
|
||||
}
|
||||
|
||||
func handleEventStream(w http.ResponseWriter, r *http.Request, reverseProxy *httputil.ReverseProxy, upstreamReq *http.Request, timeout time.Duration) (int, error) {
|
||||
ww := fhttputil.NewHttpWriteInterceptor(w)
|
||||
|
||||
ctx, cancel := context.WithTimeoutCause(r.Context(), timeout, http.ErrHandlerTimeout)
|
||||
defer cancel()
|
||||
|
||||
r = r.WithContext(ctx)
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer func() {
|
||||
wg.Done()
|
||||
if r := recover(); r != nil {
|
||||
if errors.Is(r.(error), http.ErrAbortHandler) {
|
||||
log.Printf("Aborted [%s] for: %s", upstreamReq.Method, upstreamReq.URL.Path)
|
||||
|
||||
} else {
|
||||
log.Printf("Recovered from panic in reverseproxy: %v", r)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
reverseProxy.ServeHTTP(ww, r)
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return ww.Status(), nil
|
||||
}
|
||||
|
||||
func copyHeaders(destination http.Header, source *http.Header) {
|
||||
for k, v := range *source {
|
||||
vClone := make([]string, len(v))
|
||||
@ -204,24 +159,3 @@ var hopHeaders = []string{
|
||||
"Transfer-Encoding",
|
||||
"Upgrade",
|
||||
}
|
||||
|
||||
func makeRewriteProxy(baseURLResolver middleware.BaseURLResolver, urlPathTransformer middleware.URLPathTransformer) *httputil.ReverseProxy {
|
||||
|
||||
return &httputil.ReverseProxy{
|
||||
|
||||
ErrorLog: log.New(io.Discard, "proxy:", 0),
|
||||
ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
|
||||
},
|
||||
Director: func(r *http.Request) {
|
||||
|
||||
baseURL := baseURLResolver.Resolve(r)
|
||||
baseURLu, _ := r.URL.Parse(baseURL)
|
||||
|
||||
requestURL := urlPathTransformer.Transform(r)
|
||||
|
||||
r.URL.Scheme = "http"
|
||||
r.URL.Path = requestURL
|
||||
r.URL.Host = baseURLu.Host
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -1,14 +1,12 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers

import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"testing"
@ -35,7 +33,7 @@ func Test_buildUpstreamRequest_Body_Method_Query(t *testing.T) {
t.Fail()
}

upstreamBytes, _ := io.ReadAll(upstream.Body)
upstreamBytes, _ := ioutil.ReadAll(upstream.Body)

if string(upstreamBytes) != string(srcBytes) {
t.Errorf("Body - want: %s, got: %s", string(upstreamBytes), string(srcBytes))
@ -214,7 +212,7 @@ func Test_buildUpstreamRequest_WithPathNoQuery(t *testing.T) {
t.Fail()
}

upstreamBytes, _ := io.ReadAll(upstream.Body)
upstreamBytes, _ := ioutil.ReadAll(upstream.Body)

if string(upstreamBytes) != string(srcBytes) {
t.Errorf("Body - want: %s, got: %s", string(upstreamBytes), string(srcBytes))
@ -270,7 +268,7 @@ func Test_buildUpstreamRequest_WithNoPathNoQuery(t *testing.T) {
t.Fail()
}

upstreamBytes, _ := io.ReadAll(upstream.Body)
upstreamBytes, _ := ioutil.ReadAll(upstream.Body)

if string(upstreamBytes) != string(srcBytes) {
t.Errorf("Body - want: %s, got: %s", string(upstreamBytes), string(srcBytes))
@ -324,7 +322,7 @@ func Test_buildUpstreamRequest_WithPathAndQuery(t *testing.T) {
t.Fail()
}

upstreamBytes, _ := io.ReadAll(upstream.Body)
upstreamBytes, _ := ioutil.ReadAll(upstream.Body)

if string(upstreamBytes) != string(srcBytes) {
t.Errorf("Body - want: %s, got: %s", string(upstreamBytes), string(srcBytes))

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers
@ -3,7 +3,7 @@ package handlers
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@ -52,7 +52,7 @@ func Test_logsProxyDoesNotLeakGoroutinesWhenProviderClosesConnection(t *testing.
t.Fatalf("unexpected error sending log request: %s", err)
}

body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("unexpected error reading the response body: %s", err)
}
@ -126,7 +126,7 @@ func Test_logsProxyDoesNotLeakGoroutinesWhenClientClosesConnection(t *testing.T)
go func() {
defer resp.Body.Close()
defer close(errCh)
_, err := io.ReadAll(resp.Body)
_, err := ioutil.ReadAll(resp.Body)
errCh <- err
}()
cancel()

@ -1,14 +1,11 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers

import (
"testing"

"github.com/openfaas/faas/gateway/pkg/middleware"
"testing"
)

func Test_getNamespace_Default(t *testing.T) {

@ -1,10 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers
@ -1,13 +1,11 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers

import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
@ -29,7 +27,7 @@ func MakeQueuedProxy(metrics metrics.MetricOptions, queuer ftypes.RequestQueuer,
defer r.Body.Close()

var err error
body, err = io.ReadAll(r.Body)
body, err = ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package handlers

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package main
@ -129,12 +127,9 @@ func main() {
faasHandlers.FunctionStatus = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)

faasHandlers.InfoHandler = handlers.MakeInfoHandler(handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector))
faasHandlers.TelemetryHandler = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, nil)

faasHandlers.SecretHandler = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)

faasHandlers.NamespaceListerHandler = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)
faasHandlers.NamespaceMutatorHandler = handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)

faasHandlers.Alert = handlers.MakeNotifierWrapper(
handlers.MakeAlertHandler(externalServiceQuery, config.Namespace),
@ -171,7 +166,7 @@ func main() {
)
}

prometheusQuery := metrics.NewPrometheusQuery(config.PrometheusHost, config.PrometheusPort, http.DefaultClient, version.BuildVersion())
prometheusQuery := metrics.NewPrometheusQuery(config.PrometheusHost, config.PrometheusPort, &http.Client{})
faasHandlers.ListFunctions = metrics.AddMetricsHandler(faasHandlers.ListFunctions, prometheusQuery)
faasHandlers.ScaleFunction = scaling.MakeHorizontalScalingHandler(handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector))

@ -198,8 +193,6 @@ func main() {
auth.DecorateWithBasicAuth(faasHandlers.LogProxyHandler, credentials)
faasHandlers.NamespaceListerHandler =
auth.DecorateWithBasicAuth(faasHandlers.NamespaceListerHandler, credentials)
faasHandlers.NamespaceMutatorHandler =
auth.DecorateWithBasicAuth(faasHandlers.NamespaceMutatorHandler, credentials)
}

r := mux.NewRouter()
@ -210,8 +203,6 @@ func main() {
r.HandleFunc("/function/{name:["+NameExpression+"]+}/{params:.*}", functionProxy)

r.HandleFunc("/system/info", faasHandlers.InfoHandler).Methods(http.MethodGet)
r.HandleFunc("/system/telemetry", faasHandlers.TelemetryHandler).Methods(http.MethodGet)

r.HandleFunc("/system/alert", faasHandlers.Alert).Methods(http.MethodPost)

r.HandleFunc("/system/function/{name:["+NameExpression+"]+}", faasHandlers.FunctionStatus).Methods(http.MethodGet)
@ -225,8 +216,6 @@ func main() {
r.HandleFunc("/system/logs", faasHandlers.LogProxyHandler).Methods(http.MethodGet)

r.HandleFunc("/system/namespaces", faasHandlers.NamespaceListerHandler).Methods(http.MethodGet)
r.HandleFunc("/system/namespace/{namespace:["+NameExpression+"]*}", faasHandlers.NamespaceMutatorHandler).
Methods(http.MethodPost, http.MethodDelete, http.MethodPut, http.MethodGet)

if faasHandlers.QueuedProxy != nil {
r.HandleFunc("/async-function/{name:["+NameExpression+"]+}/", faasHandlers.QueuedProxy).Methods(http.MethodPost)
@ -256,7 +245,7 @@ func main() {
r.HandleFunc("/healthz",
handlers.MakeForwardingProxyHandler(reverseProxy, forwardingNotifiers, urlResolver, nilURLTransformer, serviceAuthInjector)).Methods(http.MethodGet)

r.Handle("/", http.RedirectHandler("/ui/", http.StatusTemporaryRedirect)).Methods(http.MethodGet)
r.Handle("/", http.RedirectHandler("/ui/", http.StatusMovedPermanently)).Methods(http.MethodGet)

tcpPort := 8080
@ -3,7 +3,7 @@ package metrics
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
@ -28,7 +28,7 @@ func AddMetricsHandler(handler http.HandlerFunc, prometheusQuery PrometheusQuery
}

defer upstreamCall.Body.Close()
upstreamBody, _ := io.ReadAll(upstreamCall.Body)
upstreamBody, _ := ioutil.ReadAll(upstreamCall.Body)

if recorder.Code != http.StatusOK {
log.Printf("List functions responded with code %d, body: %s",
@ -1,15 +1,13 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017
// Copyright (c) 2018 OpenFaaS Author(s)
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package metrics

import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@ -88,7 +86,7 @@ func (e *Exporter) StartServiceWatcher(endpointURL url.URL, metricsOptions Metri

namespaces, err := e.getNamespaces(endpointURL)
if err != nil {
log.Printf("Error listing namespaces: %s", err)
log.Println(err)
}

services := []types.FunctionStatus{}
@ -97,7 +95,7 @@ func (e *Exporter) StartServiceWatcher(endpointURL url.URL, metricsOptions Metri
if len(namespaces) == 0 {
services, err = e.getFunctions(endpointURL, e.FunctionNamespace)
if err != nil {
log.Printf("Error getting functions from: %s, error: %s", e.FunctionNamespace, err)
log.Println(err)
continue
}
e.services = services
@ -105,7 +103,7 @@ func (e *Exporter) StartServiceWatcher(endpointURL url.URL, metricsOptions Metri
for _, namespace := range namespaces {
nsServices, err := e.getFunctions(endpointURL, namespace)
if err != nil {
log.Printf("Error getting functions from: %s, error: %s", e.FunctionNamespace, err)
log.Println(err)
continue
}
services = append(services, nsServices...)
@ -114,6 +112,7 @@ func (e *Exporter) StartServiceWatcher(endpointURL url.URL, metricsOptions Metri

e.services = services

break
case <-quit:
return
}
@ -160,24 +159,14 @@ func (e *Exporter) getFunctions(endpointURL url.URL, namespace string) ([]types.
return services, err
}

var body []byte
if res.Body != nil {
defer res.Body.Close()

if b, err := io.ReadAll(res.Body); err != nil {
return services, err
} else {
body = b
}
bytesOut, readErr := ioutil.ReadAll(res.Body)
if readErr != nil {
return services, readErr
}

if len(body) == 0 {
return services, fmt.Errorf("no response body from /system/functions")
}

if err := json.Unmarshal(body, &services); err != nil {
if err := json.Unmarshal(bytesOut, &services); err != nil {
return services, fmt.Errorf("error unmarshalling response: %s, error: %s",
string(body), err)
string(bytesOut), err)
}

return services, nil
@ -204,24 +193,13 @@ func (e *Exporter) getNamespaces(endpointURL url.URL) ([]string, error) {
return namespaces, nil
}

var body []byte
if res.Body != nil {
defer res.Body.Close()

if b, err := io.ReadAll(res.Body); err != nil {
return namespaces, err
} else {
body = b
}
bytesOut, readErr := ioutil.ReadAll(res.Body)
if readErr != nil {
return namespaces, readErr
}

if len(body) == 0 {
return namespaces, fmt.Errorf("no response body from /system/namespaces")
if err := json.Unmarshal(bytesOut, &namespaces); err != nil {
return namespaces, fmt.Errorf("error unmarshalling response: %s, error: %s", string(bytesOut), err)
}

if err := json.Unmarshal(body, &namespaces); err != nil {
return namespaces, fmt.Errorf("error unmarshalling response: %s, error: %s", string(body), err)
}

return namespaces, nil
}
@ -41,21 +41,21 @@ func Test_Describe_DescribesThePrometheusMetrics(t *testing.T) {
go exporter.Describe(ch)

d := <-ch
expectedGatewayFunctionInvocationDesc := `Desc{fqName: "gateway_function_invocation_total", help: "Function metrics", constLabels: {}, variableLabels: {function_name,code}}`
expectedGatewayFunctionInvocationDesc := `Desc{fqName: "gateway_function_invocation_total", help: "Function metrics", constLabels: {}, variableLabels: [function_name code]}`
actualGatewayFunctionInvocationDesc := d.String()
if expectedGatewayFunctionInvocationDesc != actualGatewayFunctionInvocationDesc {
t.Errorf("Want\n%s\ngot\n%s", expectedGatewayFunctionInvocationDesc, actualGatewayFunctionInvocationDesc)
}

d = <-ch
expectedGatewayFunctionsHistogramDesc := `Desc{fqName: "gateway_functions_seconds", help: "Function time taken", constLabels: {}, variableLabels: {function_name,code}}`
expectedGatewayFunctionsHistogramDesc := `Desc{fqName: "gateway_functions_seconds", help: "Function time taken", constLabels: {}, variableLabels: [function_name code]}`
actualGatewayFunctionsHistogramDesc := d.String()
if expectedGatewayFunctionsHistogramDesc != actualGatewayFunctionsHistogramDesc {
t.Errorf("Want\n%s\ngot\n%s", expectedGatewayFunctionsHistogramDesc, actualGatewayFunctionsHistogramDesc)
}

d = <-ch
expectedServiceReplicasGaugeDesc := `Desc{fqName: "gateway_service_count", help: "Current count of replicas for function", constLabels: {}, variableLabels: {function_name}}`
expectedServiceReplicasGaugeDesc := `Desc{fqName: "gateway_service_count", help: "Current count of replicas for function", constLabels: {}, variableLabels: [function_name]}`
actualServiceReplicasGaugeDesc := d.String()
if expectedServiceReplicasGaugeDesc != actualServiceReplicasGaugeDesc {
t.Errorf("Want\n%s\ngot\n%s", expectedServiceReplicasGaugeDesc, actualServiceReplicasGaugeDesc)

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package metrics
@ -3,16 +3,15 @@ package metrics
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
)

// PrometheusQuery represents parameters for querying Prometheus
type PrometheusQuery struct {
host string
port int
client *http.Client
userAgentVersion string
Port int
Host string
Client *http.Client
}

type PrometheusQueryFetcher interface {
@ -20,47 +19,45 @@ type PrometheusQueryFetcher interface {
}

// NewPrometheusQuery create a NewPrometheusQuery
func NewPrometheusQuery(host string, port int, client *http.Client, userAgentVersion string) PrometheusQuery {
func NewPrometheusQuery(host string, port int, client *http.Client) PrometheusQuery {
return PrometheusQuery{
client: client,
host: host,
port: port,
userAgentVersion: userAgentVersion,
Client: client,
Host: host,
Port: port,
}
}

// Fetch queries aggregated stats
func (q PrometheusQuery) Fetch(query string) (*VectorQueryResponse, error) {

req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s:%d/api/v1/query?query=%s", q.host, q.port, query), nil)
if err != nil {
return nil, err
req, reqErr := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s:%d/api/v1/query?query=%s", q.Host, q.Port, query), nil)
if reqErr != nil {
return nil, reqErr
}

req.Header.Set("User-Agent", fmt.Sprintf("openfaas-gateway/%s (Prometheus query)", q.userAgentVersion))

res, err := q.client.Do(req)
if err != nil {
return nil, err
res, getErr := q.Client.Do(req)
if getErr != nil {
return nil, getErr
}

if res.Body != nil {
defer res.Body.Close()
}

bytesOut, err := io.ReadAll(res.Body)
if err != nil {
return nil, err
bytesOut, readErr := ioutil.ReadAll(res.Body)
if readErr != nil {
return nil, readErr
}

if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code from Prometheus want: %d, got: %d, body: %s", http.StatusOK, res.StatusCode, string(bytesOut))
return nil, fmt.Errorf("Unexpected status code from Prometheus want: %d, got: %d, body: %s", http.StatusOK, res.StatusCode, string(bytesOut))
}

var values VectorQueryResponse

if err := json.Unmarshal(bytesOut, &values); err != nil {
return nil, fmt.Errorf("error unmarshaling result: %s, '%s'", err, string(bytesOut))
unmarshalErr := json.Unmarshal(bytesOut, &values)
if unmarshalErr != nil {
return nil, fmt.Errorf("Error unmarshaling result: %s, '%s'", unmarshalErr, string(bytesOut))
}

return &values, nil
@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package middleware

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package middleware

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package middleware

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package middleware

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package middleware

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package plugin

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package requests

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package requests
11
gateway/requests/requests.go
Normal file
@ -0,0 +1,11 @@
// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

// Package requests package provides a client SDK or library for
// the OpenFaaS gateway REST API
package requests

// DeleteFunctionRequest delete a deployed function
type DeleteFunctionRequest struct {
FunctionName string `json:"functionName"`
}

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package scaling

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package scaling

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package scaling

@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package scaling
@ -3,7 +3,7 @@ package scaling
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"

"github.com/openfaas/faas-provider/types"
@ -43,7 +43,7 @@ func MakeHorizontalScalingHandler(next http.HandlerFunc) http.HandlerFunc {
return
}

body, err := io.ReadAll(r.Body)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, "Error reading request body", http.StatusBadRequest)
return
@ -65,7 +65,7 @@ func MakeHorizontalScalingHandler(next http.HandlerFunc) http.HandlerFunc {

upstreamReq, _ := json.Marshal(scaleRequest)
// Restore the io.ReadCloser to its original state
r.Body = io.NopCloser(bytes.NewBuffer(upstreamReq))
r.Body = ioutil.NopCloser(bytes.NewBuffer(upstreamReq))

next.ServeHTTP(w, r)
}
@ -1,7 +1,5 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s). All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package scaling

@ -34,8 +34,6 @@ type HandlerSet struct {
// InfoHandler provides version and build info
InfoHandler http.HandlerFunc

TelemetryHandler http.HandlerFunc

// SecretHandler enables secrets to be managed
SecretHandler http.HandlerFunc

@ -44,6 +42,4 @@ type HandlerSet struct {

// NamespaceListerHandler lists namespaces
NamespaceListerHandler http.HandlerFunc

NamespaceMutatorHandler http.HandlerFunc
}
@ -1,18 +1,13 @@
// License: OpenFaaS Community Edition (CE) EULA
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)

// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package types

import (
"fmt"
"net"
"net/http"
"net/url"
"time"

"github.com/openfaas/faas/gateway/version"
)

// NewHTTPClientReverseProxy proxies to an upstream host through the use of a http.Client
@ -36,20 +31,18 @@ func NewHTTPClientReverseProxy(baseURL *url.URL, timeout time.Duration, maxIdleC
// https://github.com/minio/minio/pull/5860

// Taken from http.DefaultTransport in Go 1.11
h.Client.Transport = &proxyTransport{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: timeout,
KeepAlive: timeout,
DualStack: true,
}).DialContext,
MaxIdleConns: maxIdleConns,
MaxIdleConnsPerHost: maxIdleConnsPerHost,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
h.Client.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: timeout,
KeepAlive: timeout,
DualStack: true,
}).DialContext,
MaxIdleConns: maxIdleConns,
MaxIdleConnsPerHost: maxIdleConnsPerHost,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}

return &h
@ -61,19 +54,3 @@ type HTTPClientReverseProxy struct {
Client *http.Client
Timeout time.Duration
}

// proxyTransport is an http.RoundTripper for the reverse proxy client.
// It ensures default headers like the `User-Agent` are set on requests.
type proxyTransport struct {
// Transport is the underlying HTTP transport to use when making requests.
Transport http.RoundTripper
}

// RoundTrip implements the RoundTripper interface.
func (t *proxyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if _, ok := req.Header["User-Agent"]; !ok {
req.Header.Set("User-Agent", fmt.Sprintf("openfaas-ce-gateway/%s", version.BuildVersion()))
}

return t.Transport.RoundTrip(req)
}
|
@ -1,7 +1,5 @@
|
||||
// License: OpenFaaS Community Edition (CE) EULA
|
||||
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)
|
||||
|
||||
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package types
|
||||
|
||||
|
@ -1,7 +1,5 @@
|
||||
// License: OpenFaaS Community Edition (CE) EULA
|
||||
// Copyright (c) 2017,2019-2024 OpenFaaS Author(s)
|
||||
|
||||
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||
|
||||
package types
|
||||
|
||||
@ -98,7 +96,7 @@ func TestRead_ScaleZeroDefaultAndOverride(t *testing.T) {
|
||||
want = true
|
||||
|
||||
if config.ScaleFromZero != want {
|
||||
t.Logf("ScaleFromZero was overridden - should be %v, got: %v", want, config.ScaleFromZero)
|
||||
t.Logf("ScaleFromZero was overriden - should be %v, got: %v", want, config.ScaleFromZero)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
|
33
gateway/vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
@ -3,7 +3,8 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

@ -24,11 +25,8 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

The package is written with optimized pure Go and also contains even faster
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.

[xxHash]: http://cyan4973.github.io/xxHash/
This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

@ -47,20 +45,19 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| ---------- | --------- | --------- |
| 4 B | 1.3 GB/s | 1.2 GB/s |
| 16 B | 2.9 GB/s | 3.5 GB/s |
| 100 B | 6.9 GB/s | 8.1 GB/s |
| 4 KB | 11.7 GB/s | 16.7 GB/s |
| 10 MB | 12.0 GB/s | 17.3 GB/s |
| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
CPU using the following commands under Go 1.19.2:
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package
@ -70,5 +67,3 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
- [Ristretto](https://github.com/dgraph-io/ristretto)
- [Badger](https://github.com/dgraph-io/badger)

10
gateway/vendor/github.com/cespare/xxhash/v2/testall.sh
generated
vendored
@ -1,10 +0,0 @@
#!/bin/bash
set -eu -o pipefail

# Small convenience script for running the tests with various combinations of
# arch/tags. This assumes we're running on amd64 and have qemu available.

go test ./...
go test -tags purego ./...
GOARCH=arm64 go test
GOARCH=arm64 go test -tags purego
70
gateway/vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
70
gateway/vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
@ -16,16 +16,21 @@ const (
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array for the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
//
|
||||
// Note that a zero-valued Digest is not ready to receive writes.
|
||||
// Call Reset or create a Digest using New before calling other methods.
|
||||
type Digest struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
@ -36,31 +41,19 @@ type Digest struct {
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new Digest with a zero seed.
|
||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
||||
func New() *Digest {
|
||||
return NewWithSeed(0)
|
||||
}
|
||||
|
||||
// NewWithSeed creates a new Digest with the given seed.
|
||||
func NewWithSeed(seed uint64) *Digest {
|
||||
var d Digest
|
||||
d.ResetWithSeed(seed)
|
||||
d.Reset()
|
||||
return &d
|
||||
}
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
// It uses a seed value of zero.
|
||||
func (d *Digest) Reset() {
|
||||
d.ResetWithSeed(0)
|
||||
}
|
||||
|
||||
// ResetWithSeed clears the Digest's state so that it can be reused.
|
||||
// It uses the given seed to initialize the state.
|
||||
func (d *Digest) ResetWithSeed(seed uint64) {
|
||||
d.v1 = seed + prime1 + prime2
|
||||
d.v2 = seed + prime2
|
||||
d.v3 = seed
|
||||
d.v4 = seed - prime1
|
||||
d.v1 = prime1v + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
@ -76,23 +69,21 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(memleft, b)
|
||||
copy(d.mem[d.n:], b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
c := copy(memleft, b)
|
||||
copy(d.mem[d.n:], b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[c:]
|
||||
b = b[32-d.n:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
@ -142,20 +133,21 @@ func (d *Digest) Sum64() uint64 {
|
||||
|
||||
h += d.total
|
||||
|
||||
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
b = b[4:]
|
||||
i += 4
|
||||
}
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
|
@ -1,9 +1,6 @@
|
||||
//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm
|
||||
// +build amd64 arm64
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
// +build !noasm
|
||||
|
||||
package xxhash
|
||||
|
||||
@ -13,4 +10,4 @@ package xxhash
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
//go:noescape
|
||||
func writeBlocks(s *Digest, b []byte) int
|
||||
func writeBlocks(d *Digest, b []byte) int
|
308
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
308
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
@ -1,209 +1,215 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define h AX
|
||||
#define d AX
|
||||
#define p SI // pointer to advance through b
|
||||
#define n DX
|
||||
#define end BX // loop end
|
||||
#define v1 R8
|
||||
#define v2 R9
|
||||
#define v3 R10
|
||||
#define v4 R11
|
||||
#define x R12
|
||||
#define prime1 R13
|
||||
#define prime2 R14
|
||||
#define prime4 DI
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// DI prime4v
|
||||
|
||||
#define round(acc, x) \
|
||||
IMULQ prime2, x \
|
||||
ADDQ x, acc \
|
||||
ROLQ $31, acc \
|
||||
IMULQ prime1, acc
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
IMULQ prime2, x \
|
||||
ROLQ $31, x \
|
||||
IMULQ prime1, x
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and x.
|
||||
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
XORQ x, acc \
|
||||
IMULQ prime1, acc \
|
||||
ADDQ prime4, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||
// to process.
|
||||
#define blockLoop() \
|
||||
loop: \
|
||||
MOVQ +0(p), x \
|
||||
round(v1, x) \
|
||||
MOVQ +8(p), x \
|
||||
round(v2, x) \
|
||||
MOVQ +16(p), x \
|
||||
round(v3, x) \
|
||||
MOVQ +24(p), x \
|
||||
round(v4, x) \
|
||||
ADDQ $32, p \
|
||||
CMPQ p, end \
|
||||
JLE loop
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ DI, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·primes+24(SB), prime4
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), DI
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), p
|
||||
MOVQ b_len+8(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, end
|
||||
SUBQ $32, BX
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ n, $32
|
||||
CMPQ DX, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ prime1, v1
|
||||
ADDQ prime2, v1
|
||||
MOVQ prime2, v2
|
||||
XORQ v3, v3
|
||||
XORQ v4, v4
|
||||
SUBQ prime1, v4
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
blockLoop()
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
MOVQ v1, h
|
||||
ROLQ $1, h
|
||||
MOVQ v2, x
|
||||
ROLQ $7, x
|
||||
ADDQ x, h
|
||||
MOVQ v3, x
|
||||
ROLQ $12, x
|
||||
ADDQ x, h
|
||||
MOVQ v4, x
|
||||
ROLQ $18, x
|
||||
ADDQ x, h
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·primes+32(SB), h
|
||||
MOVQ ·prime5v(SB), AX
|
||||
|
||||
afterBlocks:
|
||||
ADDQ n, h
|
||||
ADDQ DX, AX
|
||||
|
||||
ADDQ $24, end
|
||||
CMPQ p, end
|
||||
JG try4
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
loop8:
|
||||
MOVQ (p), x
|
||||
ADDQ $8, p
|
||||
round0(x)
|
||||
XORQ x, h
|
||||
ROLQ $27, h
|
||||
IMULQ prime1, h
|
||||
ADDQ prime4, h
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
|
||||
CMPQ p, end
|
||||
JLE loop8
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
|
||||
try4:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JG try1
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ DI, AX
|
||||
|
||||
MOVL (p), x
|
||||
ADDQ $4, p
|
||||
IMULQ prime1, x
|
||||
XORQ x, h
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
|
||||
ROLQ $23, h
|
||||
IMULQ prime2, h
|
||||
ADDQ ·primes+16(SB), h
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
|
||||
try1:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JGE finalize
|
||||
|
||||
loop1:
|
||||
MOVBQZX (p), x
|
||||
ADDQ $1, p
|
||||
IMULQ ·primes+32(SB), x
|
||||
XORQ x, h
|
||||
ROLQ $11, h
|
||||
IMULQ prime1, h
|
||||
singlesLoop:
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
CMPQ p, end
|
||||
JL loop1
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
MOVQ h, x
|
||||
SHRQ $33, x
|
||||
XORQ x, h
|
||||
IMULQ prime2, h
|
||||
MOVQ h, x
|
||||
SHRQ $29, x
|
||||
XORQ x, h
|
||||
IMULQ ·primes+16(SB), h
|
||||
MOVQ h, x
|
||||
SHRQ $32, x
|
||||
XORQ x, h
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
|
||||
MOVQ h, ret+24(FP)
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), p
|
||||
MOVQ b_len+16(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
SUBQ $32, end
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ s+0(FP), d
|
||||
MOVQ 0(d), v1
|
||||
MOVQ 8(d), v2
|
||||
MOVQ 16(d), v3
|
||||
MOVQ 24(d), v4
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop()
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ v1, 0(d)
|
||||
MOVQ v2, 8(d)
|
||||
MOVQ v3, 16(d)
|
||||
MOVQ v4, 24(d)
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// The number of bytes written is p minus the old base pointer.
|
||||
SUBQ b_base+8(FP), p
|
||||
MOVQ p, ret+32(FP)
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
183
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
generated
vendored
183
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
generated
vendored
@ -1,183 +0,0 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define digest R1
|
||||
#define h R2 // return value
|
||||
#define p R3 // input pointer
|
||||
#define n R4 // input length
|
||||
#define nblocks R5 // n / 32
|
||||
#define prime1 R7
|
||||
#define prime2 R8
|
||||
#define prime3 R9
|
||||
#define prime4 R10
|
||||
#define prime5 R11
|
||||
#define v1 R12
|
||||
#define v2 R13
|
||||
#define v3 R14
|
||||
#define v4 R15
|
||||
#define x1 R20
|
||||
#define x2 R21
|
||||
#define x3 R22
|
||||
#define x4 R23
|
||||
|
||||
#define round(acc, x) \
|
||||
MADD prime2, acc, x, acc \
|
||||
ROR $64-31, acc \
|
||||
MUL prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
MUL prime2, x \
|
||||
ROR $64-31, x \
|
||||
MUL prime1, x
|
||||
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
EOR x, acc \
|
||||
MADD acc, prime4, prime1, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||
#define blockLoop() \
|
||||
LSR $5, n, nblocks \
|
||||
PCALIGN $16 \
|
||||
loop: \
|
||||
LDP.P 16(p), (x1, x2) \
|
||||
LDP.P 16(p), (x3, x4) \
|
||||
round(v1, x1) \
|
||||
round(v2, x2) \
|
||||
round(v3, x3) \
|
||||
round(v4, x4) \
|
||||
SUB $1, nblocks \
|
||||
CBNZ nblocks, loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
LDP b_base+0(FP), (p, n)
|
||||
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
LDP ·primes+16(SB), (prime3, prime4)
|
||||
MOVD ·primes+32(SB), prime5
|
||||
|
||||
CMP $32, n
|
||||
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||
BLT afterLoop
|
||||
|
||||
ADD prime1, prime2, v1
|
||||
MOVD prime2, v2
|
||||
MOVD $0, v3
|
||||
NEG prime1, v4
|
||||
|
||||
blockLoop()
|
||||
|
||||
ROR $64-1, v1, x1
|
||||
ROR $64-7, v2, x2
|
||||
ADD x1, x2
|
||||
ROR $64-12, v3, x3
|
||||
ROR $64-18, v4, x4
|
||||
ADD x3, x4
|
||||
ADD x2, x4, h
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
afterLoop:
|
||||
ADD n, h
|
||||
|
||||
TBZ $4, n, try8
|
||||
LDP.P 16(p), (x1, x2)
|
||||
|
||||
round0(x1)
|
||||
|
||||
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||
// rotated register) is worth a small but measurable speedup for small
|
||||
// inputs.
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
round0(x2)
|
||||
ROR $64-27, h
|
||||
EOR x2 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try8:
|
||||
TBZ $3, n, try4
|
||||
MOVD.P 8(p), x1
|
||||
|
||||
round0(x1)
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try4:
|
||||
TBZ $2, n, try2
|
||||
MOVWU.P 4(p), x2
|
||||
|
||||
MUL prime1, x2
|
||||
ROR $64-23, h
|
||||
EOR x2 @> 64-23, h, h
|
||||
MADD h, prime3, prime2, h
|
||||
|
||||
try2:
|
||||
TBZ $1, n, try1
|
||||
MOVHU.P 2(p), x3
|
||||
AND $255, x3, x1
|
||||
LSR $8, x3, x2
|
||||
|
||||
MUL prime5, x1
|
||||
ROR $64-11, h
|
||||
EOR x1 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
MUL prime5, x2
|
||||
ROR $64-11, h
|
||||
EOR x2 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
try1:
|
||||
TBZ $0, n, finalize
|
||||
MOVBU (p), x4
|
||||
|
||||
MUL prime5, x4
|
||||
ROR $64-11, h
|
||||
EOR x4 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
finalize:
|
||||
EOR h >> 33, h
|
||||
MUL prime2, h
|
||||
EOR h >> 29, h
|
||||
MUL prime3, h
|
||||
EOR h >> 32, h
|
||||
|
||||
MOVD h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
|
||||
// Load state. Assume v[1-4] are stored contiguously.
|
||||
MOVD d+0(FP), digest
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 16(digest), (v3, v4)
|
||||
|
||||
LDP b_base+8(FP), (p, n)
|
||||
|
||||
blockLoop()
|
||||
|
||||
// Store updated state.
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v3, v4), 16(digest)
|
||||
|
||||
BIC $31, n
|
||||
MOVD n, ret+32(FP)
|
||||
RET
|
15
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
generated
vendored
@ -1,15 +0,0 @@
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int

24
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
@ -1,9 +1,8 @@
//go:build (!amd64 && !arm64) || appengine || !gc || purego
// +build !amd64,!arm64 appengine !gc purego
// +build !amd64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
@ -15,10 +14,10 @@ func Sum64(b []byte) uint64 {
var h uint64

if n >= 32 {
v1 := primes[0] + prime2
v1 := prime1v + prime2
v2 := prime2
v3 := uint64(0)
v4 := -primes[0]
v4 := -prime1v
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@ -37,18 +36,19 @@ func Sum64(b []byte) uint64 {

h += uint64(n)

for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(b[:8]))
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if len(b) >= 4 {
h ^= uint64(u32(b[:4])) * prime1
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
h = rol23(h)*prime2 + prime3
b = b[4:]
i += 4
}
for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[0]) * prime5
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
h = rol11(h) * prime1
}
3
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
3
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
@ -1,11 +1,10 @@
|
||||
//go:build appengine
|
||||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
func Sum64String(s string) uint64 {
|
||||
return Sum64([]byte(s))
|
||||
}
|
||||
|
5
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
5
gateway/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@@ -1,4 +1,3 @@
//go:build !appengine
// +build !appengine

// This file encapsulates usage of unsafe.
@@ -12,7 +11,7 @@ import (

// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.

@@ -33,7 +32,7 @@ import (
//
// See https://github.com/golang/go/issues/42739 for discussion.

// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
3
gateway/vendor/github.com/golang/protobuf/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
3
gateway/vendor/github.com/golang/protobuf/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
@@ -1,16 +1,16 @@
Copyright (c) 2013 The Go Authors. All rights reserved.
Copyright 2010 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

@@ -25,3 +25,4 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
324
gateway/vendor/github.com/golang/protobuf/proto/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,324 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed32 = 5
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
)
|
||||
|
||||
// EncodeVarint returns the varint encoded bytes of v.
|
||||
func EncodeVarint(v uint64) []byte {
|
||||
return protowire.AppendVarint(nil, v)
|
||||
}
|
||||
|
||||
// SizeVarint returns the length of the varint encoded bytes of v.
|
||||
// This is equal to len(EncodeVarint(v)).
|
||||
func SizeVarint(v uint64) int {
|
||||
return protowire.SizeVarint(v)
|
||||
}
|
||||
|
||||
// DecodeVarint parses a varint encoded integer from b,
|
||||
// returning the integer value and the length of the varint.
|
||||
// It returns (0, 0) if there is a parse error.
|
||||
func DecodeVarint(b []byte) (uint64, int) {
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return 0, 0
|
||||
}
|
||||
return v, n
|
||||
}
|
||||
|
||||
// Buffer is a buffer for encoding and decoding the protobuf wire format.
|
||||
// It may be reused between invocations to reduce memory usage.
|
||||
type Buffer struct {
|
||||
buf []byte
|
||||
idx int
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer initialized with buf,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func NewBuffer(buf []byte) *Buffer {
|
||||
return &Buffer{buf: buf}
|
||||
}
|
||||
|
||||
// SetDeterministic specifies whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexicographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (b *Buffer) SetDeterministic(deterministic bool) {
|
||||
b.deterministic = deterministic
|
||||
}
|
||||
|
||||
// SetBuf sets buf as the internal buffer,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func (b *Buffer) SetBuf(buf []byte) {
|
||||
b.buf = buf
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Reset clears the internal buffer of all written and unread data.
|
||||
func (b *Buffer) Reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Bytes returns the internal buffer.
|
||||
func (b *Buffer) Bytes() []byte {
|
||||
return b.buf
|
||||
}
|
||||
|
||||
// Unread returns the unread portion of the buffer.
|
||||
func (b *Buffer) Unread() []byte {
|
||||
return b.buf[b.idx:]
|
||||
}
|
||||
|
||||
// Marshal appends the wire-format encoding of m to the buffer.
|
||||
func (b *Buffer) Marshal(m Message) error {
|
||||
var err error
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the wire-format message in the buffer and
|
||||
// places the decoded results in m.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) Unmarshal(m Message) error {
|
||||
err := UnmarshalMerge(b.Unread(), m)
|
||||
b.idx = len(b.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
|
||||
|
||||
func (m *unknownFields) String() string { panic("not implemented") }
|
||||
func (m *unknownFields) Reset() { panic("not implemented") }
|
||||
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
|
||||
|
||||
// DebugPrint dumps the encoded bytes of b with a header and footer including s
|
||||
// to stdout. This is only intended for debugging.
|
||||
func (*Buffer) DebugPrint(s string, b []byte) {
|
||||
m := MessageReflect(new(unknownFields))
|
||||
m.SetUnknown(b)
|
||||
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
|
||||
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
|
||||
}
|
||||
|
||||
// EncodeVarint appends an unsigned varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeVarint(v uint64) error {
|
||||
b.buf = protowire.AppendVarint(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag32(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
|
||||
}
|
||||
|
||||
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag64(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed32(v uint64) error {
|
||||
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed64(v uint64) error {
|
||||
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
|
||||
func (b *Buffer) EncodeRawBytes(v []byte) error {
|
||||
b.buf = protowire.AppendBytes(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
|
||||
// It does not validate whether v contains valid UTF-8.
|
||||
func (b *Buffer) EncodeStringBytes(v string) error {
|
||||
b.buf = protowire.AppendString(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeMessage appends a length-prefixed encoded message to the buffer.
|
||||
func (b *Buffer) EncodeMessage(m Message) error {
|
||||
var err error
|
||||
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// DecodeVarint consumes an encoded unsigned varint from the buffer.
|
||||
func (b *Buffer) DecodeVarint() (uint64, error) {
|
||||
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
|
||||
func (b *Buffer) DecodeZigzag32() (uint64, error) {
|
||||
v, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
|
||||
}
|
||||
|
||||
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
|
||||
func (b *Buffer) DecodeZigzag64() (uint64, error) {
|
||||
v, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
|
||||
}
|
||||
|
||||
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
|
||||
func (b *Buffer) DecodeFixed32() (uint64, error) {
|
||||
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
|
||||
func (b *Buffer) DecodeFixed64() (uint64, error) {
|
||||
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
|
||||
// If alloc is specified, it returns a copy of the raw bytes
|
||||
// rather than a sub-slice of the buffer.
|
||||
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
|
||||
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return nil, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
if alloc {
|
||||
v = append([]byte(nil), v...)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
|
||||
// It does not validate whether the raw bytes contain valid UTF-8.
|
||||
func (b *Buffer) DecodeStringBytes() (string, error) {
|
||||
v, n := protowire.ConsumeString(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return "", protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// DecodeMessage consumes a length-prefixed message from the buffer.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) DecodeMessage(m Message) error {
|
||||
v, err := b.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return UnmarshalMerge(v, m)
|
||||
}
|
||||
|
||||
// DecodeGroup consumes a message group from the buffer.
|
||||
// It assumes that the start group marker has already been consumed and
|
||||
// consumes all bytes until (and including) the end group marker.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) DecodeGroup(m Message) error {
|
||||
v, n, err := consumeGroup(b.buf[b.idx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.idx += n
|
||||
return UnmarshalMerge(v, m)
|
||||
}
|
||||
|
||||
// consumeGroup parses b until it finds an end group marker, returning
|
||||
// the raw bytes of the message (excluding the end group marker) and the
|
||||
// total length of the message (including the end group marker).
|
||||
func consumeGroup(b []byte) ([]byte, int, error) {
|
||||
b0 := b
|
||||
depth := 1 // assume this follows a start group marker
|
||||
for {
|
||||
_, wtyp, tagLen := protowire.ConsumeTag(b)
|
||||
if tagLen < 0 {
|
||||
return nil, 0, protowire.ParseError(tagLen)
|
||||
}
|
||||
b = b[tagLen:]
|
||||
|
||||
var valLen int
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
_, valLen = protowire.ConsumeVarint(b)
|
||||
case protowire.Fixed32Type:
|
||||
_, valLen = protowire.ConsumeFixed32(b)
|
||||
case protowire.Fixed64Type:
|
||||
_, valLen = protowire.ConsumeFixed64(b)
|
||||
case protowire.BytesType:
|
||||
_, valLen = protowire.ConsumeBytes(b)
|
||||
case protowire.StartGroupType:
|
||||
depth++
|
||||
case protowire.EndGroupType:
|
||||
depth--
|
||||
default:
|
||||
return nil, 0, errors.New("proto: cannot parse reserved wire type")
|
||||
}
|
||||
if valLen < 0 {
|
||||
return nil, 0, protowire.ParseError(valLen)
|
||||
}
|
||||
b = b[valLen:]
|
||||
|
||||
if depth == 0 {
|
||||
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
|
||||
}
|
||||
}
|
||||
}
|
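A brief orientation sketch (mine, not part of the vendored file): every Encode* method on proto.Buffer above has a matching Decode*, so a varint/zig-zag round trip looks like this. Only functions defined in the diff above are used.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Encode a plain varint and a zig-zag varint into one buffer.
	enc := proto.NewBuffer(nil)
	enc.EncodeVarint(300)
	n := int64(-1)
	enc.EncodeZigzag64(uint64(n)) // zig-zag keeps small negative numbers short

	// Decode from the same bytes, in the same order.
	dec := proto.NewBuffer(enc.Bytes())
	v, _ := dec.DecodeVarint()
	z, _ := dec.DecodeZigzag64()
	fmt.Println(v, int64(z)) // 300 -1
}
```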
63
gateway/vendor/github.com/golang/protobuf/proto/defaults.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
// SetDefaults sets unpopulated scalar fields to their default values.
|
||||
// Fields within a oneof are not set even if they have a default value.
|
||||
// SetDefaults is recursively called upon any populated message fields.
|
||||
func SetDefaults(m Message) {
|
||||
if m != nil {
|
||||
setDefaults(MessageReflect(m))
|
||||
}
|
||||
}
|
||||
|
||||
func setDefaults(m protoreflect.Message) {
|
||||
fds := m.Descriptor().Fields()
|
||||
for i := 0; i < fds.Len(); i++ {
|
||||
fd := fds.Get(i)
|
||||
if !m.Has(fd) {
|
||||
if fd.HasDefault() && fd.ContainingOneof() == nil {
|
||||
v := fd.Default()
|
||||
if fd.Kind() == protoreflect.BytesKind {
|
||||
v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
|
||||
}
|
||||
m.Set(fd, v)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
switch {
|
||||
// Handle singular message.
|
||||
case fd.Cardinality() != protoreflect.Repeated:
|
||||
if fd.Message() != nil {
|
||||
setDefaults(m.Get(fd).Message())
|
||||
}
|
||||
// Handle list of messages.
|
||||
case fd.IsList():
|
||||
if fd.Message() != nil {
|
||||
ls := m.Get(fd).List()
|
||||
for i := 0; i < ls.Len(); i++ {
|
||||
setDefaults(ls.Get(i).Message())
|
||||
}
|
||||
}
|
||||
// Handle map of messages.
|
||||
case fd.IsMap():
|
||||
if fd.MapValue().Message() != nil {
|
||||
ms := m.Get(fd).Map()
|
||||
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
setDefaults(v.Message())
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
113
gateway/vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
// Deprecated: No longer returned.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
)
|
||||
|
||||
// Deprecated: Do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this type existed for internal-use only.
|
||||
type InternalMessageInfo struct{}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
|
||||
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Size(m Message) int {
|
||||
return protoV2.Size(MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
|
||||
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
|
||||
}
|
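Illustration only (not from the diff): UnmarshalJSONEnum above accepts either JSON form of an enum value, the quoted name or the bare number. A small sketch with a hypothetical name-to-number map:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Hypothetical enum mapping of the kind generated code provides.
	colors := map[string]int32{"RED": 0, "GREEN": 1, "BLUE": 2}

	v1, _ := proto.UnmarshalJSONEnum(colors, []byte(`"GREEN"`), "Color") // new style: string
	v2, _ := proto.UnmarshalJSONEnum(colors, []byte(`2`), "Color")       // old style: number

	fmt.Println(v1, v2) // 1 2
}
```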
58
gateway/vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||
// of such fields are preserved in the Message. This allows a later call to
|
||||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m != nil {
|
||||
discardUnknown(MessageReflect(m))
|
||||
}
|
||||
}
|
||||
|
||||
func discardUnknown(m protoreflect.Message) {
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
|
||||
switch {
|
||||
// Handle singular message.
|
||||
case fd.Cardinality() != protoreflect.Repeated:
|
||||
if fd.Message() != nil {
|
||||
discardUnknown(m.Get(fd).Message())
|
||||
}
|
||||
// Handle list of messages.
|
||||
case fd.IsList():
|
||||
if fd.Message() != nil {
|
||||
ls := m.Get(fd).List()
|
||||
for i := 0; i < ls.Len(); i++ {
|
||||
discardUnknown(ls.Get(i).Message())
|
||||
}
|
||||
}
|
||||
// Handle map of messages.
|
||||
case fd.IsMap():
|
||||
if fd.MapValue().Message() != nil {
|
||||
ms := m.Get(fd).Map()
|
||||
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
discardUnknown(v.Message())
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// Discard unknown fields.
|
||||
if len(m.GetUnknown()) > 0 {
|
||||
m.SetUnknown(nil)
|
||||
}
|
||||
}
|
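For context, a sketch of my own (pb.Example and its import path are hypothetical stand-ins for any protoc-gen-go output): DiscardUnknown removes the unknown-field bytes that Unmarshal preserved, so a re-marshal no longer carries them.

```go
package main

import (
	"github.com/golang/protobuf/proto"

	pb "example.com/gen/examplepb" // hypothetical generated package
)

// dropUnknown re-encodes wire without any unknown fields.
func dropUnknown(wire []byte) ([]byte, error) {
	msg := &pb.Example{} // hypothetical generated message type
	if err := proto.Unmarshal(wire, msg); err != nil {
		return nil, err
	}
	proto.DiscardUnknown(msg) // recursively clears unknown fields
	return proto.Marshal(msg)
}
```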
356
gateway/vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
Normal file
@@ -0,0 +1,356 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
type (
|
||||
// ExtensionDesc represents an extension descriptor and
|
||||
// is used to interact with an extension field in a message.
|
||||
//
|
||||
// Variables of this type are generated in code by protoc-gen-go.
|
||||
ExtensionDesc = protoimpl.ExtensionInfo
|
||||
|
||||
// ExtensionRange represents a range of message extensions.
|
||||
// Used in code generated by protoc-gen-go.
|
||||
ExtensionRange = protoiface.ExtensionRangeV1
|
||||
|
||||
// Deprecated: Do not use; this is an internal type.
|
||||
Extension = protoimpl.ExtensionFieldV1
|
||||
|
||||
// Deprecated: Do not use; this is an internal type.
|
||||
XXX_InternalExtensions = protoimpl.ExtensionFields
|
||||
)
|
||||
|
||||
// ErrMissingExtension reports whether the extension was not present.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
// HasExtension reports whether the extension field is present in m
|
||||
// either as an explicitly populated field or as an unknown field.
|
||||
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check whether any populated known field matches the field number.
|
||||
xtd := xt.TypeDescriptor()
|
||||
if isValidExtension(mr.Descriptor(), xtd) {
|
||||
has = mr.Has(xtd)
|
||||
} else {
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
has = int32(fd.Number()) == xt.Field
|
||||
return !has
|
||||
})
|
||||
}
|
||||
|
||||
// Check whether any unknown field matches the field number.
|
||||
for b := mr.GetUnknown(); !has && len(b) > 0; {
|
||||
num, _, n := protowire.ConsumeField(b)
|
||||
has = int32(num) == xt.Field
|
||||
b = b[n:]
|
||||
}
|
||||
return has
|
||||
}
|
||||
|
||||
// ClearExtension removes the extension field from m
|
||||
// either as an explicitly populated field or as an unknown field.
|
||||
func ClearExtension(m Message, xt *ExtensionDesc) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
xtd := xt.TypeDescriptor()
|
||||
if isValidExtension(mr.Descriptor(), xtd) {
|
||||
mr.Clear(xtd)
|
||||
} else {
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
if int32(fd.Number()) == xt.Field {
|
||||
mr.Clear(fd)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
clearUnknown(mr, fieldNum(xt.Field))
|
||||
}
|
||||
|
||||
// ClearAllExtensions clears all extensions from m.
|
||||
// This includes populated fields and unknown fields in the extension range.
|
||||
func ClearAllExtensions(m Message) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
mr.Clear(fd)
|
||||
}
|
||||
return true
|
||||
})
|
||||
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
|
||||
}
|
||||
|
||||
// GetExtension retrieves a proto2 extended field from m.
|
||||
//
|
||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||
// If the field is not present, then the default value is returned (if one is specified),
|
||||
// otherwise ErrMissingExtension is reported.
|
||||
//
|
||||
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||
// then GetExtension returns the raw encoded bytes for the extension field.
|
||||
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
// Retrieve the unknown fields for this extension field.
|
||||
var bo protoreflect.RawFields
|
||||
for bi := mr.GetUnknown(); len(bi) > 0; {
|
||||
num, _, n := protowire.ConsumeField(bi)
|
||||
if int32(num) == xt.Field {
|
||||
bo = append(bo, bi[:n]...)
|
||||
}
|
||||
bi = bi[n:]
|
||||
}
|
||||
|
||||
// For type incomplete descriptors, only retrieve the unknown fields.
|
||||
if xt.ExtensionType == nil {
|
||||
return []byte(bo), nil
|
||||
}
|
||||
|
||||
// If the extension field only exists as unknown fields, unmarshal it.
|
||||
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
|
||||
xtd := xt.TypeDescriptor()
|
||||
if !isValidExtension(mr.Descriptor(), xtd) {
|
||||
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
|
||||
}
|
||||
if !mr.Has(xtd) && len(bo) > 0 {
|
||||
m2 := mr.New()
|
||||
if err := (proto.UnmarshalOptions{
|
||||
Resolver: extensionResolver{xt},
|
||||
}.Unmarshal(bo, m2.Interface())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m2.Has(xtd) {
|
||||
mr.Set(xtd, m2.Get(xtd))
|
||||
clearUnknown(mr, fieldNum(xt.Field))
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether the message has the extension field set or a default.
|
||||
var pv protoreflect.Value
|
||||
switch {
|
||||
case mr.Has(xtd):
|
||||
pv = mr.Get(xtd)
|
||||
case xtd.HasDefault():
|
||||
pv = xtd.Default()
|
||||
default:
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
v := xt.InterfaceOf(pv)
|
||||
rv := reflect.ValueOf(v)
|
||||
if isScalarKind(rv.Kind()) {
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// extensionResolver is a custom extension resolver that stores a single
|
||||
// extension type that takes precedence over the global registry.
|
||||
type extensionResolver struct{ xt protoreflect.ExtensionType }
|
||||
|
||||
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
|
||||
if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
|
||||
return r.xt, nil
|
||||
}
|
||||
return protoregistry.GlobalTypes.FindExtensionByName(field)
|
||||
}
|
||||
|
||||
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
|
||||
if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
|
||||
return r.xt, nil
|
||||
}
|
||||
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
|
||||
}
|
||||
|
||||
// GetExtensions returns a list of the extensions values present in m,
|
||||
// corresponding with the provided list of extension descriptors, xts.
|
||||
// If an extension is missing in m, the corresponding value is nil.
|
||||
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
vs := make([]interface{}, len(xts))
|
||||
for i, xt := range xts {
|
||||
v, err := GetExtension(m, xt)
|
||||
if err != nil {
|
||||
if err == ErrMissingExtension {
|
||||
continue
|
||||
}
|
||||
return vs, err
|
||||
}
|
||||
vs[i] = v
|
||||
}
|
||||
return vs, nil
|
||||
}
|
||||
|
||||
// SetExtension sets an extension field in m to the provided value.
|
||||
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
|
||||
return errNotExtendable
|
||||
}
|
||||
|
||||
rv := reflect.ValueOf(v)
|
||||
if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
|
||||
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
|
||||
}
|
||||
if rv.Kind() == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
|
||||
}
|
||||
if isScalarKind(rv.Elem().Kind()) {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
|
||||
xtd := xt.TypeDescriptor()
|
||||
if !isValidExtension(mr.Descriptor(), xtd) {
|
||||
return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
|
||||
}
|
||||
mr.Set(xtd, xt.ValueOf(v))
|
||||
clearUnknown(mr, fieldNum(xt.Field))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRawExtension inserts b into the unknown fields of m.
|
||||
//
|
||||
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
|
||||
func SetRawExtension(m Message, fnum int32, b []byte) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
// Verify that the raw field is valid.
|
||||
for b0 := b; len(b0) > 0; {
|
||||
num, _, n := protowire.ConsumeField(b0)
|
||||
if int32(num) != fnum {
|
||||
panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
|
||||
}
|
||||
b0 = b0[n:]
|
||||
}
|
||||
|
||||
ClearExtension(m, &ExtensionDesc{Field: fnum})
|
||||
mr.SetUnknown(append(mr.GetUnknown(), b...))
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a list of extension descriptors found in m,
|
||||
// containing descriptors for both populated extension fields in m and
|
||||
// also unknown fields of m that are in the extension range.
|
||||
// For the latter case, a type-incomplete descriptor is provided where only
|
||||
// the ExtensionDesc.Field field is populated.
|
||||
// The order of the extension descriptors is undefined.
|
||||
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
// Collect a set of known extension descriptors.
|
||||
extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
xt := fd.(protoreflect.ExtensionTypeDescriptor)
|
||||
if xd, ok := xt.Type().(*ExtensionDesc); ok {
|
||||
extDescs[fd.Number()] = xd
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// Collect a set of unknown extension descriptors.
|
||||
extRanges := mr.Descriptor().ExtensionRanges()
|
||||
for b := mr.GetUnknown(); len(b) > 0; {
|
||||
num, _, n := protowire.ConsumeField(b)
|
||||
if extRanges.Has(num) && extDescs[num] == nil {
|
||||
extDescs[num] = nil
|
||||
}
|
||||
b = b[n:]
|
||||
}
|
||||
|
||||
// Transpose the set of descriptors into a list.
|
||||
var xts []*ExtensionDesc
|
||||
for num, xt := range extDescs {
|
||||
if xt == nil {
|
||||
xt = &ExtensionDesc{Field: int32(num)}
|
||||
}
|
||||
xts = append(xts, xt)
|
||||
}
|
||||
return xts, nil
|
||||
}
|
||||
|
||||
// isValidExtension reports whether xtd is a valid extension descriptor for md.
|
||||
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
|
||||
return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
|
||||
}
|
||||
|
||||
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
|
||||
// This function exists for historical reasons since the representation of
|
||||
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
|
||||
func isScalarKind(k reflect.Kind) bool {
|
||||
switch k {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// clearUnknown removes unknown fields from m where remover.Has reports true.
|
||||
func clearUnknown(m protoreflect.Message, remover interface {
|
||||
Has(protoreflect.FieldNumber) bool
|
||||
}) {
|
||||
var bo protoreflect.RawFields
|
||||
for bi := m.GetUnknown(); len(bi) > 0; {
|
||||
num, _, n := protowire.ConsumeField(bi)
|
||||
if !remover.Has(num) {
|
||||
bo = append(bo, bi[:n]...)
|
||||
}
|
||||
bi = bi[n:]
|
||||
}
|
||||
if bi := m.GetUnknown(); len(bi) != len(bo) {
|
||||
m.SetUnknown(bo)
|
||||
}
|
||||
}
|
||||
|
||||
type fieldNum protoreflect.FieldNumber
|
||||
|
||||
func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
|
||||
return protoreflect.FieldNumber(n1) == n2
|
||||
}
|
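Purely illustrative (not in the diff): the extension helpers above are normally driven by a generated *ExtensionDesc. pb.Base, pb.E_MyExtension and their import path are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	base := &pb.Base{} // hypothetical proto2 message with an extension range

	// Attach an extension value, then read it back via the same descriptor.
	if err := proto.SetExtension(base, pb.E_MyExtension, proto.String("hello")); err != nil {
		panic(err)
	}
	fmt.Println(proto.HasExtension(base, pb.E_MyExtension)) // true

	if v, err := proto.GetExtension(base, pb.E_MyExtension); err == nil {
		fmt.Println(*v.(*string)) // hello
	}
}
```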
306
gateway/vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
Normal file
@@ -0,0 +1,306 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// StructProperties represents protocol buffer type information for a
|
||||
// generated protobuf message in the open-struct API.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type StructProperties struct {
|
||||
// Prop are the properties for each field.
|
||||
//
|
||||
// Fields belonging to a oneof are stored in OneofTypes instead, with a
|
||||
// single Properties representing the parent oneof held here.
|
||||
//
|
||||
// The order of Prop matches the order of fields in the Go struct.
|
||||
// Struct fields that are not related to protobufs have a "XXX_" prefix
|
||||
// in the Properties.Name and must be ignored by the user.
|
||||
Prop []*Properties
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the protobuf field name.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// Properties represents the type information for a protobuf message field.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type Properties struct {
|
||||
// Name is a placeholder name with little meaningful semantic value.
|
||||
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
|
||||
Name string
|
||||
// OrigName is the protobuf field name or oneof name.
|
||||
OrigName string
|
||||
// JSONName is the JSON name for the protobuf field.
|
||||
JSONName string
|
||||
// Enum is a placeholder name for enums.
|
||||
// For historical reasons, this is neither the Go name for the enum,
|
||||
// nor the protobuf name for the enum.
|
||||
Enum string // Deprecated: Do not use.
|
||||
// Weak contains the full name of the weakly referenced message.
|
||||
Weak string
|
||||
// Wire is a string representation of the wire type.
|
||||
Wire string
|
||||
// WireType is the protobuf wire type for the field.
|
||||
WireType int
|
||||
// Tag is the protobuf field number.
|
||||
Tag int
|
||||
// Required reports whether this is a required field.
|
||||
Required bool
|
||||
// Optional reports whether this is an optional field.
|
||||
Optional bool
|
||||
// Repeated reports whether this is a repeated field.
|
||||
Repeated bool
|
||||
// Packed reports whether this is a packed repeated field of scalars.
|
||||
Packed bool
|
||||
// Proto3 reports whether this field operates under the proto3 syntax.
|
||||
Proto3 bool
|
||||
// Oneof reports whether this field belongs within a oneof.
|
||||
Oneof bool
|
||||
|
||||
// Default is the default value in string form.
|
||||
Default string
|
||||
// HasDefault reports whether the field has a default value.
|
||||
HasDefault bool
|
||||
|
||||
// MapKeyProp is the properties for the key field for a map field.
|
||||
MapKeyProp *Properties
|
||||
// MapValProp is the properties for the value field for a map field.
|
||||
MapValProp *Properties
|
||||
}
|
||||
|
||||
// OneofProperties represents the type information for a protobuf oneof.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type OneofProperties struct {
|
||||
// Type is a pointer to the generated wrapper type for the field value.
|
||||
// This is nil for messages that are not in the open-struct API.
|
||||
Type reflect.Type
|
||||
// Field is the index into StructProperties.Prop for the containing oneof.
|
||||
Field int
|
||||
// Prop is the properties for the field.
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += "," + strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != "" {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if len(p.Weak) > 0 {
|
||||
s += ",weak=" + p.Weak
|
||||
}
|
||||
if p.Proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.Oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(tag string) {
|
||||
// For example: "bytes,49,opt,name=foo,def=hello!"
|
||||
for len(tag) > 0 {
|
||||
i := strings.IndexByte(tag, ',')
|
||||
if i < 0 {
|
||||
i = len(tag)
|
||||
}
|
||||
switch s := tag[:i]; {
|
||||
case strings.HasPrefix(s, "name="):
|
||||
p.OrigName = s[len("name="):]
|
||||
case strings.HasPrefix(s, "json="):
|
||||
p.JSONName = s[len("json="):]
|
||||
case strings.HasPrefix(s, "enum="):
|
||||
p.Enum = s[len("enum="):]
|
||||
case strings.HasPrefix(s, "weak="):
|
||||
p.Weak = s[len("weak="):]
|
||||
case strings.Trim(s, "0123456789") == "":
|
||||
n, _ := strconv.ParseUint(s, 10, 32)
|
||||
p.Tag = int(n)
|
||||
case s == "opt":
|
||||
p.Optional = true
|
||||
case s == "req":
|
||||
p.Required = true
|
||||
case s == "rep":
|
||||
p.Repeated = true
|
||||
case s == "varint" || s == "zigzag32" || s == "zigzag64":
|
||||
p.Wire = s
|
||||
p.WireType = WireVarint
|
||||
case s == "fixed32":
|
||||
p.Wire = s
|
||||
p.WireType = WireFixed32
|
||||
case s == "fixed64":
|
||||
p.Wire = s
|
||||
p.WireType = WireFixed64
|
||||
case s == "bytes":
|
||||
p.Wire = s
|
||||
p.WireType = WireBytes
|
||||
case s == "group":
|
||||
p.Wire = s
|
||||
p.WireType = WireStartGroup
|
||||
case s == "packed":
|
||||
p.Packed = true
|
||||
case s == "proto3":
|
||||
p.Proto3 = true
|
||||
case s == "oneof":
|
||||
p.Oneof = true
|
||||
case strings.HasPrefix(s, "def="):
|
||||
// The default tag is special in that everything afterwards is the
|
||||
// default regardless of the presence of commas.
|
||||
p.HasDefault = true
|
||||
p.Default, i = tag[len("def="):], len(tag)
|
||||
}
|
||||
tag = strings.TrimPrefix(tag[i:], ",")
|
||||
}
|
||||
}
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
|
||||
if typ != nil && typ.Kind() == reflect.Map {
|
||||
p.MapKeyProp = new(Properties)
|
||||
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
|
||||
p.MapValProp = new(Properties)
|
||||
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
|
||||
}
|
||||
}
|
||||
|
||||
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t,
|
||||
// which must be a generated protocol buffer message in the open-struct API,
|
||||
// where protobuf message fields are represented by exported Go struct fields.
|
||||
//
|
||||
// Deprecated: Use protobuf reflection instead.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if p, ok := propertiesCache.Load(t); ok {
|
||||
return p.(*StructProperties)
|
||||
}
|
||||
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
|
||||
return p.(*StructProperties)
|
||||
}
|
||||
|
||||
func newProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||
}
|
||||
|
||||
var hasOneof bool
|
||||
prop := new(StructProperties)
|
||||
|
||||
// Construct a list of properties for each field in the struct.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
p := new(Properties)
|
||||
f := t.Field(i)
|
||||
tagField := f.Tag.Get("protobuf")
|
||||
p.Init(f.Type, f.Name, tagField, &f)
|
||||
|
||||
tagOneof := f.Tag.Get("protobuf_oneof")
|
||||
if tagOneof != "" {
|
||||
hasOneof = true
|
||||
p.OrigName = tagOneof
|
||||
}
|
||||
|
||||
// Rename unrelated struct fields with the "XXX_" prefix since so much
|
||||
// user code simply checks for this to exclude special fields.
|
||||
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
|
||||
p.Name = "XXX_" + p.Name
|
||||
p.OrigName = "XXX_" + p.OrigName
|
||||
} else if p.Weak != "" {
|
||||
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
|
||||
}
|
||||
|
||||
prop.Prop = append(prop.Prop, p)
|
||||
}
|
||||
|
||||
// Construct a mapping of oneof field names to properties.
|
||||
if hasOneof {
|
||||
var oneofWrappers []interface{}
|
||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
|
||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
|
||||
}
|
||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
|
||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
|
||||
}
|
||||
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
|
||||
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
|
||||
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
|
||||
}
|
||||
}
|
||||
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, wrapper := range oneofWrappers {
|
||||
p := &OneofProperties{
|
||||
Type: reflect.ValueOf(wrapper).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
f := p.Type.Elem().Field(0)
|
||||
p.Prop.Name = f.Name
|
||||
p.Prop.Parse(f.Tag.Get("protobuf"))
|
||||
|
||||
// Determine the struct field that contains this oneof.
|
||||
// Each wrapper is assignable to exactly one parent field.
|
||||
var foundOneof bool
|
||||
for i := 0; i < t.NumField() && !foundOneof; i++ {
|
||||
if p.Type.AssignableTo(t.Field(i).Type) {
|
||||
p.Field = i
|
||||
foundOneof = true
|
||||
}
|
||||
}
|
||||
if !foundOneof {
|
||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||
}
|
||||
prop.OneofTypes[p.Prop.OrigName] = p
|
||||
}
|
||||
}
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.Prop) }
|
||||
func (sp *StructProperties) Less(i, j int) bool { return false }
|
||||
func (sp *StructProperties) Swap(i, j int) { return }
|
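One concrete illustration (mine, using only the API shown above): Parse reads the protobuf struct-tag format, for example the tag quoted in its own comment.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// The example tag from the Parse comment above.
	p := new(proto.Properties)
	p.Parse("bytes,49,opt,name=foo,def=hello!")

	fmt.Println(p.OrigName)              // foo
	fmt.Println(p.Tag, p.WireType)       // 49 2
	fmt.Println(p.Optional)              // true
	fmt.Println(p.HasDefault, p.Default) // true hello!
}
```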
167
gateway/vendor/github.com/golang/protobuf/proto/proto.go
generated
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package proto provides functionality for handling protocol buffer messages.
|
||||
// In particular, it provides marshaling and unmarshaling between a protobuf
|
||||
// message and the binary wire format.
|
||||
//
|
||||
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
|
||||
// more information.
|
||||
//
|
||||
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
|
||||
package proto
|
||||
|
||||
import (
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
ProtoPackageIsVersion1 = true
|
||||
ProtoPackageIsVersion2 = true
|
||||
ProtoPackageIsVersion3 = true
|
||||
ProtoPackageIsVersion4 = true
|
||||
)
|
||||
|
||||
// GeneratedEnum is any enum type generated by protoc-gen-go
|
||||
// which is a named int32 kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedEnum interface{}
|
||||
|
||||
// GeneratedMessage is any message type generated by protoc-gen-go
|
||||
// which is a pointer to a named struct kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedMessage interface{}
|
||||
|
||||
// Message is a protocol buffer message.
|
||||
//
|
||||
// This is the v1 version of the message interface and is marginally better
|
||||
// than an empty interface as it lacks any method to programmatically interact
|
||||
// with the contents of the message.
|
||||
//
|
||||
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
|
||||
// exposes protobuf reflection as a first-class feature of the interface.
|
||||
//
|
||||
// To convert a v1 message to a v2 message, use the MessageV2 function.
|
||||
// To convert a v2 message to a v1 message, use the MessageV1 function.
|
||||
type Message = protoiface.MessageV1
|
||||
|
||||
// MessageV1 converts either a v1 or v2 message to a v1 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
|
||||
return protoimpl.X.ProtoMessageV1Of(m)
|
||||
}
|
||||
|
||||
// MessageV2 converts either a v1 or v2 message to a v2 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV2(m GeneratedMessage) protoV2.Message {
|
||||
return protoimpl.X.ProtoMessageV2Of(m)
|
||||
}
|
||||
|
||||
// MessageReflect returns a reflective view for a message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageReflect(m Message) protoreflect.Message {
|
||||
return protoimpl.X.MessageOf(m)
|
||||
}
|
||||
|
||||
// Marshaler is implemented by messages that can marshal themselves.
|
||||
// This interface is used by the following functions: Size, Marshal,
|
||||
// Buffer.Marshal, and Buffer.EncodeMessage.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Marshaler interface {
|
||||
// Marshal formats the encoded bytes of the message.
|
||||
// It should be deterministic and emit valid protobuf wire data.
|
||||
// The caller takes ownership of the returned buffer.
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// Unmarshaler is implemented by messages that can unmarshal themselves.
|
||||
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
|
||||
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Unmarshaler interface {
|
||||
// Unmarshal parses the encoded bytes of the protobuf wire input.
|
||||
// The provided buffer is only valid for the duration of the method call.
|
||||
// It should not reset the receiver message.
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Merger is implemented by messages that can merge themselves.
|
||||
// This interface is used by the following functions: Clone and Merge.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Merger interface {
|
||||
// Merge merges the contents of src into the receiver message.
|
||||
// It clones all data structures in src such that it aliases no mutable
|
||||
// memory referenced by src.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// RequiredNotSetError is an error type returned when
|
||||
// marshaling or unmarshaling a message with missing required fields.
|
||||
type RequiredNotSetError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.err != nil {
|
||||
return e.err.Error()
|
||||
}
|
||||
return "proto: required field not set"
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func checkRequiredNotSet(m protoV2.Message) error {
|
||||
if err := protoV2.CheckInitialized(m); err != nil {
|
||||
return &RequiredNotSetError{err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of src.
|
||||
func Clone(src Message) Message {
|
||||
return MessageV1(protoV2.Clone(MessageV2(src)))
|
||||
}
|
||||
|
||||
// Merge merges src into dst, which must be messages of the same type.
|
||||
//
|
||||
// Populated scalar fields in src are copied to dst, while populated
|
||||
// singular messages in src are merged into dst by recursively calling Merge.
|
||||
// The elements of every list field in src are appended to the corresponding
|
||||
// list fields in dst. The entries of every map field in src are copied into
|
||||
// the corresponding map field in dst, possibly replacing existing entries.
|
||||
// The unknown fields of src are appended to the unknown fields of dst.
|
||||
func Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Equal reports whether two messages are equal.
|
||||
// If two messages marshal to the same bytes under deterministic serialization,
|
||||
// then Equal is guaranteed to report true.
|
||||
//
|
||||
// Two messages are equal if they are the same protobuf message type,
|
||||
// have the same set of populated known and extension field values,
|
||||
// and the same set of unknown fields values.
|
||||
//
|
||||
// Scalar values are compared with the equivalent of the == operator in Go,
|
||||
// except bytes values which are compared using bytes.Equal and
|
||||
// floating point values which specially treat NaNs as equal.
|
||||
// Message values are compared by recursively calling Equal.
|
||||
// Lists are equal if each element value is also equal.
|
||||
// Maps are equal if they have the same set of keys, where the pair of values
|
||||
// for each key is also equal.
|
||||
func Equal(x, y Message) bool {
|
||||
return protoV2.Equal(MessageV2(x), MessageV2(y))
|
||||
}
|
||||
|
||||
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
||||
ms, ok := md.(interface{ IsMessageSet() bool })
|
||||
return ok && ms.IsMessageSet()
|
||||
}
|
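A last orientation sketch (pb.Example and its field are hypothetical): Clone, Merge and Equal above simply convert to the v2 message type and forward to the v2 API.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	a := &pb.Example{Name: proto.String("gateway")} // hypothetical proto2 field
	b := proto.Clone(a).(*pb.Example)               // deep copy

	fmt.Println(proto.Equal(a, b)) // true

	proto.Merge(b, a)              // merging an equal scalar-only message is a no-op
	fmt.Println(proto.Equal(a, b)) // still true
}
```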
317
gateway/vendor/github.com/golang/protobuf/proto/registry.go
generated
vendored
Normal file
@@ -0,0 +1,317 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
    "reflect"
    "strings"
    "sync"

    "google.golang.org/protobuf/reflect/protodesc"
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"
    "google.golang.org/protobuf/runtime/protoimpl"
)

// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"

// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte

var fileCache sync.Map // map[filePath]fileDescGZIP

// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
func RegisterFile(s filePath, d fileDescGZIP) {
    // Decompress the descriptor.
    zr, err := gzip.NewReader(bytes.NewReader(d))
    if err != nil {
        panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
    }
    b, err := ioutil.ReadAll(zr)
    if err != nil {
        panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
    }

    // Construct a protoreflect.FileDescriptor from the raw descriptor.
    // Note that DescBuilder.Build automatically registers the constructed
    // file descriptor with the v2 registry.
    protoimpl.DescBuilder{RawDescriptor: b}.Build()

    // Locally cache the raw descriptor form for the file.
    fileCache.Store(s, d)
}

// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
    if v, ok := fileCache.Load(s); ok {
        return v.(fileDescGZIP)
    }

    // Find the descriptor in the v2 registry.
    var b []byte
    if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
        b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
    }

    // Locally cache the raw descriptor form for the file.
    if len(b) > 0 {
        v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
        return v.(fileDescGZIP)
    }
    return nil
}

// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"

// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32

// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string

var enumCache sync.Map     // map[enumName]enumsByName
var numFilesCache sync.Map // map[protoreflect.FullName]int

// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
    if _, ok := enumCache.Load(s); ok {
        panic("proto: duplicate enum registered: " + s)
    }
    enumCache.Store(s, m)

    // This does not forward registration to the v2 registry since this API
    // lacks sufficient information to construct a complete v2 enum descriptor.
}

// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
    if v, ok := enumCache.Load(s); ok {
        return v.(enumsByName)
    }

    // Check whether the cache is stale. If the number of files in the current
    // package differs, then it means that some enums may have been recently
    // registered upstream that we do not know about.
    var protoPkg protoreflect.FullName
    if i := strings.LastIndexByte(s, '.'); i >= 0 {
        protoPkg = protoreflect.FullName(s[:i])
    }
    v, _ := numFilesCache.Load(protoPkg)
    numFiles, _ := v.(int)
    if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
        return nil // cache is up-to-date; was not found earlier
    }

    // Update the enum cache for all enums declared in the given proto package.
    numFiles = 0
    protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
        walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
            name := protoimpl.X.LegacyEnumName(ed)
            if _, ok := enumCache.Load(name); !ok {
                m := make(enumsByName)
                evs := ed.Values()
                for i := evs.Len() - 1; i >= 0; i-- {
                    ev := evs.Get(i)
                    m[string(ev.Name())] = int32(ev.Number())
                }
                enumCache.LoadOrStore(name, m)
            }
        })
        numFiles++
        return true
    })
    numFilesCache.Store(protoPkg, numFiles)

    // Check cache again for enum map.
    if v, ok := enumCache.Load(s); ok {
        return v.(enumsByName)
    }
    return nil
}

// walkEnums recursively walks all enums declared in d.
func walkEnums(d interface {
    Enums() protoreflect.EnumDescriptors
    Messages() protoreflect.MessageDescriptors
}, f func(protoreflect.EnumDescriptor)) {
    eds := d.Enums()
    for i := eds.Len() - 1; i >= 0; i-- {
        f(eds.Get(i))
    }
    mds := d.Messages()
    for i := mds.Len() - 1; i >= 0; i-- {
        walkEnums(mds.Get(i), f)
    }
}

// messageName is the full name of protobuf message.
type messageName = string

var messageTypeCache sync.Map // map[messageName]reflect.Type

// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
func RegisterType(m Message, s messageName) {
    mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
    if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
        panic(err)
    }
    messageTypeCache.Store(s, reflect.TypeOf(m))
}

// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
    t := reflect.TypeOf(m)
    if t.Kind() != reflect.Map {
        panic(fmt.Sprintf("invalid map kind: %v", t))
    }
    if _, ok := messageTypeCache.Load(s); ok {
        panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
    }
    messageTypeCache.Store(s, t)
}

// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
    if v, ok := messageTypeCache.Load(s); ok {
        return v.(reflect.Type)
    }

    // Derive the message type from the v2 registry.
    var t reflect.Type
    if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
        t = messageGoType(mt)
    }

    // If we could not get a concrete type, it is possible that it is a
    // pseudo-message for a map entry.
    if t == nil {
        d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
        if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
            kt := goTypeForField(md.Fields().ByNumber(1))
            vt := goTypeForField(md.Fields().ByNumber(2))
            t = reflect.MapOf(kt, vt)
        }
    }

    // Locally cache the message type for the given name.
    if t != nil {
        v, _ := messageTypeCache.LoadOrStore(s, t)
        return v.(reflect.Type)
    }
    return nil
}

func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
    switch k := fd.Kind(); k {
    case protoreflect.EnumKind:
        if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
            return enumGoType(et)
        }
        return reflect.TypeOf(protoreflect.EnumNumber(0))
    case protoreflect.MessageKind, protoreflect.GroupKind:
        if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
            return messageGoType(mt)
        }
        return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
    default:
        return reflect.TypeOf(fd.Default().Interface())
    }
}

func enumGoType(et protoreflect.EnumType) reflect.Type {
    return reflect.TypeOf(et.New(0))
}

func messageGoType(mt protoreflect.MessageType) reflect.Type {
    return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}

// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
    if m == nil {
        return ""
    }
    if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
        return m.XXX_MessageName()
    }
    return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}

// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
    if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
        panic(err)
    }
}

type extensionsByNumber = map[int32]*ExtensionDesc

var extensionCache sync.Map // map[messageName]extensionsByNumber

// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
    // Check whether the cache is stale. If the number of extensions for
    // the given message differs, then it means that some extensions were
    // recently registered upstream that we do not know about.
    s := MessageName(m)
    v, _ := extensionCache.Load(s)
    xs, _ := v.(extensionsByNumber)
    if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
        return xs // cache is up-to-date
    }

    // Cache is stale, re-compute the extensions map.
    xs = make(extensionsByNumber)
    protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
        if xd, ok := xt.(*ExtensionDesc); ok {
            xs[int32(xt.TypeDescriptor().Number())] = xd
        } else {
            // TODO: This implies that the protoreflect.ExtensionType is a
            // custom type not generated by protoc-gen-go. We could try and
            // convert the type to an ExtensionDesc.
        }
        return true
    })
    extensionCache.Store(s, xs)
    return xs
}
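A small sketch of the legacy registry lookups above, assuming only that the Timestamp well-known type (vendored later in this diff) is linked into the binary:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
    // MessageName resolves the full protobuf name of a linked-in message.
    fmt.Println(proto.MessageName(&timestamp.Timestamp{})) // "google.protobuf.Timestamp"

    // MessageType consults the local cache, then falls back to the v2 registry;
    // it returns nil for names that were never registered in this binary.
    fmt.Println(proto.MessageType("google.protobuf.Timestamp"))
    fmt.Println(proto.MessageType("example.NotRegistered") == nil) // true
}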
801 gateway/vendor/github.com/golang/protobuf/proto/text_decode.go generated vendored Normal file
@@ -0,0 +1,801 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapTextUnmarshalV2 = false
|
||||
|
||||
// ParseError is returned by UnmarshalText.
|
||||
type ParseError struct {
|
||||
Message string
|
||||
|
||||
// Deprecated: Do not use.
|
||||
Line, Offset int
|
||||
}
|
||||
|
||||
func (e *ParseError) Error() string {
|
||||
if wrapTextUnmarshalV2 {
|
||||
return e.Message
|
||||
}
|
||||
if e.Line == 1 {
|
||||
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
|
||||
}
|
||||
|
||||
// UnmarshalText parses a proto text formatted string into m.
|
||||
func UnmarshalText(s string, m Message) error {
|
||||
if u, ok := m.(encoding.TextUnmarshaler); ok {
|
||||
return u.UnmarshalText([]byte(s))
|
||||
}
|
||||
|
||||
m.Reset()
|
||||
mi := MessageV2(m)
|
||||
|
||||
if wrapTextUnmarshalV2 {
|
||||
err := prototext.UnmarshalOptions{
|
||||
AllowPartial: true,
|
||||
}.Unmarshal([]byte(s), mi)
|
||||
if err != nil {
|
||||
return &ParseError{Message: err.Error()}
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
} else {
|
||||
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
}
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
seen := make(map[protoreflect.FieldNumber]bool)
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := protoreflect.Name(tok.value)
|
||||
fd := fds.ByName(name)
|
||||
switch {
|
||||
case fd == nil:
|
||||
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
||||
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
||||
fd = gd
|
||||
}
|
||||
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
||||
fd = nil
|
||||
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
||||
fd = nil
|
||||
}
|
||||
if fd == nil {
|
||||
typeName := string(md.FullName())
|
||||
if m, ok := m.Interface().(Message); ok {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() == reflect.Ptr {
|
||||
typeName = t.Elem().String()
|
||||
}
|
||||
}
|
||||
return p.errorf("unknown field name %q in %v", name, typeName)
|
||||
}
|
||||
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
||||
}
|
||||
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
||||
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
||||
}
|
||||
seen[fd.Number()] = true
|
||||
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
if v, err = p.unmarshalValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
||||
name, err := p.consumeExtensionOrAnyName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
||||
if err != nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
||||
}
|
||||
m2 := mt.New()
|
||||
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := protoV2.Marshal(m2.Interface())
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
||||
}
|
||||
|
||||
urlFD := m.Descriptor().Fields().ByName("type_url")
|
||||
valFD := m.Descriptor().Fields().ByName("value")
|
||||
if seen[urlFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
||||
}
|
||||
if seen[valFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
||||
}
|
||||
m.Set(urlFD, protoreflect.ValueOfString(name))
|
||||
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
||||
seen[urlFD.Number()] = true
|
||||
seen[valFD.Number()] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
xname := protoreflect.FullName(name)
|
||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||
if xt == nil && isMessageSet(m.Descriptor()) {
|
||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||
}
|
||||
if xt == nil {
|
||||
return p.errorf("unrecognized extension %q", name)
|
||||
}
|
||||
fd := xt.TypeDescriptor()
|
||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
||||
}
|
||||
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
v, err = p.unmarshalValue(v, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
return p.consumeOptionalSeparator()
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := v.List()
|
||||
var err error
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
return v, nil
|
||||
case fd.IsMap():
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order.
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
keyFD := fd.MapKey()
|
||||
valFD := fd.MapValue()
|
||||
|
||||
mv := v.Map()
|
||||
kv := keyFD.Default()
|
||||
vv := mv.NewValue()
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
var err error
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
mv.Set(kv.MapKey(), vv)
|
||||
return v, nil
|
||||
default:
|
||||
p.back()
|
||||
return p.unmarshalSingularValue(v, fd)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
return protoreflect.ValueOfBool(true), nil
|
||||
case "false", "0", "f", "False":
|
||||
return protoreflect.ValueOfBool(false), nil
|
||||
}
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(x)), nil
|
||||
}
|
||||
|
||||
// The C++ parser accepts large positive hex numbers that uses
|
||||
// two's complement arithmetic to represent negative numbers.
|
||||
// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(x)), nil
|
||||
}
|
||||
|
||||
// The C++ parser accepts large positive hex numbers that uses
|
||||
// two's complement arithmetic to represent negative numbers.
|
||||
// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfUint32(uint32(x)), nil
|
||||
}
|
||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfUint64(uint64(x)), nil
|
||||
}
|
||||
case protoreflect.FloatKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
||||
return protoreflect.ValueOfFloat32(float32(x)), nil
|
||||
}
|
||||
case protoreflect.DoubleKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
||||
return protoreflect.ValueOfFloat64(float64(x)), nil
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfString(tok.unquoted), nil
|
||||
}
|
||||
case protoreflect.BytesKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
||||
}
|
||||
case protoreflect.EnumKind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
||||
}
|
||||
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
||||
if vd != nil {
|
||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||
}
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
err := p.unmarshalMessage(v.Message(), terminator)
|
||||
return v, err
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||
}
|
||||
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
if fd.Message() == nil {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
||||
// the following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in unmarshalMessage to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(rune(i)), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
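The text_decode.go file above backs proto.UnmarshalText; a minimal sketch of that entry point, assuming the Timestamp well-known type as the target message:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
    var ts timestamp.Timestamp
    // UnmarshalText resets ts and then parses the proto text format into it.
    if err := proto.UnmarshalText(`seconds: 1700000000 nanos: 42`, &ts); err != nil {
        panic(err)
    }
    fmt.Println(ts.Seconds, ts.Nanos) // 1700000000 42
}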
560 gateway/vendor/github.com/golang/protobuf/proto/text_encode.go generated vendored Normal file
@@ -0,0 +1,560 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapTextMarshalV2 = false
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line)
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes the proto text format of m to w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
||||
b, err := tm.marshal(m)
|
||||
if len(b) > 0 {
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Text returns a proto text formatted string of m.
|
||||
func (tm *TextMarshaler) Text(m Message) string {
|
||||
b, _ := tm.marshal(m)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return []byte("<nil>"), nil
|
||||
}
|
||||
|
||||
if wrapTextMarshalV2 {
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
return m.MarshalText()
|
||||
}
|
||||
|
||||
opts := prototext.MarshalOptions{
|
||||
AllowPartial: true,
|
||||
EmitUnknown: true,
|
||||
}
|
||||
if !tm.Compact {
|
||||
opts.Indent = " "
|
||||
}
|
||||
if !tm.ExpandAny {
|
||||
opts.Resolver = (*protoregistry.Types)(nil)
|
||||
}
|
||||
return opts.Marshal(mr.Interface())
|
||||
} else {
|
||||
w := &textWriter{
|
||||
compact: tm.Compact,
|
||||
expandAny: tm.ExpandAny,
|
||||
complete: true,
|
||||
}
|
||||
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
b, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.Write(b)
|
||||
return w.buf, nil
|
||||
}
|
||||
|
||||
err := w.writeMessage(mr)
|
||||
return w.buf, err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTextMarshaler = TextMarshaler{}
|
||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||
)
|
||||
|
||||
// MarshalText writes the proto text format of m to w.
|
||||
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
|
||||
|
||||
// MarshalTextString returns a proto text formatted string of m.
|
||||
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
|
||||
|
||||
// CompactText writes the compact proto text format of m to w.
|
||||
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
|
||||
|
||||
// CompactTextString returns a compact proto text formatted string of m.
|
||||
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
endBraceNewline = []byte("}\n")
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
compact bool // same as TextMarshaler.Compact
|
||||
expandAny bool // same as TextMarshaler.ExpandAny
|
||||
complete bool // whether the current position is a complete line
|
||||
indent int // indentation level; never negative
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, _ error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, p...)
|
||||
w.complete = false
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
w.buf = append(w.buf, ' ')
|
||||
n++
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
if i+1 < len(frags) {
|
||||
w.buf = append(w.buf, '\n')
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, c)
|
||||
w.complete = c == '\n'
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
|
||||
if fd.Kind() != protoreflect.GroupKind {
|
||||
w.buf = append(w.buf, fd.Name()...)
|
||||
w.WriteByte(':')
|
||||
} else {
|
||||
// Use message type name for group field name.
|
||||
w.buf = append(w.buf, fd.Message().Name()...)
|
||||
}
|
||||
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
|
||||
md := m.Descriptor()
|
||||
fdURL := md.Fields().ByName("type_url")
|
||||
fdVal := md.Fields().ByName("value")
|
||||
|
||||
url := m.Get(fdURL).String()
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
b := m.Get(fdVal).Bytes()
|
||||
m2 := mt.New()
|
||||
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
if requiresQuotes(url) {
|
||||
w.writeQuotedString(url)
|
||||
} else {
|
||||
w.Write([]byte(url))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.indent++
|
||||
}
|
||||
if err := w.writeMessage(m2); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.indent--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeMessage(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if w.expandAny && md.FullName() == "google.protobuf.Any" {
|
||||
if canExpand, err := w.writeProto3Any(m); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fds := md.Fields()
|
||||
for i := 0; i < fds.Len(); {
|
||||
fd := fds.Get(i)
|
||||
if od := fd.ContainingOneof(); od != nil {
|
||||
fd = m.WhichOneof(od)
|
||||
i += od.Fields().Len()
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
if fd == nil || !m.Has(fd) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := m.Get(fd).List()
|
||||
for j := 0; j < lv.Len(); j++ {
|
||||
w.writeName(fd)
|
||||
v := lv.Get(j)
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
case fd.IsMap():
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := m.Get(fd).Map()
|
||||
|
||||
type entry struct{ key, val protoreflect.Value }
|
||||
var entries []entry
|
||||
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
entries = append(entries, entry{k.Value(), v})
|
||||
return true
|
||||
})
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
switch kfd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return !entries[i].key.Bool() && entries[j].key.Bool()
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return entries[i].key.Int() < entries[j].key.Int()
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return entries[i].key.Uint() < entries[j].key.Uint()
|
||||
case protoreflect.StringKind:
|
||||
return entries[i].key.String() < entries[j].key.String()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
})
|
||||
for _, entry := range entries {
|
||||
w.writeName(fd)
|
||||
w.WriteByte('<')
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
w.writeName(kfd)
|
||||
if err := w.writeSingularValue(entry.key, kfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.writeName(vfd)
|
||||
if err := w.writeSingularValue(entry.val, vfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.indent--
|
||||
w.WriteByte('>')
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
default:
|
||||
w.writeName(fd)
|
||||
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
if b := m.GetUnknown(); len(b) > 0 {
|
||||
w.writeUnknownFields(b)
|
||||
}
|
||||
return w.writeExtensions(m)
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
switch fd.Kind() {
|
||||
case protoreflect.FloatKind, protoreflect.DoubleKind:
|
||||
switch vf := v.Float(); {
|
||||
case math.IsInf(vf, +1):
|
||||
w.Write(posInf)
|
||||
case math.IsInf(vf, -1):
|
||||
w.Write(negInf)
|
||||
case math.IsNaN(vf):
|
||||
w.Write(nan)
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
// NOTE: This does not validate UTF-8 for historical reasons.
|
||||
w.writeQuotedString(string(v.String()))
|
||||
case protoreflect.BytesKind:
|
||||
w.writeQuotedString(string(v.Bytes()))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var bra, ket byte = '<', '>'
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
w.WriteByte(bra)
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
m := v.Message()
|
||||
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
|
||||
b, err := m2.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.Write(b)
|
||||
} else {
|
||||
w.writeMessage(m)
|
||||
}
|
||||
w.indent--
|
||||
w.WriteByte(ket)
|
||||
case protoreflect.EnumKind:
|
||||
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
|
||||
fmt.Fprint(w, ev.Name())
|
||||
} else {
|
||||
fmt.Fprint(w, v.Enum())
|
||||
}
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeQuotedString writes a quoted string in the protocol buffer text format.
|
||||
func (w *textWriter) writeQuotedString(s string) {
|
||||
w.WriteByte('"')
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
w.buf = append(w.buf, `\n`...)
|
||||
case '\r':
|
||||
w.buf = append(w.buf, `\r`...)
|
||||
case '\t':
|
||||
w.buf = append(w.buf, `\t`...)
|
||||
case '"':
|
||||
w.buf = append(w.buf, `\"`...)
|
||||
case '\\':
|
||||
w.buf = append(w.buf, `\\`...)
|
||||
default:
|
||||
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
|
||||
w.buf = append(w.buf, c)
|
||||
} else {
|
||||
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.WriteByte('"')
|
||||
}
|
||||
|
||||
func (w *textWriter) writeUnknownFields(b []byte) {
|
||||
if !w.compact {
|
||||
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
|
||||
}
|
||||
|
||||
for len(b) > 0 {
|
||||
num, wtyp, n := protowire.ConsumeTag(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
|
||||
if wtyp == protowire.EndGroupType {
|
||||
w.indent--
|
||||
w.Write(endBraceNewline)
|
||||
continue
|
||||
}
|
||||
fmt.Fprint(w, num)
|
||||
if wtyp != protowire.StartGroupType {
|
||||
w.WriteByte(':')
|
||||
}
|
||||
if !w.compact || wtyp == protowire.StartGroupType {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed32Type:
|
||||
v, n := protowire.ConsumeFixed32(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed64Type:
|
||||
v, n := protowire.ConsumeFixed64(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.BytesType:
|
||||
v, n := protowire.ConsumeBytes(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprintf(w, "%q", v)
|
||||
case protowire.StartGroupType:
|
||||
w.WriteByte('{')
|
||||
w.indent++
|
||||
default:
|
||||
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
// writeExtensions writes all the extensions in m.
|
||||
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if md.ExtensionRanges().Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ext struct {
|
||||
desc protoreflect.FieldDescriptor
|
||||
val protoreflect.Value
|
||||
}
|
||||
var exts []ext
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
exts = append(exts, ext{fd, v})
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(exts, func(i, j int) bool {
|
||||
return exts[i].desc.Number() < exts[j].desc.Number()
|
||||
})
|
||||
|
||||
for _, ext := range exts {
|
||||
// For message set, use the name of the message as the extension name.
|
||||
name := string(ext.desc.FullName())
|
||||
if isMessageSet(ext.desc.ContainingMessage()) {
|
||||
name = strings.TrimSuffix(name, ".message_set_extension")
|
||||
}
|
||||
|
||||
if !ext.desc.IsList() {
|
||||
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
lv := ext.val.List()
|
||||
for i := 0; i < lv.Len(); i++ {
|
||||
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
fmt.Fprintf(w, "[%s]:", name)
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
for i := 0; i < w.indent*2; i++ {
|
||||
w.buf = append(w.buf, ' ')
|
||||
}
|
||||
w.complete = false
|
||||
}
|
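text_encode.go is the matching encode path behind MarshalTextString and CompactTextString; a short sketch under the same Timestamp assumption:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
    ts := &timestamp.Timestamp{Seconds: 1700000000, Nanos: 42}

    // Multi-line, indented text format.
    fmt.Print(proto.MarshalTextString(ts))
    // Single-line variant produced by the compact marshaler.
    fmt.Println(proto.CompactTextString(ts))
}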
78 gateway/vendor/github.com/golang/protobuf/proto/wire.go generated vendored Normal file
@@ -0,0 +1,78 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
    protoV2 "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/runtime/protoiface"
)

// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
    if m == nil {
        return 0
    }
    mi := MessageV2(m)
    return protoV2.Size(mi)
}

// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
    b, err := marshalAppend(nil, m, false)
    if b == nil {
        b = zeroBytes
    }
    return b, err
}

var zeroBytes = make([]byte, 0, 0)

func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
    if m == nil {
        return nil, ErrNil
    }
    mi := MessageV2(m)
    nbuf, err := protoV2.MarshalOptions{
        Deterministic: deterministic,
        AllowPartial:  true,
    }.MarshalAppend(buf, mi)
    if err != nil {
        return buf, err
    }
    if len(buf) == len(nbuf) {
        if !mi.ProtoReflect().IsValid() {
            return buf, ErrNil
        }
    }
    return nbuf, checkRequiredNotSet(mi)
}

// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
    m.Reset()
    return UnmarshalMerge(b, m)
}

// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
    mi := MessageV2(m)
    out, err := protoV2.UnmarshalOptions{
        AllowPartial: true,
        Merge:        true,
    }.UnmarshalState(protoiface.UnmarshalInput{
        Buf:     b,
        Message: mi.ProtoReflect(),
    })
    if err != nil {
        return err
    }
    if out.Flags&protoiface.UnmarshalInitialized > 0 {
        return nil
    }
    return checkRequiredNotSet(mi)
}
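wire.go forwards the v1 binary encode/decode API to the v2 runtime; a minimal round-trip sketch, again using Timestamp as a stand-in for any generated message:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
    in := &timestamp.Timestamp{Seconds: 1700000000}

    b, err := proto.Marshal(in) // wire-format bytes
    if err != nil {
        panic(err)
    }

    out := &timestamp.Timestamp{}
    if err := proto.Unmarshal(b, out); err != nil { // resets out, then decodes
        panic(err)
    }
    fmt.Println(proto.Equal(in, out), proto.Size(in) == len(b)) // true true
}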
34 gateway/vendor/github.com/golang/protobuf/proto/wrappers.go generated vendored Normal file
@@ -0,0 +1,34 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }

// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }

// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }

// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }

// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }

// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }

// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }

// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }

// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
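These pointer helpers matter because proto2-style optional scalar fields are generated as pointers; a hedged sketch where pb.Config and its fields are hypothetical:

package main

import (
    "github.com/golang/protobuf/proto"
    pb "example.com/project/gen/configpb" // hypothetical proto2-style package
)

func main() {
    // Pointer-typed optional fields can be set inline without temporary variables.
    cfg := &pb.Config{
        Enabled: proto.Bool(true),
        Retries: proto.Int32(3),
        Name:    proto.String("gateway"),
    }
    _ = cfg
}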
64 gateway/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go generated vendored Normal file
@@ -0,0 +1,64 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
|
||||
package timestamp
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
||||
|
||||
type Timestamp = timestamppb.Timestamp
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
|
||||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
|
||||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
|
||||
}
|
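The generated file above reduces the old ptypes/timestamp package to a public-import shim: Timestamp is a type alias for timestamppb.Timestamp, so values built with the new API satisfy code written against the old import path. A small illustrative sketch, assuming nothing beyond the two packages shown:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes/timestamp"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamp.Timestamp is a type alias for timestamppb.Timestamp, so a
	// value built with the new API satisfies code written against the old path.
	var ts *timestamp.Timestamp = timestamppb.New(time.Unix(1_700_000_000, 0))
	fmt.Println(ts.AsTime().UTC())
}
```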
1
gateway/vendor/github.com/gorilla/mux/.gitignore
generated
vendored
@ -1 +0,0 @@
coverage.coverprofile
8
gateway/vendor/github.com/gorilla/mux/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,8 @@
# This is the official list of gorilla/mux authors for copyright purposes.
#
# Please keep the list sorted.

Google LLC (https://opensource.google.com/)
Kamil Kisielk <kamil@kamilkisiel.net>
Matt Silverlock <matt@eatsleeprepeat.net>
Rodrigo Moraes (https://github.com/moraes)
2
gateway/vendor/github.com/gorilla/mux/LICENSE
generated
vendored
@ -1,4 +1,4 @@
Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
34
gateway/vendor/github.com/gorilla/mux/Makefile
generated
vendored
@ -1,34 +0,0 @@
GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest

GO_SEC=$(shell which gosec 2> /dev/null || echo '')
GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest

GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest

.PHONY: golangci-lint
golangci-lint:
	$(if $(GO_LINT), ,go install $(GO_LINT_URI))
	@echo "##### Running golangci-lint"
	golangci-lint run -v

.PHONY: gosec
gosec:
	$(if $(GO_SEC), ,go install $(GO_SEC_URI))
	@echo "##### Running gosec"
	gosec ./...

.PHONY: govulncheck
govulncheck:
	$(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
	@echo "##### Running govulncheck"
	govulncheck ./...

.PHONY: verify
verify: golangci-lint gosec govulncheck

.PHONY: test
test:
	@echo "##### Running tests"
	go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
61
gateway/vendor/github.com/gorilla/mux/README.md
generated
vendored
@ -1,12 +1,12 @@
|
||||
# gorilla/mux
|
||||
|
||||

|
||||
[](https://codecov.io/github/gorilla/mux)
|
||||
[](https://godoc.org/github.com/gorilla/mux)
|
||||
[](https://sourcegraph.com/github.com/gorilla/mux?badge)
|
||||
[](https://godoc.org/github.com/gorilla/mux)
|
||||
[](https://circleci.com/gh/gorilla/mux)
|
||||
[](https://sourcegraph.com/github.com/gorilla/mux?badge)
|
||||
|
||||

|
||||
|
||||

|
||||
https://www.gorillatoolkit.org/pkg/mux
|
||||
|
||||
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
|
||||
their respective handler.
|
||||
@ -247,25 +247,32 @@ type spaHandler struct {
|
||||
// file located at the index path on the SPA handler will be served. This
|
||||
// is suitable behavior for serving an SPA (single page application).
|
||||
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Join internally call path.Clean to prevent directory traversal
|
||||
path := filepath.Join(h.staticPath, r.URL.Path)
|
||||
|
||||
// check whether a file exists or is a directory at the given path
|
||||
fi, err := os.Stat(path)
|
||||
if os.IsNotExist(err) || fi.IsDir() {
|
||||
// file does not exist or path is a directory, serve index.html
|
||||
http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
|
||||
// get the absolute path to prevent directory traversal
|
||||
path, err := filepath.Abs(r.URL.Path)
|
||||
if err != nil {
|
||||
// if we failed to get the absolute path respond with a 400 bad request
|
||||
// and stop
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// if we got an error (that wasn't that the file doesn't exist) stating the
|
||||
// file, return a 500 internal server error and stop
|
||||
// prepend the path with the path to the static directory
|
||||
path = filepath.Join(h.staticPath, path)
|
||||
|
||||
// check whether a file exists at the given path
|
||||
_, err = os.Stat(path)
|
||||
if os.IsNotExist(err) {
|
||||
// file does not exist, serve index.html
|
||||
http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
|
||||
return
|
||||
} else if err != nil {
|
||||
// if we got an error (that wasn't that the file doesn't exist) stating the
|
||||
// file, return a 500 internal server error and stop
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise, use http.FileServer to serve the static file
|
||||
// otherwise, use http.FileServer to serve the static dir
|
||||
http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
@ -368,19 +375,6 @@ url, err := r.Get("article").URL("subdomain", "news",
|
||||
"id", "42")
|
||||
```
|
||||
|
||||
To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available:
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.Host("{domain}").
|
||||
Path("/{group}/{item_id}").
|
||||
Queries("some_data1", "{some_data1}").
|
||||
Queries("some_data2", "{some_data2}").
|
||||
Name("article")
|
||||
|
||||
// Will print [domain group item_id some_data1 some_data2] <nil>
|
||||
fmt.Println(r.Get("article").GetVarNames())
|
||||
|
||||
```
|
||||
### Walking Routes
|
||||
|
||||
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
|
||||
@ -578,7 +572,7 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
|
||||
amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
|
||||
amw := authenticationMiddleware{}
|
||||
amw.Populate()
|
||||
|
||||
r.Use(amw.Middleware)
|
||||
@ -764,8 +758,7 @@ func TestMetricsHandler(t *testing.T) {
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
// To add the vars to the context,
|
||||
// we need to create a router through which we can pass the request.
|
||||
// Need to create a router that we can pass the request through so that the vars will be added to the context
|
||||
router := mux.NewRouter()
|
||||
router.HandleFunc("/metrics/{type}", MetricsHandler)
|
||||
router.ServeHTTP(rr, req)
|
||||
|
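The last README hunk above concerns testing handlers that read mux.Vars: the request must be routed through a Router for the vars to be added to the request context. A minimal sketch of that pattern; MetricsHandler is the placeholder name used in the README excerpt.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

// MetricsHandler echoes the {type} path variable; it stands in for a real handler.
func MetricsHandler(w http.ResponseWriter, req *http.Request) {
	fmt.Fprintf(w, "metric type: %s", mux.Vars(req)["type"])
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/metrics/goroutines", nil)
	rr := httptest.NewRecorder()

	// Route the request through a Router so mux.Vars is populated.
	router := mux.NewRouter()
	router.HandleFunc("/metrics/{type}", MetricsHandler)
	router.ServeHTTP(rr, req)

	fmt.Println(rr.Code, rr.Body.String()) // 200 metric type: goroutines
}
```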
25
gateway/vendor/github.com/gorilla/mux/doc.go
generated
vendored
@ -10,18 +10,18 @@ http.ServeMux, mux.Router matches incoming requests against a list of
registered routes and calls a handler for the route that matches the URL
or other conditions. The main features are:

- Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
- URL hosts, paths and query values can have variables with an optional
regular expression.
- Registered URLs can be built, or "reversed", which helps maintaining
references to resources.
- Routes can be used as subrouters: nested routes are only tested if the
parent route matches. This is useful to define groups of routes that
share common conditions like a host, a path prefix or other repeated
attributes. As a bonus, this optimizes request matching.
- It implements the http.Handler interface so it is compatible with the
standard http.ServeMux.
* Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
* URL hosts, paths and query values can have variables with an optional
regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining
references to resources.
* Routes can be used as subrouters: nested routes are only tested if the
parent route matches. This is useful to define groups of routes that
share common conditions like a host, a path prefix or other repeated
attributes. As a bonus, this optimizes request matching.
* It implements the http.Handler interface so it is compatible with the
standard http.ServeMux.

Let's start registering a couple of URL paths and handlers:

@ -301,5 +301,6 @@ A more complex authentication middleware, which maps session token to users, cou
r.Use(amw.Middleware)

Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.

*/
package mux
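The feature list in the doc.go hunk above maps directly onto the Router API. A brief, hedged sketch tying the bullets to code; the handlers, host name, and port are placeholders.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Path variables, with an optional regexp constraint on {id}.
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req)
		fmt.Fprintf(w, "category=%s id=%s\n", vars["category"], vars["id"])
	}).Methods(http.MethodGet).Name("article")

	// Subrouter: these routes are only tested when the Host matches.
	api := r.Host("api.example.com").Subrouter()
	api.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// "Reversing" a registered route back into a URL.
	if u, err := r.Get("article").URL("category", "technology", "id", "42"); err == nil {
		fmt.Println(u.String()) // /articles/technology/42
	}

	http.ListenAndServe(":8080", r)
}
```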
16
gateway/vendor/github.com/gorilla/mux/mux.go
generated
vendored
@ -31,26 +31,24 @@ func NewRouter() *Router {
// It implements the http.Handler interface, so it can be registered to serve
// requests:
//
// var router = mux.NewRouter()
// var router = mux.NewRouter()
//
// func main() {
//     http.Handle("/", router)
// }
// func main() {
//     http.Handle("/", router)
// }
//
// Or, for Google App Engine, register it in a init() function:
//
// func init() {
//     http.Handle("/", router)
// }
// func init() {
//     http.Handle("/", router)
// }
//
// This will send all incoming requests to the router.
type Router struct {
	// Configurable Handler to be used when no route matches.
	// This can be used to render your own 404 Not Found errors.
	NotFoundHandler http.Handler

	// Configurable Handler to be used when the request method does not match the route.
	// This can be used to render your own 405 Method Not Allowed errors.
	MethodNotAllowedHandler http.Handler

	// Routes to be matched, in order.
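The Router fields shown above (NotFoundHandler, MethodNotAllowedHandler) can be set directly to customise 404 and 405 responses. A short illustrative sketch; the route, messages, and port are arbitrary.

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/users/{id:[0-9]+}", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("user " + mux.Vars(req)["id"]))
	}).Methods(http.MethodGet)

	// Custom 404: no registered route matched the request.
	r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		http.Error(w, "no such route", http.StatusNotFound)
	})

	// Custom 405: a route matched the path but not the HTTP method.
	r.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		http.Error(w, "method not allowed here", http.StatusMethodNotAllowed)
	})

	http.Handle("/", r)
	http.ListenAndServe(":8080", nil)
}
```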
10
gateway/vendor/github.com/gorilla/mux/regexp.go
generated
vendored
@ -22,10 +22,10 @@ type routeRegexpOptions struct {
type regexpType int

const (
	regexpTypePath regexpType = iota
	regexpTypeHost
	regexpTypePrefix
	regexpTypeQuery
	regexpTypePath regexpType = 0
	regexpTypeHost regexpType = 1
	regexpTypePrefix regexpType = 2
	regexpTypeQuery regexpType = 3
)

// newRouteRegexp parses a route template and returns a routeRegexp,
@ -195,7 +195,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {

// url builds a URL part using the given values.
func (r *routeRegexp) url(values map[string]string) (string, error) {
	urlValues := make([]interface{}, len(r.varsN))
	urlValues := make([]interface{}, len(r.varsN), len(r.varsN))
	for k, v := range r.varsN {
		value, ok := values[v]
		if !ok {
109
gateway/vendor/github.com/gorilla/mux/route.go
generated
vendored
@ -64,18 +64,8 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
|
||||
match.MatchErr = nil
|
||||
}
|
||||
|
||||
matchErr = nil // nolint:ineffassign
|
||||
matchErr = nil
|
||||
return false
|
||||
} else {
|
||||
// Multiple routes may share the same path but use different HTTP methods. For instance:
|
||||
// Route 1: POST "/users/{id}".
|
||||
// Route 2: GET "/users/{id}", parameters: "id": "[0-9]+".
|
||||
//
|
||||
// The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2",
|
||||
// The router should return a "Not Found" error as no route fully matches this request.
|
||||
if match.MatchErr == ErrMethodMismatch {
|
||||
match.MatchErr = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -240,9 +230,9 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
|
||||
// Headers adds a matcher for request header values.
|
||||
// It accepts a sequence of key/value pairs to be matched. For example:
|
||||
//
|
||||
// r := mux.NewRouter().NewRoute()
|
||||
// r.Headers("Content-Type", "application/json",
|
||||
// "X-Requested-With", "XMLHttpRequest")
|
||||
// r := mux.NewRouter()
|
||||
// r.Headers("Content-Type", "application/json",
|
||||
// "X-Requested-With", "XMLHttpRequest")
|
||||
//
|
||||
// The above route will only match if both request header values match.
|
||||
// If the value is an empty string, it will match any value if the key is set.
|
||||
@ -265,9 +255,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
|
||||
// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
|
||||
// support. For example:
|
||||
//
|
||||
// r := mux.NewRouter().NewRoute()
|
||||
// r.HeadersRegexp("Content-Type", "application/(text|json)",
|
||||
// "X-Requested-With", "XMLHttpRequest")
|
||||
// r := mux.NewRouter()
|
||||
// r.HeadersRegexp("Content-Type", "application/(text|json)",
|
||||
// "X-Requested-With", "XMLHttpRequest")
|
||||
//
|
||||
// The above route will only match if both the request header matches both regular expressions.
|
||||
// If the value is an empty string, it will match any value if the key is set.
|
||||
@ -293,10 +283,10 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route {
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// r := mux.NewRouter().NewRoute()
|
||||
// r.Host("www.example.com")
|
||||
// r.Host("{subdomain}.domain.com")
|
||||
// r.Host("{subdomain:[a-z]+}.domain.com")
|
||||
// r := mux.NewRouter()
|
||||
// r.Host("www.example.com")
|
||||
// r.Host("{subdomain}.domain.com")
|
||||
// r.Host("{subdomain:[a-z]+}.domain.com")
|
||||
//
|
||||
// Variable names must be unique in a given route. They can be retrieved
|
||||
// calling mux.Vars(request).
|
||||
@ -352,11 +342,11 @@ func (r *Route) Methods(methods ...string) *Route {
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// r := mux.NewRouter().NewRoute()
|
||||
// r.Path("/products/").Handler(ProductsHandler)
|
||||
// r.Path("/products/{key}").Handler(ProductsHandler)
|
||||
// r.Path("/articles/{category}/{id:[0-9]+}").
|
||||
// Handler(ArticleHandler)
|
||||
// r := mux.NewRouter()
|
||||
// r.Path("/products/").Handler(ProductsHandler)
|
||||
// r.Path("/products/{key}").Handler(ProductsHandler)
|
||||
// r.Path("/articles/{category}/{id:[0-9]+}").
|
||||
// Handler(ArticleHandler)
|
||||
//
|
||||
// Variable names must be unique in a given route. They can be retrieved
|
||||
// calling mux.Vars(request).
|
||||
@ -387,8 +377,8 @@ func (r *Route) PathPrefix(tpl string) *Route {
|
||||
// It accepts a sequence of key/value pairs. Values may define variables.
|
||||
// For example:
|
||||
//
|
||||
// r := mux.NewRouter().NewRoute()
|
||||
// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
|
||||
// r := mux.NewRouter()
|
||||
// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
|
||||
//
|
||||
// The above route will only match if the URL contains the defined queries
|
||||
// values, e.g.: ?foo=bar&id=42.
|
||||
@ -483,11 +473,11 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
|
||||
//
|
||||
// It will test the inner routes only if the parent route matched. For example:
|
||||
//
|
||||
// r := mux.NewRouter().NewRoute()
|
||||
// s := r.Host("www.example.com").Subrouter()
|
||||
// s.HandleFunc("/products/", ProductsHandler)
|
||||
// s.HandleFunc("/products/{key}", ProductHandler)
|
||||
// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
|
||||
// r := mux.NewRouter()
|
||||
// s := r.Host("www.example.com").Subrouter()
|
||||
// s.HandleFunc("/products/", ProductsHandler)
|
||||
// s.HandleFunc("/products/{key}", ProductHandler)
|
||||
// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
|
||||
//
|
||||
// Here, the routes registered in the subrouter won't be tested if the host
|
||||
// doesn't match.
|
||||
@ -507,36 +497,36 @@ func (r *Route) Subrouter() *Router {
|
||||
// It accepts a sequence of key/value pairs for the route variables. For
|
||||
// example, given this route:
|
||||
//
|
||||
// r := mux.NewRouter()
|
||||
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
|
||||
// Name("article")
|
||||
// r := mux.NewRouter()
|
||||
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
|
||||
// Name("article")
|
||||
//
|
||||
// ...a URL for it can be built using:
|
||||
//
|
||||
// url, err := r.Get("article").URL("category", "technology", "id", "42")
|
||||
// url, err := r.Get("article").URL("category", "technology", "id", "42")
|
||||
//
|
||||
// ...which will return an url.URL with the following path:
|
||||
//
|
||||
// "/articles/technology/42"
|
||||
// "/articles/technology/42"
|
||||
//
|
||||
// This also works for host variables:
|
||||
//
|
||||
// r := mux.NewRouter()
|
||||
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
|
||||
// Host("{subdomain}.domain.com").
|
||||
// Name("article")
|
||||
// r := mux.NewRouter()
|
||||
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
|
||||
// Host("{subdomain}.domain.com").
|
||||
// Name("article")
|
||||
//
|
||||
// // url.String() will be "http://news.domain.com/articles/technology/42"
|
||||
// url, err := r.Get("article").URL("subdomain", "news",
|
||||
// "category", "technology",
|
||||
// "id", "42")
|
||||
// // url.String() will be "http://news.domain.com/articles/technology/42"
|
||||
// url, err := r.Get("article").URL("subdomain", "news",
|
||||
// "category", "technology",
|
||||
// "id", "42")
|
||||
//
|
||||
// The scheme of the resulting url will be the first argument that was passed to Schemes:
|
||||
//
|
||||
// // url.String() will be "https://example.com"
|
||||
// r := mux.NewRouter().NewRoute()
|
||||
// url, err := r.Host("example.com")
|
||||
// .Schemes("https", "http").URL()
|
||||
// // url.String() will be "https://example.com"
|
||||
// r := mux.NewRouter()
|
||||
// url, err := r.Host("example.com")
|
||||
// .Schemes("https", "http").URL()
|
||||
//
|
||||
// All variables defined in the route are required, and their values must
|
||||
// conform to the corresponding patterns.
|
||||
@ -728,25 +718,6 @@ func (r *Route) GetHostTemplate() (string, error) {
|
||||
return r.regexp.host.template, nil
|
||||
}
|
||||
|
||||
// GetVarNames returns the names of all variables added by regexp matchers
|
||||
// These can be used to know which route variables should be passed into r.URL()
|
||||
func (r *Route) GetVarNames() ([]string, error) {
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
var varNames []string
|
||||
if r.regexp.host != nil {
|
||||
varNames = append(varNames, r.regexp.host.varsN...)
|
||||
}
|
||||
if r.regexp.path != nil {
|
||||
varNames = append(varNames, r.regexp.path.varsN...)
|
||||
}
|
||||
for _, regx := range r.regexp.queries {
|
||||
varNames = append(varNames, regx.varsN...)
|
||||
}
|
||||
return varNames, nil
|
||||
}
|
||||
|
||||
// prepareVars converts the route variable pairs into a map. If the route has a
|
||||
// BuildVarsFunc, it is invoked.
|
||||
func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
|
||||
|
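The route.go doc comments above describe matchers (Headers, HeadersRegexp, Queries, Schemes, Methods) that compose on a single Route. A compact, hedged sketch of combining them; the path, header, and query names are arbitrary.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// A single route combining several of the matchers documented above.
	r.HandleFunc("/products/{key}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "key=%s id=%s\n", mux.Vars(req)["key"], req.FormValue("id"))
	}).
		Methods(http.MethodGet).
		Schemes("http", "https").
		Headers("X-Requested-With", "XMLHttpRequest").
		Queries("id", "{id:[0-9]+}")

	http.ListenAndServe(":8080", r)
}
```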
2
gateway/vendor/github.com/klauspost/compress/.gitattributes
generated
vendored
@ -1,2 +0,0 @@
* -text
*.bin -text -diff
32
gateway/vendor/github.com/klauspost/compress/.gitignore
generated
vendored
@ -1,32 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe

# Linux perf files
perf.data
perf.data.old

# gdb history
.gdb_history
123
gateway/vendor/github.com/klauspost/compress/.goreleaser.yml
generated
vendored
@ -1,123 +0,0 @@
|
||||
version: 2
|
||||
|
||||
before:
|
||||
hooks:
|
||||
- ./gen.sh
|
||||
|
||||
builds:
|
||||
-
|
||||
id: "s2c"
|
||||
binary: s2c
|
||||
main: ./s2/cmd/s2c/main.go
|
||||
flags:
|
||||
- -trimpath
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- aix
|
||||
- linux
|
||||
- freebsd
|
||||
- netbsd
|
||||
- windows
|
||||
- darwin
|
||||
goarch:
|
||||
- 386
|
||||
- amd64
|
||||
- arm
|
||||
- arm64
|
||||
- ppc64
|
||||
- ppc64le
|
||||
- mips64
|
||||
- mips64le
|
||||
goarm:
|
||||
- 7
|
||||
-
|
||||
id: "s2d"
|
||||
binary: s2d
|
||||
main: ./s2/cmd/s2d/main.go
|
||||
flags:
|
||||
- -trimpath
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- aix
|
||||
- linux
|
||||
- freebsd
|
||||
- netbsd
|
||||
- windows
|
||||
- darwin
|
||||
goarch:
|
||||
- 386
|
||||
- amd64
|
||||
- arm
|
||||
- arm64
|
||||
- ppc64
|
||||
- ppc64le
|
||||
- mips64
|
||||
- mips64le
|
||||
goarm:
|
||||
- 7
|
||||
-
|
||||
id: "s2sx"
|
||||
binary: s2sx
|
||||
main: ./s2/cmd/_s2sx/main.go
|
||||
flags:
|
||||
- -modfile=s2sx.mod
|
||||
- -trimpath
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- aix
|
||||
- linux
|
||||
- freebsd
|
||||
- netbsd
|
||||
- windows
|
||||
- darwin
|
||||
goarch:
|
||||
- 386
|
||||
- amd64
|
||||
- arm
|
||||
- arm64
|
||||
- ppc64
|
||||
- ppc64le
|
||||
- mips64
|
||||
- mips64le
|
||||
goarm:
|
||||
- 7
|
||||
|
||||
archives:
|
||||
-
|
||||
id: s2-binaries
|
||||
name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
files:
|
||||
- unpack/*
|
||||
- s2/LICENSE
|
||||
- s2/README.md
|
||||
checksum:
|
||||
name_template: 'checksums.txt'
|
||||
snapshot:
|
||||
version_template: "{{ .Tag }}-next"
|
||||
changelog:
|
||||
sort: asc
|
||||
filters:
|
||||
exclude:
|
||||
- '^doc:'
|
||||
- '^docs:'
|
||||
- '^test:'
|
||||
- '^tests:'
|
||||
- '^Update\sREADME.md'
|
||||
|
||||
nfpms:
|
||||
-
|
||||
file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
vendor: Klaus Post
|
||||
homepage: https://github.com/klauspost/compress
|
||||
maintainer: Klaus Post <klauspost@gmail.com>
|
||||
description: S2 Compression Tool
|
||||
license: BSD 3-Clause
|
||||
formats:
|
||||
- deb
|
||||
- rpm
|
721
gateway/vendor/github.com/klauspost/compress/README.md
generated
vendored
@ -1,721 +0,0 @@
|
||||
# compress
|
||||
|
||||
This package provides various compression algorithms.
|
||||
|
||||
* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
|
||||
* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
|
||||
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
|
||||
* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
|
||||
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
|
||||
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
|
||||
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
|
||||
|
||||
[](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
|
||||
[](https://github.com/klauspost/compress/actions/workflows/go.yml)
|
||||
[](https://sourcegraph.com/github.com/klauspost/compress?badge)
|
||||
|
||||
# changelog
|
||||
|
||||
* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
|
||||
* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
|
||||
* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
|
||||
* s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
|
||||
* zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
|
||||
* flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
|
||||
|
||||
* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
|
||||
* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
|
||||
* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
|
||||
* Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
|
||||
* zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
|
||||
|
||||
* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
|
||||
* zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
|
||||
* zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
|
||||
|
||||
* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
|
||||
* s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
|
||||
* s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
|
||||
|
||||
* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
|
||||
* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
|
||||
* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
|
||||
|
||||
* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)
|
||||
* flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912
|
||||
* zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908
|
||||
* zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913
|
||||
* zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910
|
||||
* s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917
|
||||
https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918
|
||||
|
||||
* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4)
|
||||
* huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887
|
||||
* huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886
|
||||
* gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892
|
||||
* gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890
|
||||
* gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891
|
||||
|
||||
* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3)
|
||||
* fse: Fix max header size https://github.com/klauspost/compress/pull/881
|
||||
* zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877
|
||||
* gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883
|
||||
|
||||
* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
|
||||
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
|
||||
|
||||
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
|
||||
* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
|
||||
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
|
||||
* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
|
||||
|
||||
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
|
||||
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
|
||||
* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
|
||||
* flate: Add limited window compression https://github.com/klauspost/compress/pull/843
|
||||
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
|
||||
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
|
||||
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.16.x</summary>
|
||||
|
||||
|
||||
* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
|
||||
* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
|
||||
* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
|
||||
|
||||
* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
|
||||
* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
|
||||
* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
|
||||
* gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815
|
||||
* s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663
|
||||
|
||||
* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
|
||||
* zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
|
||||
* gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804
|
||||
|
||||
* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
|
||||
* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
|
||||
* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
|
||||
* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
|
||||
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
|
||||
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
|
||||
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
|
||||
* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
|
||||
|
||||
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
|
||||
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
|
||||
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
|
||||
* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
|
||||
* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
|
||||
* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
|
||||
|
||||
* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
|
||||
* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
|
||||
* s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
|
||||
* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
|
||||
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
|
||||
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
|
||||
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.15.x</summary>
|
||||
|
||||
* Jan 21st, 2023 (v1.15.15)
|
||||
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
|
||||
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
|
||||
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
|
||||
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
|
||||
|
||||
* Jan 3rd, 2023 (v1.15.14)
|
||||
|
||||
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
|
||||
* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
|
||||
* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
|
||||
* s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
|
||||
|
||||
* Dec 11, 2022 (v1.15.13)
|
||||
* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
|
||||
* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
|
||||
|
||||
* Oct 26, 2022 (v1.15.12)
|
||||
|
||||
* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
|
||||
* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
|
||||
|
||||
* Sept 26, 2022 (v1.15.11)
|
||||
|
||||
* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
|
||||
* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
|
||||
* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
|
||||
* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
|
||||
|
||||
* Sept 16, 2022 (v1.15.10)
|
||||
|
||||
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
|
||||
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
|
||||
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
|
||||
* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
|
||||
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
|
||||
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
|
||||
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
|
||||
* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
|
||||
|
||||
* July 21, 2022 (v1.15.9)
|
||||
|
||||
* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
|
||||
* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
|
||||
* zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
|
||||
|
||||
* July 13, 2022 (v1.15.8)
|
||||
|
||||
* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
|
||||
* s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
|
||||
* zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
|
||||
* zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
|
||||
* huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
|
||||
* zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
|
||||
* gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
|
||||
|
||||
* June 29, 2022 (v1.15.7)
|
||||
|
||||
* s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
|
||||
* zip: Merge upstream https://github.com/klauspost/compress/pull/631
|
||||
* zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
|
||||
* zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
|
||||
* flate: Faster histograms https://github.com/klauspost/compress/pull/620
|
||||
* deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
|
||||
|
||||
* June 3, 2022 (v1.15.6)
|
||||
* s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
|
||||
* s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
|
||||
* zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
|
||||
* zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
|
||||
* zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
|
||||
* gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
|
||||
* s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
|
||||
* s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
|
||||
* snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
|
||||
* s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
|
||||
|
||||
* May 25, 2022 (v1.15.5)
|
||||
* s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
|
||||
* s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
|
||||
* huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
|
||||
* zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
|
||||
* zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
|
||||
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
|
||||
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
|
||||
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
|
||||
* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
|
||||
|
||||
|
||||
* May 11, 2022 (v1.15.4)
|
||||
* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
|
||||
* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
|
||||
* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
|
||||
* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
|
||||
|
||||
* May 5, 2022 (v1.15.3)
|
||||
* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
|
||||
* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
|
||||
|
||||
* Apr 26, 2022 (v1.15.2)
|
||||
* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
|
||||
* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
|
||||
* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
|
||||
* Minimum version is Go 1.16, added CI test on 1.18.
|
||||
|
||||
* Mar 11, 2022 (v1.15.1)
|
||||
* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
|
||||
* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
|
||||
* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
|
||||
* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
|
||||
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
|
||||
|
||||
* Mar 3, 2022 (v1.15.0)
|
||||
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
|
||||
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
|
||||
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
|
||||
* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
|
||||
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
|
||||
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
|
||||
|
||||
Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
|
||||
|
||||
Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
|
||||
|
||||
While the release has been extensively tested, it is recommended to testing when upgrading.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.14.x</summary>
|
||||
|
||||
* Feb 22, 2022 (v1.14.4)
|
||||
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
|
||||
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
|
||||
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
|
||||
* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
|
||||
|
||||
* Feb 17, 2022 (v1.14.3)
|
||||
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
|
||||
* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
|
||||
* s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
|
||||
|
||||
* Jan 25, 2022 (v1.14.2)
|
||||
* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
|
||||
* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
|
||||
* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
|
||||
* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
|
||||
* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
|
||||
* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
|
||||
|
||||
* Jan 11, 2022 (v1.14.1)
|
||||
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
|
||||
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
|
||||
* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
|
||||
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
|
||||
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.13.x</summary>
|
||||
|
||||
* Aug 30, 2021 (v1.13.5)
|
||||
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
|
||||
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
|
||||
* zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
|
||||
* Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
|
||||
|
||||
* Aug 12, 2021 (v1.13.4)
|
||||
* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
|
||||
* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
|
||||
|
||||
* Aug 3, 2021 (v1.13.3)
|
||||
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
|
||||
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
|
||||
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
|
||||
* s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399)
|
||||
* zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401)
|
||||
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
|
||||
|
||||
* Jun 14, 2021 (v1.13.1)
|
||||
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
|
||||
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
|
||||
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
|
||||
* s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395)
|
||||
|
||||
* Jun 3, 2021 (v1.13.0)
|
||||
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
|
||||
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
|
||||
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.12.x</summary>
|
||||
|
||||
* May 25, 2021 (v1.12.3)
|
||||
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
|
||||
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
|
||||
* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
|
||||
|
||||
* Apr 27, 2021 (v1.12.2)
|
||||
* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
|
||||
* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
|
||||
* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
|
||||
* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358)
|
||||
* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
|
||||
* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
|
||||
|
||||
* Apr 14, 2021 (v1.12.1)
|
||||
* snappy package removed. Upstream added as dependency.
|
||||
* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
|
||||
* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
|
||||
* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
|
||||
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
|
||||
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
|
||||
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.11.x</summary>
|
||||
|
||||
* Mar 26, 2021 (v1.11.13)
|
||||
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
|
||||
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
|
||||
* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
|
||||
* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
|
||||
|
||||
* Mar 5, 2021 (v1.11.12)
|
||||
* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
|
||||
* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
|
||||
|
||||
* Mar 1, 2021 (v1.11.9)
|
||||
* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
|
||||
* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
|
||||
* s2: Fix binaries.
|
||||
|
||||
* Feb 25, 2021 (v1.11.8)
|
||||
* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
|
||||
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
|
||||
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
|
||||
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
|
||||
* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
|
||||
|
||||
* Jan 14, 2021 (v1.11.7)
|
||||
* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
|
||||
* s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
|
||||
* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
|
||||
* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
|
||||
* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
|
||||
|
||||
* Jan 7, 2021 (v1.11.6)
|
||||
* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
|
||||
* zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
|
||||
|
||||
* Dec 20, 2020 (v1.11.4)
|
||||
* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
|
||||
* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
|
||||
* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
|
||||
* Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
|
||||
* zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
|
||||
|
||||
* Nov 15, 2020 (v1.11.3)
|
||||
* inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293)
|
||||
* zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
|
||||
|
||||
* Oct 11, 2020 (v1.11.2)
|
||||
* s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
|
||||
|
||||
* Oct 1, 2020 (v1.11.1)
|
||||
* zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
|
||||
|
||||
* Sept 8, 2020 (v1.11.0)
|
||||
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
|
||||
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
|
||||
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.10.x</summary>
|
||||
|
||||
* July 8, 2020 (v1.10.11)
|
||||
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
|
||||
* huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
|
||||
|
||||
* June 23, 2020 (v1.10.10)
|
||||
* zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
|
||||
|
||||
* June 16, 2020 (v1.10.9):
|
||||
* zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
|
||||
* zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
|
||||
* Fuzzit tests removed. The service has been purchased and is no longer available.
|
||||
|
||||
* June 5, 2020 (v1.10.8):
|
||||
* 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
|
||||
|
||||
* June 1, 2020 (v1.10.7):
|
||||
* Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
|
||||
* Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259)
|
||||
* Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
|
||||
|
||||
* May 21, 2020: (v1.10.6)
|
||||
* zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
|
||||
* zstd: Stricter decompression checks.
|
||||
|
||||
* April 12, 2020: (v1.10.5)
|
||||
* s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
|
||||
|
||||
* Apr 8, 2020: (v1.10.4)
|
||||
* zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247)
|
||||
* Mar 11, 2020: (v1.10.3)
|
||||
* s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
|
||||
* s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
|
||||
* zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
|
||||
* zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
|
||||
* zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
|
||||
|
||||
* Feb 27, 2020: (v1.10.2)
|
||||
* Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
|
||||
* Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
|
||||
|
||||
* Feb 18, 2020: (v1.10.1)
|
||||
* Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
|
||||
* deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
|
||||
* Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
|
||||
|
||||
* Feb 4, 2020: (v1.10.0)
|
||||
* Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
|
||||
* Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218)
|
||||
* Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
|
||||
* Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes prior to v1.10.0</summary>
|
||||
|
||||
* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
|
||||
* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
|
||||
* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
|
||||
* Jan 4, 2020: (v1.9.6) Fixed regression in v1.9.5 causing corrupt zstd encodes in rare cases.
|
||||
* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
|
||||
* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
|
||||
* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
|
||||
* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
|
||||
* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
|
||||
* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
|
||||
* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
|
||||
* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
|
||||
* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
|
||||
* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
|
||||
* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
|
||||
* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
|
||||
* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
|
||||
* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
|
||||
* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
|
||||
* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
|
||||
* Nov 11, 2019: Reduce inflate memory use by 1KB.
|
||||
* Nov 10, 2019: Less allocations in deflate bit writer.
|
||||
* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
|
||||
* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
|
||||
* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
|
||||
* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
|
||||
* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes prior to v1.9.0</summary>
|
||||
|
||||
* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
|
||||
* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
|
||||
* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools)
|
||||
* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
|
||||
* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
|
||||
* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
|
||||
* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option.
|
||||
* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
|
||||
* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
|
||||
* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
|
||||
* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy.
|
||||
* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
|
||||
* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
|
||||
* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147)
|
||||
* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
|
||||
* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
|
||||
* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
|
||||
* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder.
|
||||
* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder.
|
||||
* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
|
||||
* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix.
|
||||
* June 17, 2019: zstd decompression bugfix.
|
||||
* June 17, 2019: fix 32 bit builds.
|
||||
* June 17, 2019: Easier use in modules (less dependencies).
|
||||
* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
|
||||
* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
|
||||
* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
|
||||
* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
|
||||
* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
|
||||
* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
|
||||
* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
|
||||
* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
|
||||
* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
|
||||
* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
|
||||
* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
|
||||
* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
|
||||
* May 28, 2017: Reduce allocations when resetting decoder.
|
||||
* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
|
||||
* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
|
||||
* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
|
||||
* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
|
||||
* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
|
||||
* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
|
||||
* Mar 24, 2016: Small speedup for level 1-3.
|
||||
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
|
||||
* Feb 19, 2016: Handle small payloads faster in level 1-3.
|
||||
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
|
||||
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
|
||||
* Feb 14, 2016: Snappy: Merge upstream changes.
|
||||
* Feb 14, 2016: Snappy: Fix aggressive skipping.
|
||||
* Feb 14, 2016: Snappy: Update benchmark.
|
||||
* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
|
||||
* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
|
||||
* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
|
||||
* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
|
||||
* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
|
||||
* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
|
||||
* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
|
||||
* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
|
||||
* Dec 8 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
|
||||
* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
|
||||
* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
|
||||
* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
|
||||
* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
|
||||
* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
|
||||
* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
|
||||
|
||||
</details>
|
||||
|
||||
# deflate usage
|
||||
|
||||
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
|
||||
|
||||
| old import | new import | Documentation
|
||||
|--------------------|-----------------------------------------|--------------------|
|
||||
| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
|
||||
| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
|
||||
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
|
||||
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
|
||||
|
||||
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
|
||||
|
||||
You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
|
||||
|
||||
The packages contain the same functionality as the standard library, so you can use their godoc for reference: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
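As a minimal sketch of the import swap described above (the only change from the standard library is the import path; the API is the same):

```go
package main

import (
	"bytes"
	"io"
	"log"

	gzip "github.com/klauspost/compress/gzip" // drop-in for "compress/gzip"
)

func main() {
	var buf bytes.Buffer

	// Compress exactly as with the standard library.
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello, hello, hello, hello")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress the same way.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	out, err := io.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("round-tripped %d bytes", len(out))
}
```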
|
||||
|
||||
Currently there is only minor speedup on decompression (mostly CRC32 calculation).
|
||||
|
||||
Memory usage is typically 1MB for a Writer. stdlib is in the same range.
|
||||
If you expect to have a lot of concurrently allocated Writers consider using
|
||||
the stateless compression described below.
|
||||
|
||||
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
|
||||
|
||||
To disable all assembly add `-tags=noasm`. This works across all packages.
|
||||
|
||||
# Stateless compression
|
||||
|
||||
This package offers stateless compression as a special option for gzip/deflate.
|
||||
It will do compression but without maintaining any state between Write calls.
|
||||
|
||||
This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
|
||||
|
||||
This is only relevant in cases where you expect to run many thousands of compressors concurrently,
|
||||
but with very little activity. This is *not* intended for regular web servers serving individual requests.
|
||||
|
||||
Because of this, the size of actual Write calls will affect output size.
|
||||
|
||||
In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
|
||||
|
||||
For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter). A small sketch follows at the end of this section.
|
||||
|
||||
A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
|
||||
|
||||
```go
|
||||
// replace 'ioutil.Discard' with your output.
|
||||
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer gzw.Close()
|
||||
|
||||
w := bufio.NewWriterSize(gzw, 4096)
|
||||
defer w.Flush()
|
||||
|
||||
// Write to 'w'
|
||||
```
|
||||
|
||||
This will only use up to 4KB in memory when the writer is idle.
|
||||
|
||||
Compression is almost always worse than the fastest compression level
|
||||
and each write will allocate (a little) memory.
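For the direct deflate use mentioned above, here is a minimal sketch assuming the `NewStatelessWriter` helper from the linked flate documentation (a sketch, not an official usage recommendation):

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// NewStatelessWriter keeps no state between Write calls, so each
	// Write is emitted as an independent deflate block.
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write([]byte("stateless deflate block")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}
```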
|
||||
|
||||
# Performance Update 2018
|
||||
|
||||
It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
|
||||
|
||||
The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
|
||||
|
||||
The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
|
||||
|
||||
The `gzstd` (standard library gzip) and `gzkp` (this package's gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
|
||||
|
||||
|
||||
## Overall differences.
|
||||
|
||||
There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
|
||||
|
||||
The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
|
||||
|
||||
This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" means "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
|
||||
|
||||
There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
|
||||
|
||||
## Web Content
|
||||
|
||||
This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, and CSS.
|
||||
|
||||
Since levels 1 and 9 are close to being the same code as in the standard library, the results at those levels are quite close. But looking at the levels in between, the differences are quite big.
|
||||
|
||||
Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
|
||||
|
||||
## Object files
|
||||
|
||||
This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
|
||||
|
||||
The picture is similar to the web content, but with small differences since this data is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
|
||||
|
||||
The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and lower speed than levels 6 and 7 of this package, respectively.
|
||||
|
||||
## Highly Compressible File
|
||||
|
||||
This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
|
||||
|
||||
It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
|
||||
|
||||
So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (a 2.75x slowdown), so it offers little "middle ground".
|
||||
|
||||
## Medium-High Compressible
|
||||
|
||||
This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
|
||||
|
||||
We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
|
||||
|
||||
## Medium Compressible
|
||||
|
||||
I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
|
||||
|
||||
The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
|
||||
|
||||
|
||||
## Un-compressible Content
|
||||
|
||||
This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
|
||||
|
||||
|
||||
## Huffman only compression
|
||||
|
||||
This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
|
||||
|
||||
This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, while rare characters like '¤' take more bits. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
|
||||
|
||||
Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
|
||||
|
||||
The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
|
||||
|
||||
The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4x speedup.
|
||||
|
||||
For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
|
||||
|
||||
This is implemented in Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip there.
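A minimal sketch of selecting this mode through the flate package (the `HuffmanOnly` constant also exists in the standard library; the speed characteristics described above apply to this package):

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// HuffmanOnly disables match searching entirely; only entropy
	// coding of individual bytes is performed.
	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		log.Fatal(err)
	}

	in := bytes.Repeat([]byte("abcabc "), 1000)
	if _, err := w.Write(in); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("%d -> %d bytes", len(in), buf.Len())
}
```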
|
||||
|
||||
# Other packages
|
||||
|
||||
Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
|
||||
|
||||
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
|
||||
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
|
||||
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
|
||||
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
|
||||
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
|
||||
* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
|
||||
* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
|
||||
|
||||
# license
|
||||
|
||||
This code is licensed under the same conditions as the original Go code. See LICENSE file.
|
25
gateway/vendor/github.com/klauspost/compress/SECURITY.md
generated
vendored
@ -1,25 +0,0 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Security updates are applied only to the latest release.
|
||||
|
||||
## Vulnerability Definition
|
||||
|
||||
A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
|
||||
|
||||
Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
|
||||
|
||||
Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
|
||||
|
||||
It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
|
||||
|
||||
Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
|
||||
|
||||
Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
|
||||
|
||||
This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
|
85
gateway/vendor/github.com/klauspost/compress/compressible.go
generated
vendored
@ -1,85 +0,0 @@
|
||||
package compress
|
||||
|
||||
import "math"
|
||||
|
||||
// Estimate returns a normalized compressibility estimate of block b.
|
||||
// Values close to zero are likely uncompressible.
|
||||
// Values above 0.1 are likely to be compressible.
|
||||
// Values above 0.5 are very compressible.
|
||||
// Very small lengths will return 0.
|
||||
func Estimate(b []byte) float64 {
|
||||
if len(b) < 16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Correctly predicted order 1
|
||||
hits := 0
|
||||
lastMatch := false
|
||||
var o1 [256]byte
|
||||
var hist [256]int
|
||||
c1 := byte(0)
|
||||
for _, c := range b {
|
||||
if c == o1[c1] {
|
||||
// We only count a hit if there were two correct predictions in a row.
|
||||
if lastMatch {
|
||||
hits++
|
||||
}
|
||||
lastMatch = true
|
||||
} else {
|
||||
lastMatch = false
|
||||
}
|
||||
o1[c1] = c
|
||||
c1 = c
|
||||
hist[c]++
|
||||
}
|
||||
|
||||
// Use x^0.6 to give better spread
|
||||
prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
|
||||
|
||||
// Calculate histogram distribution
|
||||
variance := float64(0)
|
||||
avg := float64(len(b)) / 256
|
||||
|
||||
for _, v := range hist {
|
||||
Δ := float64(v) - avg
|
||||
variance += Δ * Δ
|
||||
}
|
||||
|
||||
stddev := math.Sqrt(float64(variance)) / float64(len(b))
|
||||
exp := math.Sqrt(1 / float64(len(b)))
|
||||
|
||||
// Subtract expected stddev
|
||||
stddev -= exp
|
||||
if stddev < 0 {
|
||||
stddev = 0
|
||||
}
|
||||
stddev *= 1 + exp
|
||||
|
||||
// Use x^0.4 to give better spread
|
||||
entropy := math.Pow(stddev, 0.4)
|
||||
|
||||
// 50/50 weight between prediction and histogram distribution
|
||||
return math.Pow((prediction+entropy)/2, 0.9)
|
||||
}
|
||||
|
||||
// ShannonEntropyBits returns the number of bits minimum required to represent
|
||||
// an entropy encoding of the input bytes.
|
||||
// https://en.wiktionary.org/wiki/Shannon_entropy
|
||||
func ShannonEntropyBits(b []byte) int {
|
||||
if len(b) == 0 {
|
||||
return 0
|
||||
}
|
||||
var hist [256]int
|
||||
for _, c := range b {
|
||||
hist[c]++
|
||||
}
|
||||
shannon := float64(0)
|
||||
invTotal := 1.0 / float64(len(b))
|
||||
for _, v := range hist[:] {
|
||||
if v > 0 {
|
||||
n := float64(v)
|
||||
shannon += math.Ceil(-math.Log2(n*invTotal) * n)
|
||||
}
|
||||
}
|
||||
return int(math.Ceil(shannon))
|
||||
}
|
1017
gateway/vendor/github.com/klauspost/compress/flate/deflate.go
generated
vendored
File diff suppressed because it is too large
Load Diff
184
gateway/vendor/github.com/klauspost/compress/flate/dict_decoder.go
generated
vendored
@ -1,184 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
|
||||
// LZ77 decompresses data through sequences of two forms of commands:
|
||||
//
|
||||
// - Literal insertions: Runs of one or more symbols are inserted into the data
|
||||
// stream as is. This is accomplished through the writeByte method for a
|
||||
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
|
||||
// Any valid stream must start with a literal insertion if no preset dictionary
|
||||
// is used.
|
||||
//
|
||||
// - Backward copies: Runs of one or more symbols are copied from previously
|
||||
// emitted data. Backward copies come as the tuple (dist, length) where dist
|
||||
// determines how far back in the stream to copy from and length determines how
|
||||
// many bytes to copy. Note that it is valid for the length to be greater than
|
||||
// the distance. Since LZ77 uses forward copies, that situation is used to
|
||||
// perform a form of run-length encoding on repeated runs of symbols.
|
||||
// The writeCopy and tryWriteCopy are used to implement this command.
|
||||
//
|
||||
// For performance reasons, this implementation performs little to no sanity
|
||||
// checks about the arguments. As such, the invariants documented for each
|
||||
// method call must be respected.
|
||||
type dictDecoder struct {
|
||||
hist []byte // Sliding window history
|
||||
|
||||
// Invariant: 0 <= rdPos <= wrPos <= len(hist)
|
||||
wrPos int // Current output position in buffer
|
||||
rdPos int // Have emitted hist[:rdPos] already
|
||||
full bool // Has a full window length been written yet?
|
||||
}
|
||||
|
||||
// init initializes dictDecoder to have a sliding window dictionary of the given
|
||||
// size. If a preset dict is provided, it will initialize the dictionary with
|
||||
// the contents of dict.
|
||||
func (dd *dictDecoder) init(size int, dict []byte) {
|
||||
*dd = dictDecoder{hist: dd.hist}
|
||||
|
||||
if cap(dd.hist) < size {
|
||||
dd.hist = make([]byte, size)
|
||||
}
|
||||
dd.hist = dd.hist[:size]
|
||||
|
||||
if len(dict) > len(dd.hist) {
|
||||
dict = dict[len(dict)-len(dd.hist):]
|
||||
}
|
||||
dd.wrPos = copy(dd.hist, dict)
|
||||
if dd.wrPos == len(dd.hist) {
|
||||
dd.wrPos = 0
|
||||
dd.full = true
|
||||
}
|
||||
dd.rdPos = dd.wrPos
|
||||
}
|
||||
|
||||
// histSize reports the total amount of historical data in the dictionary.
|
||||
func (dd *dictDecoder) histSize() int {
|
||||
if dd.full {
|
||||
return len(dd.hist)
|
||||
}
|
||||
return dd.wrPos
|
||||
}
|
||||
|
||||
// availRead reports the number of bytes that can be flushed by readFlush.
|
||||
func (dd *dictDecoder) availRead() int {
|
||||
return dd.wrPos - dd.rdPos
|
||||
}
|
||||
|
||||
// availWrite reports the available amount of output buffer space.
|
||||
func (dd *dictDecoder) availWrite() int {
|
||||
return len(dd.hist) - dd.wrPos
|
||||
}
|
||||
|
||||
// writeSlice returns a slice of the available buffer to write data to.
|
||||
//
|
||||
// This invariant will be kept: len(s) <= availWrite()
|
||||
func (dd *dictDecoder) writeSlice() []byte {
|
||||
return dd.hist[dd.wrPos:]
|
||||
}
|
||||
|
||||
// writeMark advances the writer pointer by cnt.
|
||||
//
|
||||
// This invariant must be kept: 0 <= cnt <= availWrite()
|
||||
func (dd *dictDecoder) writeMark(cnt int) {
|
||||
dd.wrPos += cnt
|
||||
}
|
||||
|
||||
// writeByte writes a single byte to the dictionary.
|
||||
//
|
||||
// This invariant must be kept: 0 < availWrite()
|
||||
func (dd *dictDecoder) writeByte(c byte) {
|
||||
dd.hist[dd.wrPos] = c
|
||||
dd.wrPos++
|
||||
}
|
||||
|
||||
// writeCopy copies a string at a given (dist, length) to the output.
|
||||
// This returns the number of bytes copied and may be less than the requested
|
||||
// length if the available space in the output buffer is too small.
|
||||
//
|
||||
// This invariant must be kept: 0 < dist <= histSize()
|
||||
func (dd *dictDecoder) writeCopy(dist, length int) int {
|
||||
dstBase := dd.wrPos
|
||||
dstPos := dstBase
|
||||
srcPos := dstPos - dist
|
||||
endPos := dstPos + length
|
||||
if endPos > len(dd.hist) {
|
||||
endPos = len(dd.hist)
|
||||
}
|
||||
|
||||
// Copy non-overlapping section after destination position.
|
||||
//
|
||||
// This section is non-overlapping in that the copy length for this section
|
||||
// is always less than or equal to the backwards distance. This can occur
|
||||
// if a distance refers to data that wraps-around in the buffer.
|
||||
// Thus, a backwards copy is performed here; that is, the exact bytes in
|
||||
// the source prior to the copy is placed in the destination.
|
||||
if srcPos < 0 {
|
||||
srcPos += len(dd.hist)
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
|
||||
srcPos = 0
|
||||
}
|
||||
|
||||
// Copy possibly overlapping section before destination position.
|
||||
//
|
||||
// This section can overlap if the copy length for this section is larger
|
||||
// than the backwards distance. This is allowed by LZ77 so that repeated
|
||||
// strings can be succinctly represented using (dist, length) pairs.
|
||||
// Thus, a forwards copy is performed here; that is, the bytes copied is
|
||||
// possibly dependent on the resulting bytes in the destination as the copy
|
||||
// progresses along. This is functionally equivalent to the following:
|
||||
//
|
||||
// for i := 0; i < endPos-dstPos; i++ {
|
||||
// dd.hist[dstPos+i] = dd.hist[srcPos+i]
|
||||
// }
|
||||
// dstPos = endPos
|
||||
//
|
||||
for dstPos < endPos {
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
|
||||
}
|
||||
|
||||
dd.wrPos = dstPos
|
||||
return dstPos - dstBase
|
||||
}
|
||||
|
||||
// tryWriteCopy tries to copy a string at a given (distance, length) to the
|
||||
// output. This specialized version is optimized for short distances.
|
||||
//
|
||||
// This method is designed to be inlined for performance reasons.
|
||||
//
|
||||
// This invariant must be kept: 0 < dist <= histSize()
|
||||
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
|
||||
dstPos := dd.wrPos
|
||||
endPos := dstPos + length
|
||||
if dstPos < dist || endPos > len(dd.hist) {
|
||||
return 0
|
||||
}
|
||||
dstBase := dstPos
|
||||
srcPos := dstPos - dist
|
||||
|
||||
// Copy possibly overlapping section before destination position.
|
||||
loop:
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
|
||||
if dstPos < endPos {
|
||||
goto loop // Avoid for-loop so that this function can be inlined
|
||||
}
|
||||
|
||||
dd.wrPos = dstPos
|
||||
return dstPos - dstBase
|
||||
}
|
||||
|
||||
// readFlush returns a slice of the historical buffer that is ready to be
|
||||
// emitted to the user. The data returned by readFlush must be fully consumed
|
||||
// before calling any other dictDecoder methods.
|
||||
func (dd *dictDecoder) readFlush() []byte {
|
||||
toRead := dd.hist[dd.rdPos:dd.wrPos]
|
||||
dd.rdPos = dd.wrPos
|
||||
if dd.wrPos == len(dd.hist) {
|
||||
dd.wrPos, dd.rdPos = 0, 0
|
||||
dd.full = true
|
||||
}
|
||||
return toRead
|
||||
}
|
Some files were not shown because too many files have changed in this diff.