Mirror of https://github.com/openfaas/faasd.git (synced 2025-06-08 16:06:47 +00:00)
Update dependencies

* Updates netlink/netns
* Updates x/sys, arkade and apimachinery

Build passes, minor updates.

Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>

parent 60b724f014
commit 95792f8d58
go.mod (40 changed lines)
@@ -3,28 +3,28 @@ module github.com/openfaas/faasd

go 1.18

require (
-	github.com/alexellis/arkade v0.0.0-20220816075441-1a4d561feb3e
+	github.com/alexellis/arkade v0.0.0-20220922114024-7b7ade38cff9
	github.com/alexellis/go-execute v0.5.0
	github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05
	github.com/containerd/containerd v1.6.6
-	github.com/containerd/go-cni v1.1.6
+	github.com/containerd/go-cni v1.1.7
	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
-	github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d
+	github.com/docker/cli v20.10.17+incompatible
	github.com/docker/distribution v2.8.1+incompatible
-	github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible // indirect
-	github.com/docker/go-units v0.4.0
+	github.com/docker/docker v20.10.17+incompatible // indirect
+	github.com/docker/go-units v0.5.0
	github.com/gorilla/mux v1.8.0
	github.com/morikuni/aec v1.0.0
	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
	github.com/openfaas/faas-provider v0.19.1
-	github.com/openfaas/faas/gateway v0.0.0-20220811112234-2cdc7f308316
+	github.com/openfaas/faas/gateway v0.0.0-20220929193640-1a00a55c7703
	github.com/pkg/errors v0.9.1
	github.com/sethvargo/go-password v0.2.0
	github.com/spf13/cobra v1.5.0
	github.com/spf13/pflag v1.0.5
-	github.com/vishvananda/netlink v1.2.1-beta.2
-	github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74
-	golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab
+	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
+	github.com/vishvananda/netns v0.0.0-20220913150850-18c4f4234207
+	golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec
	k8s.io/apimachinery v0.24.3
)

@@ -39,42 +39,42 @@ require (
	github.com/containerd/ttrpc v1.1.0 // indirect
	github.com/containerd/typeurl v1.0.2 // indirect
	github.com/containernetworking/cni v1.1.1 // indirect
-	github.com/docker/docker-credential-helpers v0.6.3 // indirect
+	github.com/docker/docker-credential-helpers v0.6.4 // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
	github.com/gogo/googleapis v1.4.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/uuid v1.2.0 // indirect
+	github.com/google/uuid v1.3.0 // indirect
	github.com/imdario/mergo v0.3.12 // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/klauspost/compress v1.11.13 // indirect
+	github.com/klauspost/compress v1.15.8 // indirect
	github.com/mattn/go-shellwords v1.0.10 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
-	github.com/mitchellh/mapstructure v1.4.1 // indirect
+	github.com/mitchellh/mapstructure v1.4.2 // indirect
	github.com/moby/locker v1.0.1 // indirect
	github.com/moby/sys/mountinfo v0.5.0 // indirect
	github.com/moby/sys/signal v0.6.0 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
+	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
	github.com/opencontainers/runc v1.1.2 // indirect
	github.com/opencontainers/selinux v1.10.1 // indirect
-	github.com/prometheus/client_golang v1.12.2 // indirect
+	github.com/prometheus/client_golang v1.13.0 // indirect
	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.35.0 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
	go.opencensus.io v0.23.0 // indirect
	golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
-	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
+	golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect
	golang.org/x/text v0.3.7 // indirect
	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
	google.golang.org/grpc v1.43.0 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
)
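
The netlink and netns bumps called out in the commit message are the modules faasd relies on for container networking. Purely as an illustrative sketch (not code from this commit), the following shows the style of API these modules expose; the interface name "lo" is an arbitrary example:

package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netns"
)

func main() {
	// netlink and netns are Linux-only APIs.
	// Capture a handle to the current network namespace.
	ns, err := netns.Get()
	if err != nil {
		log.Fatalf("get netns: %s", err)
	}
	defer ns.Close()

	// Look up the loopback interface and print its IPv4 addresses.
	link, err := netlink.LinkByName("lo")
	if err != nil {
		log.Fatalf("lookup link: %s", err)
	}
	addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
	if err != nil {
		log.Fatalf("list addrs: %s", err)
	}
	for _, a := range addrs {
		fmt.Printf("%s %s\n", link.Attrs().Name, a.IPNet)
	}
}
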
vendor/github.com/containerd/continuity/pathdriver/path_driver.go (generated, vendored; file deleted, 101 lines)

@@ -1,101 +0,0 @@

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package pathdriver

import (
	"path/filepath"
)

// PathDriver provides all of the path manipulation functions in a common
// interface. The context should call these and never use the `filepath`
// package or any other package to manipulate paths.
type PathDriver interface {
	Join(paths ...string) string
	IsAbs(path string) bool
	Rel(base, target string) (string, error)
	Base(path string) string
	Dir(path string) string
	Clean(path string) string
	Split(path string) (dir, file string)
	Separator() byte
	Abs(path string) (string, error)
	Walk(string, filepath.WalkFunc) error
	FromSlash(path string) string
	ToSlash(path string) string
	Match(pattern, name string) (matched bool, err error)
}

// pathDriver is a simple default implementation calls the filepath package.
type pathDriver struct{}

// LocalPathDriver is the exported pathDriver struct for convenience.
var LocalPathDriver PathDriver = &pathDriver{}

func (*pathDriver) Join(paths ...string) string {
	return filepath.Join(paths...)
}

func (*pathDriver) IsAbs(path string) bool {
	return filepath.IsAbs(path)
}

func (*pathDriver) Rel(base, target string) (string, error) {
	return filepath.Rel(base, target)
}

func (*pathDriver) Base(path string) string {
	return filepath.Base(path)
}

func (*pathDriver) Dir(path string) string {
	return filepath.Dir(path)
}

func (*pathDriver) Clean(path string) string {
	return filepath.Clean(path)
}

func (*pathDriver) Split(path string) (dir, file string) {
	return filepath.Split(path)
}

func (*pathDriver) Separator() byte {
	return filepath.Separator
}

func (*pathDriver) Abs(path string) (string, error) {
	return filepath.Abs(path)
}

// Note that filepath.Walk calls os.Stat, so if the context wants to
// to call Driver.Stat() for Walk, they need to create a new struct that
// overrides this method.
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
	return filepath.Walk(root, walkFn)
}

func (*pathDriver) FromSlash(path string) string {
	return filepath.FromSlash(path)
}

func (*pathDriver) ToSlash(path string) string {
	return filepath.ToSlash(path)
}

func (*pathDriver) Match(pattern, name string) (bool, error) {
	return filepath.Match(pattern, name)
}
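
The pathdriver package above disappears from faasd's vendor tree with this update. For context, a minimal sketch of how downstream code consumed its exported LocalPathDriver before the removal (illustrative only, not part of the diff):

package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

func main() {
	// LocalPathDriver simply delegates every call to the standard filepath package.
	var pd pathdriver.PathDriver = pathdriver.LocalPathDriver

	joined := pd.Join("/var", "lib", "faasd")
	fmt.Println(joined, pd.IsAbs(joined)) // "/var/lib/faasd true" on Unix
}
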
vendor/github.com/docker/cli/AUTHORS (generated, vendored; 67 changed lines)
@ -8,20 +8,25 @@ Aaron.L.Xu <likexu@harmonycloud.cn>
|
||||
Abdur Rehman <abdur_rehman@mentor.com>
|
||||
Abhinandan Prativadi <abhi@docker.com>
|
||||
Abin Shahab <ashahab@altiscale.com>
|
||||
Abreto FU <public@abreto.email>
|
||||
Ace Tang <aceapril@126.com>
|
||||
Addam Hardy <addam.hardy@gmail.com>
|
||||
Adolfo Ochagavía <aochagavia92@gmail.com>
|
||||
Adrian Plata <adrian.plata@docker.com>
|
||||
Adrien Duermael <adrien@duermael.com>
|
||||
Adrien Folie <folie.adrien@gmail.com>
|
||||
Ahmet Alp Balkan <ahmetb@microsoft.com>
|
||||
Aidan Feldman <aidan.feldman@gmail.com>
|
||||
Aidan Hobson Sayers <aidanhs@cantab.net>
|
||||
AJ Bowen <aj@gandi.net>
|
||||
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
|
||||
AJ Bowen <aj@soulshake.net>
|
||||
Akhil Mohan <akhil.mohan@mayadata.io>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||
Akim Demaille <akim.demaille@docker.com>
|
||||
Alan Thompson <cloojure@gmail.com>
|
||||
Albert Callarisa <shark234@gmail.com>
|
||||
Albin Kerouanton <albin@akerouanton.name>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Aleksander Piotrowski <apiotrowski312@gmail.com>
|
||||
Alessandro Boch <aboch@tetrationanalytics.com>
|
||||
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
|
||||
Alex Mayer <amayer5125@gmail.com>
|
||||
@ -39,6 +44,7 @@ Amir Goldstein <amir73il@aquasec.com>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Amit Shukla <amit.shukla@docker.com>
|
||||
Amy Lindburg <amy.lindburg@docker.com>
|
||||
Anca Iordache <anca.iordache@docker.com>
|
||||
Anda Xu <anda.xu@docker.com>
|
||||
Andrea Luzzardi <aluzzardi@gmail.com>
|
||||
Andreas Köhler <andi5.py@gmx.net>
|
||||
@ -48,6 +54,7 @@ Andrew Macpherson <hopscotch23@gmail.com>
|
||||
Andrew McDonnell <bugs@andrewmcdonnell.net>
|
||||
Andrew Po <absourd.noise@gmail.com>
|
||||
Andrey Petrov <andrey.petrov@shazow.net>
|
||||
Andrii Berehuliak <berkusandrew@gmail.com>
|
||||
André Martins <aanm90@gmail.com>
|
||||
Andy Goldstein <agoldste@redhat.com>
|
||||
Andy Rothfusz <github@developersupport.net>
|
||||
@ -60,7 +67,9 @@ Antonis Kalipetis <akalipetis@gmail.com>
|
||||
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||
Ao Li <la9249@163.com>
|
||||
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||
Arko Dasgupta <arko.dasgupta@docker.com>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Arthur Peka <arthur.peka@outlook.com>
|
||||
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
||||
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
||||
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
||||
@ -86,6 +95,7 @@ Brent Salisbury <brent.salisbury@docker.com>
|
||||
Bret Fisher <bret@bretfisher.com>
|
||||
Brian (bex) Exelbierd <bexelbie@redhat.com>
|
||||
Brian Goff <cpuguy83@gmail.com>
|
||||
Brian Wieder <brian@4wieders.com>
|
||||
Bryan Bess <squarejaw@bsbess.com>
|
||||
Bryan Boreham <bjboreham@gmail.com>
|
||||
Bryan Murphy <bmurphy1976@gmail.com>
|
||||
@ -94,6 +104,7 @@ Cameron Spear <cameronspear@gmail.com>
|
||||
Cao Weiwei <cao.weiwei30@zte.com.cn>
|
||||
Carlo Mion <mion00@gmail.com>
|
||||
Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||
Carlos de Paula <me@carlosedp.com>
|
||||
Ce Gao <ce.gao@outlook.com>
|
||||
Cedric Davies <cedricda@microsoft.com>
|
||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
@ -127,25 +138,31 @@ Coenraad Loubser <coenraad@wish.org.za>
|
||||
Colin Hebert <hebert.colin@gmail.com>
|
||||
Collin Guarino <collin.guarino@gmail.com>
|
||||
Colm Hally <colmhally@gmail.com>
|
||||
Comical Derskeal <27731088+derskeal@users.noreply.github.com>
|
||||
Corey Farrell <git@cfware.com>
|
||||
Corey Quon <corey.quon@docker.com>
|
||||
Craig Wilhite <crwilhit@microsoft.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com>
|
||||
Daehyeok Mun <daehyeok@gmail.com>
|
||||
Dafydd Crosby <dtcrsby@gmail.com>
|
||||
Daisuke Ito <itodaisuke00@gmail.com>
|
||||
dalanlan <dalanlan925@gmail.com>
|
||||
Damien Nadé <github@livna.org>
|
||||
Dan Cotora <dan@bluevision.ro>
|
||||
Daniel Artine <daniel.artine@ufrj.br>
|
||||
Daniel Cassidy <mail@danielcassidy.me.uk>
|
||||
Daniel Dao <dqminh@cloudflare.com>
|
||||
Daniel Farrell <dfarrell@redhat.com>
|
||||
Daniel Gasienica <daniel@gasienica.ch>
|
||||
Daniel Goosen <daniel.goosen@surveysampling.com>
|
||||
Daniel Helfand <dhelfand@redhat.com>
|
||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||
Daniel J Walsh <dwalsh@redhat.com>
|
||||
Daniel Nephin <dnephin@docker.com>
|
||||
Daniel Norberg <dano@spotify.com>
|
||||
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
||||
Daniel Zhang <jmzwcn@gmail.com>
|
||||
Daniil Nikolenko <qoo2p5@gmail.com>
|
||||
Danny Berger <dpb587@gmail.com>
|
||||
Darren Shepherd <darren.s.shepherd@gmail.com>
|
||||
Darren Stahl <darst@microsoft.com>
|
||||
@ -178,13 +195,15 @@ Dima Stopel <dima@twistlock.com>
|
||||
Dimitry Andric <d.andric@activevideo.com>
|
||||
Ding Fei <dingfei@stars.org.cn>
|
||||
Diogo Monica <diogo@docker.com>
|
||||
Djordje Lukic <djordje.lukic@docker.com>
|
||||
Dmitry Gusev <dmitry.gusev@gmail.com>
|
||||
Dmitry Smirnov <onlyjob@member.fsf.org>
|
||||
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
|
||||
Dominik Braun <dominik.braun@nbsp.de>
|
||||
Don Kjer <don.kjer@gmail.com>
|
||||
Dong Chen <dongluo.chen@docker.com>
|
||||
Doug Davis <dug@us.ibm.com>
|
||||
Drew Erny <drew.erny@docker.com>
|
||||
Drew Erny <derny@mirantis.com>
|
||||
Ed Costello <epc@epcostello.com>
|
||||
Elango Sivanandam <elango.siva@docker.com>
|
||||
Eli Uriegas <eli.uriegas@docker.com>
|
||||
@ -215,6 +234,7 @@ Felix Rabe <felix@rabe.io>
|
||||
Filip Jareš <filipjares@gmail.com>
|
||||
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||
Florian Klein <florian.klein@free.fr>
|
||||
Forest Johnson <fjohnson@peoplenetonline.com>
|
||||
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
||||
François Scala <francois.scala@swiss-as.com>
|
||||
Fred Lifton <fred.lifton@docker.com>
|
||||
@ -231,6 +251,7 @@ George MacRorie <gmacr31@gmail.com>
|
||||
George Xie <georgexsh@gmail.com>
|
||||
Gianluca Borello <g.borello@gmail.com>
|
||||
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
||||
Goksu Toprak <goksu.toprak@docker.com>
|
||||
Gou Rao <gou@portworx.com>
|
||||
Grant Reaber <grant.reaber@gmail.com>
|
||||
Greg Pflaum <gpflaum@users.noreply.github.com>
|
||||
@ -245,6 +266,7 @@ Harald Albers <github@albersweb.de>
|
||||
Harold Cooper <hrldcpr@gmail.com>
|
||||
Harry Zhang <harryz@hyper.sh>
|
||||
He Simei <hesimei@zju.edu.cn>
|
||||
Hector S <hfsam88@gmail.com>
|
||||
Helen Xie <chenjg@harmonycloud.cn>
|
||||
Henning Sprang <henning.sprang@gmail.com>
|
||||
Henry N <henrynmail-github@yahoo.de>
|
||||
@ -252,6 +274,7 @@ Hernan Garcia <hernandanielg@gmail.com>
|
||||
Hongbin Lu <hongbin034@gmail.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Huayi Zhang <irachex@gmail.com>
|
||||
Hugo Gabriel Eyherabide <hugogabriel.eyherabide@gmail.com>
|
||||
huqun <huqun@zju.edu.cn>
|
||||
Huu Nguyen <huu@prismskylabs.com>
|
||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||
@ -293,7 +316,7 @@ Jeremy Unruh <jeremybunruh@gmail.com>
|
||||
Jeremy Yallop <yallop@docker.com>
|
||||
Jeroen Franse <jeroenfranse@gmail.com>
|
||||
Jesse Adametz <jesseadametz@gmail.com>
|
||||
Jessica Frazelle <jessfraz@google.com>
|
||||
Jessica Frazelle <jess@oxide.computer>
|
||||
Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||
Jie Luo <luo612@zju.edu.cn>
|
||||
@ -304,6 +327,7 @@ Jimmy Song <rootsongjc@gmail.com>
|
||||
jimmyxian <jimmyxian2004@yahoo.com.cn>
|
||||
Jintao Zhang <zhangjintao9020@gmail.com>
|
||||
Joao Fernandes <joao.fernandes@docker.com>
|
||||
Joe Abbey <joe.abbey@gmail.com>
|
||||
Joe Doliner <jdoliner@pachyderm.io>
|
||||
Joe Gordon <joe.gordon0@gmail.com>
|
||||
Joel Handwell <joelhandwell@gmail.com>
|
||||
@ -313,7 +337,7 @@ Johan Euphrosine <proppy@google.com>
|
||||
Johannes 'fish' Ziemke <github@freigeist.org>
|
||||
John Feminella <jxf@jxf.me>
|
||||
John Harris <john@johnharris.io>
|
||||
John Howard (VM) <John.Howard@microsoft.com>
|
||||
John Howard <github@lowenna.com>
|
||||
John Laswell <john.n.laswell@gmail.com>
|
||||
John Maguire <jmaguire@duosecurity.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
@ -322,12 +346,15 @@ John Stephens <johnstep@docker.com>
|
||||
John Tims <john.k.tims@gmail.com>
|
||||
John V. Martinez <jvmatl@gmail.com>
|
||||
John Willis <john.willis@docker.com>
|
||||
Jon Johnson <jonjohnson@google.com>
|
||||
Jonatas Baldin <jonatas.baldin@gmail.com>
|
||||
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||
Jonathan Lee <jonjohn1232009@gmail.com>
|
||||
Jonathan Lomas <jonathan@floatinglomas.ca>
|
||||
Jonathan McCrohan <jmccrohan@gmail.com>
|
||||
Jonh Wendell <jonh.wendell@redhat.com>
|
||||
Jordan Jennings <jjn2009@gmail.com>
|
||||
Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com>
|
||||
Joseph Kern <jkern@semafour.net>
|
||||
Josh Bodah <jb3689@yahoo.com>
|
||||
Josh Chorlton <jchorlton@gmail.com>
|
||||
@ -351,6 +378,7 @@ Kara Alexandra <kalexandra@us.ibm.com>
|
||||
Kareem Khazem <karkhaz@karkhaz.com>
|
||||
Karthik Nayak <Karthik.188@gmail.com>
|
||||
Kat Samperi <kat.samperi@gmail.com>
|
||||
Kathryn Spiers <kathryn@spiers.me>
|
||||
Katie McLaughlin <katie@glasnt.com>
|
||||
Ke Xu <leonhartx.k@gmail.com>
|
||||
Kei Ohmura <ohmura.kei@gmail.com>
|
||||
@ -364,6 +392,7 @@ Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||
Kevin Kirsche <Kev.Kirsche+GitHub@gmail.com>
|
||||
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||
Kevin Richardson <kevin@kevinrichardson.co>
|
||||
Kevin Woblick <mail@kovah.de>
|
||||
khaled souf <khaled.souf@gmail.com>
|
||||
Kim Eik <kim@heldig.org>
|
||||
Kir Kolyshkin <kolyshkin@gmail.com>
|
||||
@ -372,7 +401,6 @@ Krasi Georgiev <krasi@vip-consult.solutions>
|
||||
Kris-Mikael Krister <krismikael@protonmail.com>
|
||||
Kun Zhang <zkazure@gmail.com>
|
||||
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
||||
Kyle Spiers <kyle@spiers.me>
|
||||
Lachlan Cooper <lachlancooper@gmail.com>
|
||||
Lai Jiangshan <jiangshanlai@gmail.com>
|
||||
Lars Kellogg-Stedman <lars@redhat.com>
|
||||
@ -402,13 +430,16 @@ Luca Favatella <luca.favatella@erlang-solutions.com>
|
||||
Luca Marturana <lucamarturana@gmail.com>
|
||||
Lucas Chan <lucas-github@lucaschan.com>
|
||||
Luka Hartwig <mail@lukahartwig.de>
|
||||
Lukas Heeren <lukas-heeren@hotmail.com>
|
||||
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
||||
Lydell Manganti <LydellManganti@users.noreply.github.com>
|
||||
Lénaïc Huard <lhuard@amadeus.com>
|
||||
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
|
||||
Mabin <bin.ma@huawei.com>
|
||||
Maciej Kalisz <maciej.d.kalisz@gmail.com>
|
||||
Madhav Puri <madhav.puri@gmail.com>
|
||||
Madhu Venugopal <madhu@socketplane.io>
|
||||
Madhur Batra <madhurbatra097@gmail.com>
|
||||
Malte Janduda <mail@janduda.net>
|
||||
Manjunath A Kumatagi <mkumatag@in.ibm.com>
|
||||
Mansi Nahar <mmn4185@rit.edu>
|
||||
@ -418,6 +449,7 @@ Marco Mariani <marco.mariani@alterway.fr>
|
||||
Marco Vedovati <mvedovati@suse.com>
|
||||
Marcus Martins <marcus@docker.com>
|
||||
Marianna Tessel <mtesselh@gmail.com>
|
||||
Marius Ileana <marius.ileana@gmail.com>
|
||||
Marius Sturm <marius@graylog.com>
|
||||
Mark Oates <fl0yd@me.com>
|
||||
Marsh Macy <marsma@microsoft.com>
|
||||
@ -463,12 +495,14 @@ mikelinjie <294893458@qq.com>
|
||||
Mikhail Vasin <vasin@cloud-tv.ru>
|
||||
Milind Chawre <milindchawre@gmail.com>
|
||||
Mindaugas Rukas <momomg@gmail.com>
|
||||
Miroslav Gula <miroslav.gula@naytrolabs.com>
|
||||
Misty Stanley-Jones <misty@docker.com>
|
||||
Mohammad Banikazemi <mb@us.ibm.com>
|
||||
Mohammed Aaqib Ansari <maaquib@gmail.com>
|
||||
Mohini Anne Dsouza <mohini3917@gmail.com>
|
||||
Moorthy RS <rsmoorthy@gmail.com>
|
||||
Morgan Bauer <mbauer@us.ibm.com>
|
||||
Morten Hekkvang <morten.hekkvang@sbab.se>
|
||||
Moysés Borges <moysesb@gmail.com>
|
||||
Mrunal Patel <mrunalp@gmail.com>
|
||||
muicoder <muicoder@gmail.com>
|
||||
@ -499,9 +533,11 @@ Nishant Totla <nishanttotla@gmail.com>
|
||||
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
||||
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||
O.S. Tezer <ostezer@gmail.com>
|
||||
Odin Ugedal <odin@ugedal.com>
|
||||
ohmystack <jun.jiang02@ele.me>
|
||||
Olle Jonsson <olle.jonsson@gmail.com>
|
||||
Olli Janatuinen <olli.janatuinen@gmail.com>
|
||||
Oscar Wieman <oscrx@icloud.com>
|
||||
Otto Kekäläinen <otto@seravo.fi>
|
||||
Ovidio Mallo <ovidio.mallo@gmail.com>
|
||||
Pascal Borreli <pascal@borreli.com>
|
||||
@ -511,6 +547,7 @@ Patrick Lang <plang@microsoft.com>
|
||||
Paul <paul9869@gmail.com>
|
||||
Paul Kehrer <paul.l.kehrer@gmail.com>
|
||||
Paul Lietar <paul@lietar.net>
|
||||
Paul Mulders <justinkb@gmail.com>
|
||||
Paul Weaver <pauweave@cisco.com>
|
||||
Pavel Pospisil <pospispa@gmail.com>
|
||||
Paweł Szczekutowicz <pszczekutowicz@gmail.com>
|
||||
@ -537,6 +574,8 @@ Qiang Huang <h.huangqiang@huawei.com>
|
||||
Qinglan Peng <qinglanpeng@zju.edu.cn>
|
||||
qudongfang <qudongfang@gmail.com>
|
||||
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
|
||||
Rahul Zoldyck <rahulzoldyck@gmail.com>
|
||||
Ravi Shekhar Jethani <rsjethani@gmail.com>
|
||||
Ray Tsang <rayt@google.com>
|
||||
Reficul <xuzhenglun@gmail.com>
|
||||
Remy Suen <remy.suen@gmail.com>
|
||||
@ -548,11 +587,13 @@ Richard Scothern <richard.scothern@gmail.com>
|
||||
Rick Wieman <git@rickw.nl>
|
||||
Ritesh H Shukla <sritesh@vmware.com>
|
||||
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
|
||||
Rob Gulewich <rgulewich@netflix.com>
|
||||
Robert Wallis <smilingrob@gmail.com>
|
||||
Robin Naundorf <r.naundorf@fh-muenster.de>
|
||||
Robin Speekenbrink <robin@kingsquare.nl>
|
||||
Rodolfo Ortiz <rodolfo.ortiz@definityfirst.com>
|
||||
Rogelio Canedo <rcanedo@mappy.priv>
|
||||
Rohan Verma <hello@rohanverma.net>
|
||||
Roland Kammerer <roland.kammerer@linbit.com>
|
||||
Roman Dudin <katrmr@gmail.com>
|
||||
Rory Hunter <roryhunter2@gmail.com>
|
||||
@ -568,10 +609,14 @@ Sainath Grandhi <sainath.grandhi@intel.com>
|
||||
Sakeven Jiang <jc5930@sina.cn>
|
||||
Sally O'Malley <somalley@redhat.com>
|
||||
Sam Neirinck <sam@samneirinck.com>
|
||||
Samarth Shah <samashah@microsoft.com>
|
||||
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
||||
Sami Tabet <salph.tabet@gmail.com>
|
||||
Samuel Cochran <sj26@sj26.com>
|
||||
Samuel Karp <skarp@amazon.com>
|
||||
Santhosh Manohar <santhosh@docker.com>
|
||||
Sargun Dhillon <sargun@netflix.com>
|
||||
Saswat Bhattacharya <sas.saswat@gmail.com>
|
||||
Scott Brenner <scott@scottbrenner.me>
|
||||
Scott Collier <emailscottcollier@gmail.com>
|
||||
Sean Christopherson <sean.j.christopherson@intel.com>
|
||||
@ -592,6 +637,7 @@ sidharthamani <sid@rancher.com>
|
||||
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||
Simei He <hesimei@zju.edu.cn>
|
||||
Simon Ferquel <simon.ferquel@docker.com>
|
||||
Simon Heimberg <simon.heimberg@heimberg-ea.ch>
|
||||
Sindhu S <sindhus@live.in>
|
||||
Slava Semushin <semushin@redhat.com>
|
||||
Solomon Hykes <solomon@docker.com>
|
||||
@ -621,7 +667,10 @@ TAGOMORI Satoshi <tagomoris@gmail.com>
|
||||
taiji-tech <csuhqg@foxmail.com>
|
||||
Taylor Jones <monitorjbl@gmail.com>
|
||||
Tejaswini Duggaraju <naduggar@microsoft.com>
|
||||
Tengfei Wang <tfwang@alauda.io>
|
||||
Teppei Fukuda <knqyf263@gmail.com>
|
||||
Thatcher Peskens <thatcher@docker.com>
|
||||
Thibault Coupin <thibault.coupin@gmail.com>
|
||||
Thomas Gazagnaire <thomas@gazagnaire.org>
|
||||
Thomas Krzero <thomas.kovatchitch@gmail.com>
|
||||
Thomas Leonard <thomas.leonard@docker.com>
|
||||
@ -633,6 +682,7 @@ Tianyi Wang <capkurmagati@gmail.com>
|
||||
Tibor Vass <teabee89@gmail.com>
|
||||
Tim Dettrick <t.dettrick@uq.edu.au>
|
||||
Tim Hockin <thockin@google.com>
|
||||
Tim Sampson <tim@sampson.fi>
|
||||
Tim Smith <timbot@google.com>
|
||||
Tim Waugh <twaugh@redhat.com>
|
||||
Tim Wraight <tim.wraight@tangentlabs.co.uk>
|
||||
@ -657,9 +707,11 @@ Tristan Carel <tristan@cogniteev.com>
|
||||
Tycho Andersen <tycho@docker.com>
|
||||
Tycho Andersen <tycho@tycho.ws>
|
||||
uhayate <uhayate.gong@daocloud.io>
|
||||
Ulrich Bareth <ulrich.bareth@gmail.com>
|
||||
Ulysses Souza <ulysses.souza@docker.com>
|
||||
Umesh Yadav <umesh4257@gmail.com>
|
||||
Valentin Lorentz <progval+git@progval.net>
|
||||
Venkateswara Reddy Bukkasamudram <bukkasamudram@outlook.com>
|
||||
Veres Lajos <vlajos@gmail.com>
|
||||
Victor Vieux <victor.vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
@ -677,6 +729,7 @@ Wang Long <long.wanglong@huawei.com>
|
||||
Wang Ping <present.wp@icloud.com>
|
||||
Wang Xing <hzwangxing@corp.netease.com>
|
||||
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||
Wang Yumu <37442693@qq.com>
|
||||
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
|
||||
Wayne Song <wsong@docker.com>
|
||||
Wen Cheng Ma <wenchma@cn.ibm.com>
|
||||
@ -685,6 +738,7 @@ Wes Morgan <cap10morgan@gmail.com>
|
||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||
William Henry <whenry@redhat.com>
|
||||
Xianglin Gao <xlgao@zju.edu.cn>
|
||||
Xiaodong Liu <liuxiaodong@loongson.cn>
|
||||
Xiaodong Zhang <a4012017@sina.com>
|
||||
Xiaoxi He <xxhe@alauda.io>
|
||||
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
||||
@ -701,6 +755,7 @@ Yuan Sun <sunyuan3@huawei.com>
|
||||
Yue Zhang <zy675793960@yeah.net>
|
||||
Yunxiang Huang <hyxqshk@vip.qq.com>
|
||||
Zachary Romero <zacromero3@gmail.com>
|
||||
Zander Mackie <zmackie@gmail.com>
|
||||
zebrilee <zebrilee@gmail.com>
|
||||
Zhang Kun <zkazure@gmail.com>
|
||||
Zhang Wei <zhangwei555@huawei.com>
|
||||
|
vendor/github.com/docker/cli/NOTICE (generated, vendored; 2 changed lines)

@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.

 This product includes software developed at Docker, Inc. (https://www.docker.com).

-This product contains software (https://github.com/kr/pty) developed
+This product contains software (https://github.com/creack/pty) developed
 by Keith Rarick, licensed under the MIT License.

 The following is courtesy of our legal counsel:
vendor/github.com/docker/cli/cli/config/config.go (generated, vendored; 79 changed lines)

@@ -6,6 +6,7 @@ import (
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/docker/cli/cli/config/configfile"
	"github.com/docker/cli/cli/config/credentials"

@@ -23,17 +24,44 @@
)

var (
	configDir = os.Getenv("DOCKER_CONFIG")
	initConfigDir = new(sync.Once)
	configDir string
	homeDir string
)

func init() {
// resetHomeDir is used in testing to reset the "homeDir" package variable to
// force re-lookup of the home directory between tests.
func resetHomeDir() {
	homeDir = ""
}

func getHomeDir() string {
	if homeDir == "" {
		homeDir = homedir.Get()
	}
	return homeDir
}

// resetConfigDir is used in testing to reset the "configDir" package variable
// and its sync.Once to force re-lookup between tests.
func resetConfigDir() {
	configDir = ""
	initConfigDir = new(sync.Once)
}

func setConfigDir() {
	if configDir != "" {
		return
	}
	configDir = os.Getenv("DOCKER_CONFIG")
	if configDir == "" {
		configDir = filepath.Join(homedir.Get(), configFileDir)
		configDir = filepath.Join(getHomeDir(), configFileDir)
	}
}

// Dir returns the directory the configuration file is stored in
func Dir() string {
	initConfigDir.Do(setConfigDir)
	return configDir
}

@@ -80,6 +108,15 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
// the auth config information and returns values.
// FIXME: use the internal golang config parser
func Load(configDir string) (*configfile.ConfigFile, error) {
	cfg, _, err := load(configDir)
	return cfg, err
}

// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file
// so we can remove the bool return value and collapse this back into `Load`
func load(configDir string) (*configfile.ConfigFile, bool, error) {
	printLegacyFileWarning := false

	if configDir == "" {
		configDir = Dir()
	}

@@ -88,47 +125,41 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
	configFile := configfile.New(filename)

	// Try happy path first - latest config file
	if _, err := os.Stat(filename); err == nil {
		file, err := os.Open(filename)
		if err != nil {
			return configFile, errors.Wrap(err, filename)
		}
	if file, err := os.Open(filename); err == nil {
		defer file.Close()
		err = configFile.LoadFromReader(file)
		if err != nil {
			err = errors.Wrap(err, filename)
		}
		return configFile, err
		return configFile, printLegacyFileWarning, err
	} else if !os.IsNotExist(err) {
		// if file is there but we can't stat it for any reason other
		// than it doesn't exist then stop
		return configFile, errors.Wrap(err, filename)
		return configFile, printLegacyFileWarning, errors.Wrap(err, filename)
	}

	// Can't find latest config file so check for the old one
	confFile := filepath.Join(homedir.Get(), oldConfigfile)
	if _, err := os.Stat(confFile); err != nil {
		return configFile, nil //missing file is not an error
	filename = filepath.Join(getHomeDir(), oldConfigfile)
	if file, err := os.Open(filename); err == nil {
		printLegacyFileWarning = true
		defer file.Close()
		if err := configFile.LegacyLoadFromReader(file); err != nil {
			return configFile, printLegacyFileWarning, errors.Wrap(err, filename)
		}
	}
	file, err := os.Open(confFile)
	if err != nil {
		return configFile, errors.Wrap(err, filename)
	}
	defer file.Close()
	err = configFile.LegacyLoadFromReader(file)
	if err != nil {
		return configFile, errors.Wrap(err, filename)
	}
	return configFile, nil
	return configFile, printLegacyFileWarning, nil
}

// LoadDefaultConfigFile attempts to load the default config file and returns
// an initialized ConfigFile struct if none is found.
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
	configFile, err := Load(Dir())
	configFile, printLegacyFileWarning, err := load(Dir())
	if err != nil {
		fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
	}
	if printLegacyFileWarning {
		_, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release")
	}
	if !configFile.ContainsAuth() {
		configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
	}
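
The reworked config.go replaces an eager init() with package state that is resolved lazily and guarded by a sync.Once, falling back from the DOCKER_CONFIG environment variable to a dot-directory under the user's home. A standalone sketch of that pattern, using hypothetical names rather than the vendored code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

var (
	initDir sync.Once
	dir     string
)

// configDir resolves the directory exactly once: the environment variable
// wins, otherwise fall back to a dot-directory under the user's home.
func configDir() string {
	initDir.Do(func() {
		dir = os.Getenv("DOCKER_CONFIG")
		if dir == "" {
			home, _ := os.UserHomeDir()
			dir = filepath.Join(home, ".docker")
		}
	})
	return dir
}

func main() {
	fmt.Println(configDir())
}
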
vendor/github.com/docker/cli/cli/config/configfile/file.go (generated, vendored; 46 changed lines)

@@ -13,6 +13,7 @@ import (
	"github.com/docker/cli/cli/config/credentials"
	"github.com/docker/cli/cli/config/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const (

@@ -118,14 +119,16 @@ func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
// LoadFromReader reads the configuration data given and sets up the auth config
// information with given directory and populates the receiver object
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
	if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
	if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) {
		return err
	}
	var err error
	for addr, ac := range configFile.AuthConfigs {
		ac.Username, ac.Password, err = decodeAuth(ac.Auth)
		if err != nil {
			return err
		if ac.Auth != "" {
			ac.Username, ac.Password, err = decodeAuth(ac.Auth)
			if err != nil {
				return err
			}
		}
		ac.Auth = ""
		ac.ServerAddress = addr

@@ -166,6 +169,13 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
	configFile.AuthConfigs = tmpAuthConfigs
	defer func() { configFile.AuthConfigs = saveAuthConfigs }()

	// User-Agent header is automatically set, and should not be stored in the configuration
	for v := range configFile.HTTPHeaders {
		if strings.EqualFold(v, "User-Agent") {
			delete(configFile.HTTPHeaders, v)
		}
	}

	data, err := json.MarshalIndent(configFile, "", "\t")
	if err != nil {
		return err

@@ -175,7 +185,7 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
}

// Save encodes and writes out all the authorization information
func (configFile *ConfigFile) Save() error {
func (configFile *ConfigFile) Save() (retErr error) {
	if configFile.Filename == "" {
		return errors.Errorf("Can't save config with empty filename")
	}

@@ -188,13 +198,33 @@ func (configFile *ConfigFile) Save() error {
	if err != nil {
		return err
	}
	defer func() {
		temp.Close()
		if retErr != nil {
			if err := os.Remove(temp.Name()); err != nil {
				logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
			}
		}
	}()

	err = configFile.SaveToWriter(temp)
	temp.Close()
	if err != nil {
		os.Remove(temp.Name())
		return err
	}
	return os.Rename(temp.Name(), configFile.Filename)

	if err := temp.Close(); err != nil {
		return errors.Wrap(err, "error closing temp file")
	}

	// Handle situation where the configfile is a symlink
	cfgFile := configFile.Filename
	if f, err := os.Readlink(cfgFile); err == nil {
		cfgFile = f
	}

	// Try copying the current config file (if any) ownership and permissions
	copyFilePermissions(cfgFile, temp.Name())
	return os.Rename(temp.Name(), cfgFile)
}

// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
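
The new Save above writes to a temporary file, removes it if anything fails, resolves symlinks, copies ownership and permissions from the existing file, and only then renames the result into place so readers never see a partially written config. A simplified, generic sketch of that write-temp-then-rename pattern (not the vendored implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a temporary file in the target directory
// and renames it into place, so readers never observe a half-written file.
func writeFileAtomic(path string, data []byte, perm os.FileMode) (retErr error) {
	tmp, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)+".tmp")
	if err != nil {
		return err
	}
	defer func() {
		if retErr != nil {
			os.Remove(tmp.Name()) // clean up the temp file on any failure
		}
	}()

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp.Name(), perm); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	if err := writeFileAtomic("config.json", []byte(`{"auths":{}}`), 0o600); err != nil {
		fmt.Println("save failed:", err)
	}
}
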
vendor/github.com/docker/cli/cli/config/configfile/file_unix.go (generated, vendored; new file, 36 lines)

@@ -0,0 +1,36 @@

//go:build !windows
// +build !windows

package configfile

import (
	"os"
	"syscall"
)

// copyFilePermissions copies file ownership and permissions from "src" to "dst",
// ignoring any error during the process.
func copyFilePermissions(src, dst string) {
	var (
		mode     os.FileMode = 0600
		uid, gid int
	)

	fi, err := os.Stat(src)
	if err != nil {
		return
	}
	if fi.Mode().IsRegular() {
		mode = fi.Mode()
	}
	if err := os.Chmod(dst, mode); err != nil {
		return
	}

	uid = int(fi.Sys().(*syscall.Stat_t).Uid)
	gid = int(fi.Sys().(*syscall.Stat_t).Gid)

	if uid > 0 && gid > 0 {
		_ = os.Chown(dst, uid, gid)
	}
}
vendor/github.com/docker/cli/cli/config/configfile/file_windows.go (generated, vendored; new file, 5 lines)

@@ -0,0 +1,5 @@

package configfile

func copyFilePermissions(src, dst string) {
	// TODO implement for Windows
}
vendor/github.com/docker/cli/cli/config/credentials/default_store.go (generated, vendored; 2 changed lines)

@@ -1,7 +1,7 @@
 package credentials

 import (
-	"os/exec"
+	exec "golang.org/x/sys/execabs"
 )

 // DetectDefaultStore return the default credentials store for the platform if
vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go (generated, vendored; 1 changed line)

@@ -1,3 +1,4 @@
+//go:build !windows && !darwin && !linux
 // +build !windows,!darwin,!linux

 package credentials
vendor/github.com/docker/docker-credential-helpers/client/command.go (generated, vendored; 3 changed lines)

@@ -4,7 +4,8 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"os/exec"
+
+	exec "golang.org/x/sys/execabs"
 )

 // Program is an interface to execute external programs.
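
Both credential-helper call sites above swap os/exec for golang.org/x/sys/execabs, a drop-in replacement that refuses to resolve a bare command name to an executable in the current working directory (a PATH-lookup hardening that mostly matters on Windows). A small sketch of how the aliased import is used; the helper name docker-credential-example and its "list" argument are assumed for illustration:

package main

import (
	"fmt"

	exec "golang.org/x/sys/execabs"
)

func main() {
	// With execabs, LookPath returns an error instead of silently picking up
	// an executable from the current working directory.
	path, err := exec.LookPath("docker-credential-example")
	if err != nil {
		fmt.Println("helper not found on PATH:", err)
		return
	}

	out, err := exec.Command(path, "list").CombinedOutput()
	fmt.Println(string(out), err)
}
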
vendor/github.com/docker/docker-credential-helpers/credentials/version.go (generated, vendored; 2 changed lines)

@@ -1,4 +1,4 @@
 package credentials

 // Version holds a string describing the current version
-const Version = "0.6.3"
+const Version = "0.6.4"
vendor/github.com/docker/docker/AUTHORS (generated, vendored; 117 changed lines)
@ -4,6 +4,7 @@
|
||||
Aanand Prasad <aanand.prasad@gmail.com>
|
||||
Aaron Davidson <aaron@databricks.com>
|
||||
Aaron Feng <aaron.feng@gmail.com>
|
||||
Aaron Hnatiw <aaron@griddio.com>
|
||||
Aaron Huslage <huslage@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||
@ -17,6 +18,7 @@ Abhishek Chanda <abhishek.becs@gmail.com>
|
||||
Abhishek Sharma <abhishek@asharma.me>
|
||||
Abin Shahab <ashahab@altiscale.com>
|
||||
Adam Avilla <aavilla@yp.com>
|
||||
Adam Dobrawy <naczelnik@jawnosc.tk>
|
||||
Adam Eijdenberg <adam.eijdenberg@gmail.com>
|
||||
Adam Kunk <adam.kunk@tiaa-cref.org>
|
||||
Adam Miller <admiller@redhat.com>
|
||||
@ -43,6 +45,7 @@ AJ Bowen <aj@soulshake.net>
|
||||
Ajey Charantimath <ajey.charantimath@gmail.com>
|
||||
ajneu <ajneu@users.noreply.github.com>
|
||||
Akash Gupta <akagup@microsoft.com>
|
||||
Akhil Mohan <akhil.mohan@mayadata.io>
|
||||
Akihiro Matsushima <amatsusbit@gmail.com>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||
Akim Demaille <akim.demaille@docker.com>
|
||||
@ -50,10 +53,12 @@ Akira Koyasu <mail@akirakoyasu.net>
|
||||
Akshay Karle <akshay.a.karle@gmail.com>
|
||||
Al Tobey <al@ooyala.com>
|
||||
alambike <alambike@gmail.com>
|
||||
Alan Hoyle <alan@alanhoyle.com>
|
||||
Alan Scherger <flyinprogrammer@gmail.com>
|
||||
Alan Thompson <cloojure@gmail.com>
|
||||
Albert Callarisa <shark234@gmail.com>
|
||||
Albert Zhang <zhgwenming@gmail.com>
|
||||
Albin Kerouanton <albin@akerouanton.name>
|
||||
Alejandro González Hevia <alejandrgh11@gmail.com>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Aleksandrs Fadins <aleks@s-ko.net>
|
||||
@ -107,11 +112,13 @@ Amy Lindburg <amy.lindburg@docker.com>
|
||||
Anand Patil <anand.prabhakar.patil@gmail.com>
|
||||
AnandkumarPatel <anandkumarpatel@gmail.com>
|
||||
Anatoly Borodin <anatoly.borodin@gmail.com>
|
||||
Anca Iordache <anca.iordache@docker.com>
|
||||
Anchal Agrawal <aagrawa4@illinois.edu>
|
||||
Anda Xu <anda.xu@docker.com>
|
||||
Anders Janmyr <anders@janmyr.com>
|
||||
Andre Dublin <81dublin@gmail.com>
|
||||
Andre Granovsky <robotciti@live.com>
|
||||
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
|
||||
Andrea Luzzardi <aluzzardi@gmail.com>
|
||||
Andrea Turli <andrea.turli@gmail.com>
|
||||
Andreas Elvers <andreas@work.de>
|
||||
@ -176,8 +183,10 @@ Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||
apocas <petermdias@gmail.com>
|
||||
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||
ArikaChen <eaglesora@gmail.com>
|
||||
Arko Dasgupta <arko.dasgupta@docker.com>
|
||||
Arnaud Lefebvre <a.lefebvre@outlook.fr>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Arnaud Rebillout <arnaud.rebillout@collabora.com>
|
||||
Arthur Barr <arthur.barr@uk.ibm.com>
|
||||
Arthur Gautier <baloo@gandi.net>
|
||||
Artur Meyster <arthurfbi@yahoo.com>
|
||||
@ -210,10 +219,12 @@ Benjamin Atkin <ben@benatkin.com>
|
||||
Benjamin Baker <Benjamin.baker@utexas.edu>
|
||||
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
||||
Benjamin Yolken <yolken@stripe.com>
|
||||
Benny Ng <benny.tpng@gmail.com>
|
||||
Benoit Chesneau <bchesneau@gmail.com>
|
||||
Bernerd Schaefer <bj.schaefer@gmail.com>
|
||||
Bernhard M. Wiedemann <bwiedemann@suse.de>
|
||||
Bert Goethals <bert@bertg.be>
|
||||
Bertrand Roussel <broussel@sierrawireless.com>
|
||||
Bevisy Zhang <binbin36520@gmail.com>
|
||||
Bharath Thiruveedula <bharath_ves@hotmail.com>
|
||||
Bhiraj Butala <abhiraj.butala@gmail.com>
|
||||
@ -226,6 +237,7 @@ Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||
Blake Geno <blakegeno@gmail.com>
|
||||
Boaz Shuster <ripcurld.github@gmail.com>
|
||||
bobby abbott <ttobbaybbob@gmail.com>
|
||||
Boqin Qin <bobbqqin@gmail.com>
|
||||
Boris Pruessmann <boris@pruessmann.org>
|
||||
Boshi Lian <farmer1992@gmail.com>
|
||||
Bouke Haarsma <bouke@webatoom.nl>
|
||||
@ -279,6 +291,7 @@ Carl Loa Odin <carlodin@gmail.com>
|
||||
Carl X. Su <bcbcarl@gmail.com>
|
||||
Carlo Mion <mion00@gmail.com>
|
||||
Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||
Carlos de Paula <me@carlosedp.com>
|
||||
Carlos Sanchez <carlos@apache.org>
|
||||
Carol Fager-Higgins <carol.fager-higgins@docker.com>
|
||||
Cary <caryhartline@users.noreply.github.com>
|
||||
@ -328,6 +341,7 @@ Chris Gibson <chris@chrisg.io>
|
||||
Chris Khoo <chris.khoo@gmail.com>
|
||||
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
|
||||
Chris McKinnel <chrismckinnel@gmail.com>
|
||||
Chris Price <cprice@mirantis.com>
|
||||
Chris Seto <chriskseto@gmail.com>
|
||||
Chris Snow <chsnow123@gmail.com>
|
||||
Chris St. Pierre <chris.a.st.pierre@gmail.com>
|
||||
@ -354,7 +368,7 @@ Christopher Currie <codemonkey+github@gmail.com>
|
||||
Christopher Jones <tophj@linux.vnet.ibm.com>
|
||||
Christopher Latham <sudosurootdev@gmail.com>
|
||||
Christopher Rigor <crigor@gmail.com>
|
||||
Christy Perez <christy@linux.vnet.ibm.com>
|
||||
Christy Norman <christy@linux.vnet.ibm.com>
|
||||
Chun Chen <ramichen@tencent.com>
|
||||
Ciro S. Costa <ciro.costa@usp.br>
|
||||
Clayton Coleman <ccoleman@redhat.com>
|
||||
@ -374,8 +388,10 @@ Corey Farrell <git@cfware.com>
|
||||
Cory Forsyth <cory.forsyth@gmail.com>
|
||||
cressie176 <github@stephen-cresswell.net>
|
||||
CrimsonGlory <CrimsonGlory@users.noreply.github.com>
|
||||
Cristian Ariza <dev@cristianrz.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com>
|
||||
cristiano balducci <cristiano.balducci@gmail.com>
|
||||
Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
|
||||
Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
|
||||
CUI Wei <ghostplant@qq.com>
|
||||
Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
|
||||
@ -402,12 +418,14 @@ Dan Williams <me@deedubs.com>
|
||||
Dani Hodovic <dani.hodovic@gmail.com>
|
||||
Dani Louca <dani.louca@docker.com>
|
||||
Daniel Antlinger <d.antlinger@gmx.at>
|
||||
Daniel Black <daniel@linux.ibm.com>
|
||||
Daniel Dao <dqminh@cloudflare.com>
|
||||
Daniel Exner <dex@dragonslave.de>
|
||||
Daniel Farrell <dfarrell@redhat.com>
|
||||
Daniel Garcia <daniel@danielgarcia.info>
|
||||
Daniel Gasienica <daniel@gasienica.ch>
|
||||
Daniel Grunwell <mwgrunny@gmail.com>
|
||||
Daniel Helfand <helfand.4@gmail.com>
|
||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||
Daniel J Walsh <dwalsh@redhat.com>
|
||||
Daniel Menet <membership@sontags.ch>
|
||||
@ -417,12 +435,14 @@ Daniel Norberg <dano@spotify.com>
|
||||
Daniel Nordberg <dnordberg@gmail.com>
|
||||
Daniel Robinson <gottagetmac@gmail.com>
|
||||
Daniel S <dan.streby@gmail.com>
|
||||
Daniel Sweet <danieljsweet@icloud.com>
|
||||
Daniel Von Fange <daniel@leancoder.com>
|
||||
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
||||
Daniel X Moore <yahivin@gmail.com>
|
||||
Daniel YC Lin <dlin.tw@gmail.com>
|
||||
Daniel Zhang <jmzwcn@gmail.com>
|
||||
Danny Berger <dpb587@gmail.com>
|
||||
Danny Milosavljevic <dannym@scratchpost.org>
|
||||
Danny Yates <danny@codeaholics.org>
|
||||
Danyal Khaliq <danyal.khaliq@tenpearls.com>
|
||||
Darren Coxall <darren@darrencoxall.com>
|
||||
@ -487,6 +507,7 @@ Derek McGowan <derek@mcgstyle.net>
|
||||
Deric Crago <deric.crago@gmail.com>
|
||||
Deshi Xiao <dxiao@redhat.com>
|
||||
devmeyster <arthurfbi@yahoo.com>
|
||||
Devon Estes <devon.estes@klarna.com>
|
||||
Devvyn Murphy <devvyn@devvyn.com>
|
||||
Dharmit Shah <shahdharmit@gmail.com>
|
||||
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
|
||||
@ -516,6 +537,8 @@ Dmitry Smirnov <onlyjob@member.fsf.org>
|
||||
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
|
||||
Dmitry Vorobev <dimahabr@gmail.com>
|
||||
Dolph Mathews <dolph.mathews@gmail.com>
|
||||
Dominic Tubach <dominic.tubach@to.com>
|
||||
Dominic Yin <yindongchao@inspur.com>
|
||||
Dominik Dingel <dingel@linux.vnet.ibm.com>
|
||||
Dominik Finkbeiner <finkes93@gmail.com>
|
||||
Dominik Honnef <dominik@honnef.co>
|
||||
@ -534,7 +557,7 @@ Douglas Curtis <dougcurtis1@gmail.com>
|
||||
Dr Nic Williams <drnicwilliams@gmail.com>
|
||||
dragon788 <dragon788@users.noreply.github.com>
|
||||
Dražen Lučanin <kermit666@gmail.com>
|
||||
Drew Erny <drew.erny@docker.com>
|
||||
Drew Erny <derny@mirantis.com>
|
||||
Drew Hubl <drew.hubl@gmail.com>
|
||||
Dustin Sallings <dustin@spy.net>
|
||||
Ed Costello <epc@epcostello.com>
|
||||
@ -584,6 +607,7 @@ Erik Weathers <erikdw@gmail.com>
|
||||
Erno Hopearuoho <erno.hopearuoho@gmail.com>
|
||||
Erwin van der Koogh <info@erronis.nl>
|
||||
Ethan Bell <ebgamer29@gmail.com>
|
||||
Ethan Mosbaugh <ethan@replicated.com>
|
||||
Euan Kemp <euan.kemp@coreos.com>
|
||||
Eugen Krizo <eugen.krizo@gmail.com>
|
||||
Eugene Yakubovich <eugene.yakubovich@coreos.com>
|
||||
@ -595,6 +619,7 @@ Evan Phoenix <evan@fallingsnow.net>
|
||||
Evan Wies <evan@neomantra.net>
|
||||
Evelyn Xu <evelynhsu21@gmail.com>
|
||||
Everett Toews <everett.toews@rackspace.com>
|
||||
Evgeniy Makhrov <e.makhrov@corp.badoo.com>
|
||||
Evgeny Shmarnev <shmarnev@gmail.com>
|
||||
Evgeny Vereshchagin <evvers@ya.ru>
|
||||
Ewa Czechowska <ewa@ai-traders.com>
|
||||
@ -620,6 +645,7 @@ Fareed Dudhia <fareeddudhia@googlemail.com>
|
||||
Fathi Boudra <fathi.boudra@linaro.org>
|
||||
Federico Gimenez <fgimenez@coit.es>
|
||||
Felipe Oliveira <felipeweb.programador@gmail.com>
|
||||
Felipe Ruhland <felipe.ruhland@gmail.com>
|
||||
Felix Abecassis <fabecassis@nvidia.com>
|
||||
Felix Geisendörfer <felix@debuggable.com>
|
||||
Felix Hupfeld <felix@quobyte.com>
|
||||
@ -640,6 +666,7 @@ Florian <FWirtz@users.noreply.github.com>
|
||||
Florian Klein <florian.klein@free.fr>
|
||||
Florian Maier <marsmensch@users.noreply.github.com>
|
||||
Florian Noeding <noeding@adobe.com>
|
||||
Florian Schmaus <flo@geekplace.eu>
|
||||
Florian Weingarten <flo@hackvalue.de>
|
||||
Florin Asavoaie <florin.asavoaie@gmail.com>
|
||||
Florin Patan <florinpatan@gmail.com>
|
||||
@ -654,6 +681,7 @@ Frank Groeneveld <frank@ivaldi.nl>
|
||||
Frank Herrmann <fgh@4gh.tv>
|
||||
Frank Macreery <frank@macreery.com>
|
||||
Frank Rosquin <frank.rosquin+github@gmail.com>
|
||||
frankyang <yyb196@gmail.com>
|
||||
Fred Lifton <fred.lifton@docker.com>
|
||||
Frederick F. Kautz IV <fkautz@redhat.com>
|
||||
Frederik Loeffert <frederik@zitrusmedia.de>
|
||||
@ -675,7 +703,7 @@ Gareth Rushgrove <gareth@morethanseven.net>
|
||||
Garrett Barboza <garrett@garrettbarboza.com>
|
||||
Gary Schaetz <gary@schaetzkc.com>
|
||||
Gaurav <gaurav.gosec@gmail.com>
|
||||
gautam, prasanna <prasannagautam@gmail.com>
|
||||
Gaurav Singh <gaurav1086@gmail.com>
|
||||
Gaël PORTAY <gael.portay@savoirfairelinux.com>
|
||||
Genki Takiuchi <genki@s21g.com>
|
||||
GennadySpb <lipenkov@gmail.com>
|
||||
@ -701,11 +729,12 @@ Gleb M Borisov <borisov.gleb@gmail.com>
|
||||
Glyn Normington <gnormington@gopivotal.com>
|
||||
GoBella <caili_welcome@163.com>
|
||||
Goffert van Gool <goffert@phusion.nl>
|
||||
Goldwyn Rodrigues <rgoldwyn@suse.com>
|
||||
Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
|
||||
Gosuke Miyashita <gosukenator@gmail.com>
|
||||
Gou Rao <gou@portworx.com>
|
||||
Govinda Fichtner <govinda.fichtner@googlemail.com>
|
||||
Grant Millar <grant@cylo.io>
|
||||
Grant Millar <rid@cylo.io>
|
||||
Grant Reaber <grant.reaber@gmail.com>
|
||||
Graydon Hoare <graydon@pobox.com>
|
||||
Greg Fausak <greg@tacodata.com>
|
||||
@ -724,14 +753,17 @@ Guruprasad <lgp171188@gmail.com>
|
||||
Gustav Sinder <gustav.sinder@gmail.com>
|
||||
gwx296173 <gaojing3@huawei.com>
|
||||
Günter Zöchbauer <guenter@gzoechbauer.com>
|
||||
Haichao Yang <yang.haichao@zte.com.cn>
|
||||
haikuoliu <haikuo@amazon.com>
|
||||
Hakan Özler <hakan.ozler@kodcu.com>
|
||||
Hamish Hutchings <moredhel@aoeu.me>
|
||||
Hannes Ljungberg <hannes@5monkeys.se>
|
||||
Hans Kristian Flaatten <hans@starefossen.com>
|
||||
Hans Rødtang <hansrodtang@gmail.com>
|
||||
Hao Shu Wei <haosw@cn.ibm.com>
|
||||
Hao Zhang <21521210@zju.edu.cn>
|
||||
Harald Albers <github@albersweb.de>
|
||||
Harald Niesche <harald@niesche.de>
|
||||
Harley Laue <losinggeneration@gmail.com>
|
||||
Harold Cooper <hrldcpr@gmail.com>
|
||||
Harrison Turton <harrisonturton@gmail.com>
|
||||
@ -751,9 +783,13 @@ Hobofan <goisser94@gmail.com>
|
||||
Hollie Teal <hollie@docker.com>
|
||||
Hong Xu <hong@topbug.net>
|
||||
Hongbin Lu <hongbin034@gmail.com>
|
||||
Hongxu Jia <hongxu.jia@windriver.com>
|
||||
Honza Pokorny <me@honza.ca>
|
||||
Hsing-Hui Hsu <hsinghui@amazon.com>
|
||||
hsinko <21551195@zju.edu.cn>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Hu Tao <hutao@cn.fujitsu.com>
|
||||
HuanHuan Ye <logindaveye@gmail.com>
|
||||
Huanzhong Zhang <zhanghuanzhong90@gmail.com>
|
||||
Huayi Zhang <irachex@gmail.com>
|
||||
Hugo Duncan <hugo@hugoduncan.org>
|
||||
@ -790,6 +826,7 @@ Ingo Gottwald <in.gottwald@gmail.com>
|
||||
Innovimax <innovimax@gmail.com>
|
||||
Isaac Dupree <antispam@idupree.com>
|
||||
Isabel Jimenez <contact.isabeljimenez@gmail.com>
|
||||
Isaiah Grace <irgkenya4@gmail.com>
|
||||
Isao Jonas <isao.jonas@gmail.com>
|
||||
Iskander Sharipov <quasilyte@gmail.com>
|
||||
Ivan Babrou <ibobrik@gmail.com>
|
||||
@ -805,6 +842,7 @@ Jacob Edelman <edelman.jd@gmail.com>
|
||||
Jacob Tomlinson <jacob@tom.linson.uk>
|
||||
Jacob Vallejo <jakeev@amazon.com>
|
||||
Jacob Wen <jian.w.wen@oracle.com>
|
||||
Jaime Cepeda <jcepedavillamayor@gmail.com>
|
||||
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||
Jake Champlin <jake.champlin.27@gmail.com>
|
||||
Jake Moshenko <jake@devtable.com>
|
||||
@ -819,12 +857,13 @@ James Kyburz <james.kyburz@gmail.com>
|
||||
James Kyle <james@jameskyle.org>
|
||||
James Lal <james@lightsofapollo.com>
|
||||
James Mills <prologic@shortcircuit.net.au>
|
||||
James Nesbitt <james.nesbitt@wunderkraut.com>
|
||||
James Nesbitt <jnesbitt@mirantis.com>
|
||||
James Nugent <james@jen20.com>
|
||||
James Turnbull <james@lovedthanlost.net>
|
||||
James Watkins-Harvey <jwatkins@progi-media.com>
|
||||
Jamie Hannaford <jamie@limetree.org>
|
||||
Jamshid Afshar <jafshar@yahoo.com>
|
||||
Jan Chren <dev.rindeal@gmail.com>
|
||||
Jan Keromnes <janx@linux.com>
|
||||
Jan Koprowski <jan.koprowski@gmail.com>
|
||||
Jan Pazdziora <jpazdziora@redhat.com>
|
||||
@ -839,6 +878,7 @@ Jared Hocutt <jaredh@netapp.com>
|
||||
Jaroslaw Zabiello <hipertracker@gmail.com>
|
||||
jaseg <jaseg@jaseg.net>
|
||||
Jasmine Hegman <jasmine@jhegman.com>
|
||||
Jason A. Donenfeld <Jason@zx2c4.com>
|
||||
Jason Divock <jdivock@gmail.com>
|
||||
Jason Giedymin <jasong@apache.org>
|
||||
Jason Green <Jason.Green@AverInformatics.Com>
|
||||
@ -886,7 +926,7 @@ Jeroen Franse <jeroenfranse@gmail.com>
|
||||
Jeroen Jacobs <github@jeroenj.be>
|
||||
Jesse Dearing <jesse.dearing@gmail.com>
|
||||
Jesse Dubay <jesse@thefortytwo.net>
|
||||
Jessica Frazelle <acidburn@microsoft.com>
|
||||
Jessica Frazelle <jess@oxide.computer>
|
||||
Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||
Jhon Honce <jhonce@redhat.com>
|
||||
Ji.Zhilong <zhilongji@gmail.com>
|
||||
@ -894,9 +934,11 @@ Jian Liao <jliao@alauda.io>
|
||||
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||
Jiang Jinyang <jjyruby@gmail.com>
|
||||
Jie Luo <luo612@zju.edu.cn>
|
||||
Jie Ma <jienius@outlook.com>
|
||||
Jihyun Hwang <jhhwang@telcoware.com>
|
||||
Jilles Oldenbeuving <ojilles@gmail.com>
|
||||
Jim Alateras <jima@comware.com.au>
|
||||
Jim Ehrismann <jim.ehrismann@docker.com>
|
||||
Jim Galasyn <jim.galasyn@docker.com>
|
||||
Jim Minter <jminter@redhat.com>
|
||||
Jim Perrin <jperrin@centos.org>
|
||||
@ -934,7 +976,7 @@ John Feminella <jxf@jxf.me>
|
||||
John Gardiner Myers <jgmyers@proofpoint.com>
|
||||
John Gossman <johngos@microsoft.com>
|
||||
John Harris <john@johnharris.io>
|
||||
John Howard (VM) <John.Howard@microsoft.com>
|
||||
John Howard <github@lowenna.com>
|
||||
John Laswell <john.n.laswell@gmail.com>
|
||||
John Maguire <jmaguire@duosecurity.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
@ -948,6 +990,8 @@ John Willis <john.willis@docker.com>
|
||||
Jon Johnson <jonjohnson@google.com>
|
||||
Jon Surrell <jon.surrell@gmail.com>
|
||||
Jon Wedaman <jweede@gmail.com>
|
||||
Jonas Dohse <jonas@dohse.ch>
|
||||
Jonas Heinrich <Jonas@JonasHeinrich.com>
|
||||
Jonas Pfenniger <jonas@pfenniger.name>
|
||||
Jonathan A. Schweder <jonathanschweder@gmail.com>
|
||||
Jonathan A. Sternberg <jonathansternberg@gmail.com>
|
||||
@ -997,10 +1041,13 @@ Julien Dubois <julien.dubois@gmail.com>
|
||||
Julien Kassar <github@kassisol.com>
|
||||
Julien Maitrehenry <julien.maitrehenry@me.com>
|
||||
Julien Pervillé <julien.perville@perfect-memory.com>
|
||||
Julien Pivotto <roidelapluie@inuits.eu>
|
||||
Julio Guerra <julio@sqreen.com>
|
||||
Julio Montes <imc.coder@gmail.com>
|
||||
Jun-Ru Chang <jrjang@gmail.com>
|
||||
Jussi Nummelin <jussi.nummelin@gmail.com>
|
||||
Justas Brazauskas <brazauskasjustas@gmail.com>
|
||||
Justen Martin <jmart@the-coder.com>
|
||||
Justin Cormack <justin.cormack@docker.com>
|
||||
Justin Force <justin.force@gmail.com>
|
||||
Justin Menga <justin.menga@gmail.com>
|
||||
@ -1009,6 +1056,7 @@ Justin Simonelis <justin.p.simonelis@gmail.com>
|
||||
Justin Terry <juterry@microsoft.com>
|
||||
Justyn Temme <justyntemme@gmail.com>
|
||||
Jyrki Puttonen <jyrkiput@gmail.com>
|
||||
Jérémy Leherpeur <amenophis@leherpeur.net>
|
||||
Jérôme Petazzoni <jerome.petazzoni@docker.com>
|
||||
Jörg Thalheim <joerg@higgsboson.tk>
|
||||
K. Heller <pestophagous@gmail.com>
|
||||
@ -1046,6 +1094,7 @@ Ken Reese <krrgithub@gmail.com>
|
||||
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||
Kenjiro Nakayama <nakayamakenjiro@gmail.com>
|
||||
Kent Johnson <kentoj@gmail.com>
|
||||
Kenta Tada <Kenta.Tada@sony.com>
|
||||
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
|
||||
Kevin Burke <kev@inburke.com>
|
||||
Kevin Clark <kevin.clark@gmail.com>
|
||||
@ -1056,6 +1105,7 @@ Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||
Kevin Menard <kevin@nirvdrum.com>
|
||||
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||
Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
|
||||
Kevin Parsons <kevpar@microsoft.com>
|
||||
Kevin Richardson <kevin@kevinrichardson.co>
|
||||
Kevin Shi <kshi@andrew.cmu.edu>
|
||||
Kevin Wallace <kevin@pentabarf.net>
|
||||
@ -1146,6 +1196,7 @@ longliqiang88 <394564827@qq.com>
|
||||
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
|
||||
Lorenzo Fontana <fontanalorenz@gmail.com>
|
||||
Lotus Fenn <fenn.lotus@gmail.com>
|
||||
Louis Delossantos <ldelossa.ld@gmail.com>
|
||||
Louis Opter <kalessin@kalessin.fr>
|
||||
Luca Favatella <luca.favatella@erlang-solutions.com>
|
||||
Luca Marturana <lucamarturana@gmail.com>
|
||||
@ -1154,9 +1205,11 @@ Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
|
||||
Lucas Chan <lucas-github@lucaschan.com>
|
||||
Lucas Chi <lucas@teacherspayteachers.com>
|
||||
Lucas Molas <lmolas@fundacionsadosky.org.ar>
|
||||
Lucas Silvestre <lukas.silvestre@gmail.com>
|
||||
Luciano Mores <leslau@gmail.com>
|
||||
Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
|
||||
Luiz Svoboda <luizek@gmail.com>
|
||||
Lukas Heeren <lukas-heeren@hotmail.com>
|
||||
Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
|
||||
lukaspustina <lukas.pustina@centerdevice.com>
|
||||
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
||||
@ -1256,6 +1309,7 @@ Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
||||
Mattias Jernberg <nostrad@gmail.com>
|
||||
Mauricio Garavaglia <mauricio@medallia.com>
|
||||
mauriyouth <mauriyouth@gmail.com>
|
||||
Max Harmathy <max.harmathy@web.de>
|
||||
Max Shytikov <mshytikov@gmail.com>
|
||||
Maxim Fedchyshyn <sevmax@gmail.com>
|
||||
Maxim Ivanov <ivanov.maxim@gmail.com>
|
||||
@ -1296,6 +1350,7 @@ Michael Stapelberg <michael+gh@stapelberg.de>
|
||||
Michael Steinert <mike.steinert@gmail.com>
|
||||
Michael Thies <michaelthies78@gmail.com>
|
||||
Michael West <mwest@mdsol.com>
|
||||
Michael Zhao <michael.zhao@arm.com>
|
||||
Michal Fojtik <mfojtik@redhat.com>
|
||||
Michal Gebauer <mishak@mishak.net>
|
||||
Michal Jemala <michal.jemala@gmail.com>
|
||||
@ -1312,6 +1367,7 @@ Miguel Morales <mimoralea@gmail.com>
|
||||
Mihai Borobocea <MihaiBorob@gmail.com>
|
||||
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
|
||||
Mike Brown <brownwm@us.ibm.com>
|
||||
Mike Bush <mpbush@gmail.com>
|
||||
Mike Casas <mkcsas0@gmail.com>
|
||||
Mike Chelen <michael.chelen@gmail.com>
|
||||
Mike Danese <mikedanese@google.com>
|
||||
@ -1380,6 +1436,7 @@ Neyazul Haque <nuhaque@gmail.com>
|
||||
Nghia Tran <nghia@google.com>
|
||||
Niall O'Higgins <niallo@unworkable.org>
|
||||
Nicholas E. Rabenau <nerab@gmx.at>
|
||||
Nick Adcock <nick.adcock@docker.com>
|
||||
Nick DeCoursin <n.decoursin@foodpanda.com>
|
||||
Nick Irvine <nfirvine@nfirvine.com>
|
||||
Nick Neisen <nwneisen@gmail.com>
|
||||
@ -1403,6 +1460,7 @@ Nik Nyby <nikolas@gnu.org>
|
||||
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||
NikolaMandic <mn080202@gmail.com>
|
||||
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||
Nikolay Edigaryev <edigaryev@gmail.com>
|
||||
Nikolay Milovanov <nmil@itransformers.net>
|
||||
Nirmal Mehta <nirmalkmehta@gmail.com>
|
||||
Nishant Totla <nishanttotla@gmail.com>
|
||||
@ -1418,6 +1476,7 @@ Nuutti Kotivuori <naked@iki.fi>
|
||||
nzwsch <hi@nzwsch.com>
|
||||
O.S. Tezer <ostezer@gmail.com>
|
||||
objectified <objectified@gmail.com>
|
||||
Odin Ugedal <odin@ugedal.com>
|
||||
Oguz Bilgic <fisyonet@gmail.com>
|
||||
Oh Jinkyun <tintypemolly@gmail.com>
|
||||
Ohad Schneider <ohadschn@users.noreply.github.com>
|
||||
@ -1428,6 +1487,7 @@ Oliver Reason <oli@overrateddev.co>
|
||||
Olivier Gambier <dmp42@users.noreply.github.com>
|
||||
Olle Jonsson <olle.jonsson@gmail.com>
|
||||
Olli Janatuinen <olli.janatuinen@gmail.com>
|
||||
Olly Pomeroy <oppomeroy@gmail.com>
|
||||
Omri Shiv <Omri.Shiv@teradata.com>
|
||||
Oriol Francès <oriolfa@gmail.com>
|
||||
Oskar Niburski <oskarniburski@gmail.com>
|
||||
@ -1437,6 +1497,7 @@ Ovidio Mallo <ovidio.mallo@gmail.com>
|
||||
Panagiotis Moustafellos <pmoust@elastic.co>
|
||||
Paolo G. Giarrusso <p.giarrusso@gmail.com>
|
||||
Pascal <pascalgn@users.noreply.github.com>
|
||||
Pascal Bach <pascal.bach@siemens.com>
|
||||
Pascal Borreli <pascal@borreli.com>
|
||||
Pascal Hartig <phartig@rdrei.net>
|
||||
Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||
@ -1461,6 +1522,7 @@ Paul Nasrat <pnasrat@gmail.com>
|
||||
Paul Weaver <pauweave@cisco.com>
|
||||
Paulo Ribeiro <paigr.io@gmail.com>
|
||||
Pavel Lobashov <ShockwaveNN@gmail.com>
|
||||
Pavel Matěja <pavel@verotel.cz>
|
||||
Pavel Pletenev <cpp.create@gmail.com>
|
||||
Pavel Pospisil <pospispa@gmail.com>
|
||||
Pavel Sutyrin <pavel.sutyrin@gmail.com>
|
||||
@ -1572,6 +1634,7 @@ Riku Voipio <riku.voipio@linaro.org>
|
||||
Riley Guerin <rileytg.dev@gmail.com>
|
||||
Ritesh H Shukla <sritesh@vmware.com>
|
||||
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
|
||||
Rob Gulewich <rgulewich@netflix.com>
|
||||
Rob Vesse <rvesse@dotnetrdf.org>
|
||||
Robert Bachmann <rb@robertbachmann.at>
|
||||
Robert Bittle <guywithnose@gmail.com>
|
||||
@ -1580,11 +1643,13 @@ Robert Schneider <mail@shakeme.info>
|
||||
Robert Stern <lexandro2000@gmail.com>
|
||||
Robert Terhaar <rterhaar@atlanticdynamic.com>
|
||||
Robert Wallis <smilingrob@gmail.com>
|
||||
Robert Wang <robert@arctic.tw>
|
||||
Roberto G. Hashioka <roberto.hashioka@docker.com>
|
||||
Roberto Muñoz Fernández <robertomf@gmail.com>
|
||||
Robin Naundorf <r.naundorf@fh-muenster.de>
|
||||
Robin Schneider <ypid@riseup.net>
|
||||
Robin Speekenbrink <robin@kingsquare.nl>
|
||||
Robin Thoni <robin@rthoni.com>
|
||||
robpc <rpcann@gmail.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Rodrigo Vaz <rodrigo.vaz@gmail.com>
|
||||
@ -1599,6 +1664,7 @@ Roland Kammerer <roland.kammerer@linbit.com>
|
||||
Roland Moriz <rmoriz@users.noreply.github.com>
|
||||
Roma Sokolov <sokolov.r.v@gmail.com>
|
||||
Roman Dudin <katrmr@gmail.com>
|
||||
Roman Mazur <roman@balena.io>
|
||||
Roman Strashkin <roman.strashkin@gmail.com>
|
||||
Ron Smits <ron.smits@gmail.com>
|
||||
Ron Williams <ron.a.williams@gmail.com>
|
||||
@ -1618,6 +1684,7 @@ Rozhnov Alexandr <nox73@ya.ru>
|
||||
Rudolph Gottesheim <r.gottesheim@loot.at>
|
||||
Rui Cao <ruicao@alauda.io>
|
||||
Rui Lopes <rgl@ruilopes.com>
|
||||
Ruilin Li <liruilin4@huawei.com>
|
||||
Runshen Zhu <runshen.zhu@gmail.com>
|
||||
Russ Magee <rmagee@gmail.com>
|
||||
Ryan Abrams <rdabrams@gmail.com>
|
||||
@ -1656,6 +1723,7 @@ Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
|
||||
Sam Neirinck <sam@samneirinck.com>
|
||||
Sam Reis <sreis@atlassian.com>
|
||||
Sam Rijs <srijs@airpost.net>
|
||||
Sam Whited <sam@samwhited.com>
|
||||
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
||||
Sami Wagiaalla <swagiaal@redhat.com>
|
||||
Samuel Andaya <samuel@andaya.net>
|
||||
@ -1670,6 +1738,7 @@ sapphiredev <se.imas.kr@gmail.com>
|
||||
Sargun Dhillon <sargun@netflix.com>
|
||||
Sascha Andres <sascha.andres@outlook.com>
|
||||
Sascha Grunert <sgrunert@suse.com>
|
||||
SataQiu <qiushida@beyondcent.com>
|
||||
Satnam Singh <satnam@raintown.org>
|
||||
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
|
||||
Satoshi Tagomori <tagomoris@gmail.com>
|
||||
@ -1718,6 +1787,7 @@ Shijun Qin <qinshijun16@mails.ucas.ac.cn>
|
||||
Shishir Mahajan <shishir.mahajan@redhat.com>
|
||||
Shoubhik Bose <sbose78@gmail.com>
|
||||
Shourya Sarcar <shourya.sarcar@gmail.com>
|
||||
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
|
||||
shuai-z <zs.broccoli@gmail.com>
|
||||
Shukui Yang <yangshukui@huawei.com>
|
||||
Shuwei Hao <haosw@cn.ibm.com>
|
||||
@ -1728,6 +1798,7 @@ Silas Sewell <silas@sewell.org>
|
||||
Silvan Jegen <s.jegen@gmail.com>
|
||||
Simão Reis <smnrsti@gmail.com>
|
||||
Simei He <hesimei@zju.edu.cn>
|
||||
Simon Barendse <simon.barendse@gmail.com>
|
||||
Simon Eskildsen <sirup@sirupsen.com>
|
||||
Simon Ferquel <simon.ferquel@docker.com>
|
||||
Simon Leinen <simon.leinen@gmail.com>
|
||||
@ -1736,6 +1807,7 @@ Simon Taranto <simon.taranto@gmail.com>
|
||||
Simon Vikstrom <pullreq@devsn.se>
|
||||
Sindhu S <sindhus@live.in>
|
||||
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
|
||||
skanehira <sho19921005@gmail.com>
|
||||
Solganik Alexander <solganik@gmail.com>
|
||||
Solomon Hykes <solomon@docker.com>
|
||||
Song Gao <song@gao.io>
|
||||
@ -1747,18 +1819,21 @@ Sridatta Thatipamala <sthatipamala@gmail.com>
|
||||
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
||||
Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
|
||||
Staf Wagemakers <staf@wagemakers.be>
|
||||
Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
|
||||
Stanislav Levin <slev@altlinux.org>
|
||||
Steeve Morin <steeve.morin@gmail.com>
|
||||
Stefan Berger <stefanb@linux.vnet.ibm.com>
|
||||
Stefan J. Wernli <swernli@microsoft.com>
|
||||
Stefan Praszalowicz <stefan@greplin.com>
|
||||
Stefan S. <tronicum@user.github.com>
|
||||
Stefan Scherer <scherer_stefan@icloud.com>
|
||||
Stefan Scherer <stefan.scherer@docker.com>
|
||||
Stefan Staudenmeyer <doerte@instana.com>
|
||||
Stefan Weil <sw@weilnetz.de>
|
||||
Stephan Spindler <shutefan@gmail.com>
|
||||
Stephen Benjamin <stephen@redhat.com>
|
||||
Stephen Crosby <stevecrozz@gmail.com>
|
||||
Stephen Day <stephen.day@docker.com>
|
||||
Stephen Day <stevvooe@gmail.com>
|
||||
Stephen Drake <stephen@xenolith.net>
|
||||
Stephen Rust <srust@blockbridge.com>
|
||||
Steve Desmond <steve@vtsv.ca>
|
||||
@ -1773,10 +1848,12 @@ Steven Iveson <sjiveson@outlook.com>
|
||||
Steven Merrill <steven.merrill@gmail.com>
|
||||
Steven Richards <steven@axiomzen.co>
|
||||
Steven Taylor <steven.taylor@me.com>
|
||||
Stig Larsson <stig@larsson.dev>
|
||||
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||
Sujith Haridasan <sujith.h@gmail.com>
|
||||
Sun Gengze <690388648@qq.com>
|
||||
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||
Sune Keller <sune.keller@gmail.com>
|
||||
Sunny Gogoi <indiasuny000@gmail.com>
|
||||
Suryakumar Sudar <surya.trunks@gmail.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au>
|
||||
@ -1827,6 +1904,8 @@ Tianyi Wang <capkurmagati@gmail.com>
|
||||
Tibor Vass <teabee89@gmail.com>
|
||||
Tiffany Jernigan <tiffany.f.j@gmail.com>
|
||||
Tiffany Low <tiffany@box.com>
|
||||
Till Wegmüller <toasterson@gmail.com>
|
||||
Tim <elatllat@gmail.com>
|
||||
Tim Bart <tim@fewagainstmany.com>
|
||||
Tim Bosse <taim@bosboot.org>
|
||||
Tim Dettrick <t.dettrick@uq.edu.au>
|
||||
@ -1878,7 +1957,7 @@ Tony Miller <mcfiredrill@gmail.com>
|
||||
toogley <toogley@mailbox.org>
|
||||
Torstein Husebø <torstein@huseboe.net>
|
||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||
tpng <benny.tpng@gmail.com>
|
||||
Trace Andreason <tandreason@gmail.com>
|
||||
tracylihui <793912329@qq.com>
|
||||
Trapier Marshall <trapier.marshall@docker.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
@ -1901,6 +1980,7 @@ Utz Bacher <utz.bacher@de.ibm.com>
|
||||
vagrant <vagrant@ubuntu-14.04-amd64-vbox>
|
||||
Vaidas Jablonskis <jablonskis@gmail.com>
|
||||
vanderliang <lansheng@meili-inc.com>
|
||||
Velko Ivanov <vivanov@deeperplane.com>
|
||||
Veres Lajos <vlajos@gmail.com>
|
||||
Victor Algaze <valgaze@gmail.com>
|
||||
Victor Coisne <victor.coisne@dotcloud.com>
|
||||
@ -1912,11 +1992,13 @@ Victor Palma <palma.victor@gmail.com>
|
||||
Victor Vieux <victor.vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
Vijaya Kumar K <vijayak@caviumnetworks.com>
|
||||
Vikram bir Singh <vsingh@mirantis.com>
|
||||
Viktor Stanchev <me@viktorstanchev.com>
|
||||
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
|
||||
VinayRaghavanKS <raghavan.vinay@gmail.com>
|
||||
Vincent Batts <vbatts@redhat.com>
|
||||
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
||||
Vincent Boulineau <vincent.boulineau@datadoghq.com>
|
||||
Vincent Demeester <vincent.demeester@docker.com>
|
||||
Vincent Giersch <vincent.giersch@ovh.net>
|
||||
Vincent Mayers <vincent.mayers@inbloom.org>
|
||||
@ -1947,6 +2029,8 @@ Wang Long <long.wanglong@huawei.com>
|
||||
Wang Ping <present.wp@icloud.com>
|
||||
Wang Xing <hzwangxing@corp.netease.com>
|
||||
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||
Wang Yumu <37442693@qq.com>
|
||||
wanghuaiqing <wanghuaiqing@loongson.cn>
|
||||
Ward Vandewege <ward@jhvc.com>
|
||||
WarheadsSE <max@warheads.net>
|
||||
Wassim Dhif <wassimdhif@gmail.com>
|
||||
@ -1963,12 +2047,14 @@ Wen Cheng Ma <wenchma@cn.ibm.com>
|
||||
Wendel Fleming <wfleming@usc.edu>
|
||||
Wenjun Tang <tangwj2@lenovo.com>
|
||||
Wenkai Yin <yinw@vmware.com>
|
||||
wenlxie <wenlxie@ebay.com>
|
||||
Wentao Zhang <zhangwentao234@huawei.com>
|
||||
Wenxuan Zhao <viz@linux.com>
|
||||
Wenyu You <21551128@zju.edu.cn>
|
||||
Wenzhi Liang <wenzhi.liang@gmail.com>
|
||||
Wes Morgan <cap10morgan@gmail.com>
|
||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||
Wiktor Kwapisiewicz <wiktor@metacode.biz>
|
||||
Will Dietz <w@wdtz.org>
|
||||
Will Rouesnel <w.rouesnel@gmail.com>
|
||||
Will Weaver <monkey@buildingbananas.com>
|
||||
@ -1979,6 +2065,8 @@ William Hubbs <w.d.hubbs@gmail.com>
|
||||
William Martin <wmartin@pivotal.io>
|
||||
William Riancho <wr.wllm@gmail.com>
|
||||
William Thurston <thurstw@amazon.com>
|
||||
Wilson Júnior <wilsonpjunior@gmail.com>
|
||||
Wing-Kam Wong <wingkwong.code@gmail.com>
|
||||
WiseTrem <shepelyov.g@gmail.com>
|
||||
Wolfgang Powisch <powo@powo.priv.at>
|
||||
Wonjun Kim <wonjun.kim@navercorp.com>
|
||||
@ -1988,6 +2076,7 @@ Xianglin Gao <xlgao@zju.edu.cn>
|
||||
Xianlu Bird <xianlubird@gmail.com>
|
||||
Xiao YongBiao <xyb4638@gmail.com>
|
||||
XiaoBing Jiang <s7v7nislands@gmail.com>
|
||||
Xiaodong Liu <liuxiaodong@loongson.cn>
|
||||
Xiaodong Zhang <a4012017@sina.com>
|
||||
Xiaoxi He <xxhe@alauda.io>
|
||||
Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
|
||||
@ -1996,6 +2085,7 @@ xichengliudui <1693291525@qq.com>
|
||||
xiekeyang <xiekeyang@huawei.com>
|
||||
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
|
||||
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
||||
Xinfeng Liu <xinfeng.liu@gmail.com>
|
||||
Xinzi Zhou <imdreamrunner@gmail.com>
|
||||
Xiuming Chen <cc@cxm.cc>
|
||||
Xuecong Liao <satorulogic@gmail.com>
|
||||
@ -2010,6 +2100,7 @@ Yang Pengfei <yangpengfei4@huawei.com>
|
||||
yangchenliang <yangchenliang@huawei.com>
|
||||
Yanqiang Miao <miao.yanqiang@zte.com.cn>
|
||||
Yao Zaiyong <yaozaiyong@hotmail.com>
|
||||
Yash Murty <yashmurty@gmail.com>
|
||||
Yassine Tijani <yasstij11@gmail.com>
|
||||
Yasunori Mahata <nori@mahata.net>
|
||||
Yazhong Liu <yorkiefixer@gmail.com>
|
||||
@ -2024,6 +2115,7 @@ Yongxin Li <yxli@alauda.io>
|
||||
Yongzhi Pan <panyongzhi@gmail.com>
|
||||
Yosef Fertel <yfertel@gmail.com>
|
||||
You-Sheng Yang (楊有勝) <vicamo@gmail.com>
|
||||
youcai <omegacoleman@gmail.com>
|
||||
Youcef YEKHLEF <yyekhlef@gmail.com>
|
||||
Yu Changchun <yuchangchun1@huawei.com>
|
||||
Yu Chengxia <yuchengxia@huawei.com>
|
||||
@ -2055,11 +2147,13 @@ Zhenan Ye <21551168@zju.edu.cn>
|
||||
zhenghenghuo <zhenghenghuo@zju.edu.cn>
|
||||
Zhenhai Gao <gaozh1988@live.com>
|
||||
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
||||
zhipengzuo <zuozhipeng@baidu.com>
|
||||
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
|
||||
Zhu Kunjia <zhu.kunjia@zte.com.cn>
|
||||
Zhuoyun Wei <wzyboy@wzyboy.org>
|
||||
Ziheng Liu <lzhfromustc@gmail.com>
|
||||
Zilin Du <zilin.du@gmail.com>
|
||||
zimbatm <zimbatm@zimbatm.com>
|
||||
Ziming Dong <bnudzm@foxmail.com>
|
||||
@ -2068,12 +2162,13 @@ zmarouf <zeid.marouf@gmail.com>
|
||||
Zoltan Tombol <zoltan.tombol@gmail.com>
|
||||
Zou Yu <zouyu7@huawei.com>
|
||||
zqh <zqhxuyuan@gmail.com>
|
||||
Zuhayr Elahi <elahi.zuhayr@gmail.com>
|
||||
Zuhayr Elahi <zuhayr.elahi@docker.com>
|
||||
Zunayed Ali <zunayed@gmail.com>
|
||||
Álex González <agonzalezro@gmail.com>
|
||||
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
||||
Átila Camurça Alves <camurca.home@gmail.com>
|
||||
尹吉峰 <jifeng.yin@gmail.com>
|
||||
屈骏 <qujun@tiduyun.com>
|
||||
徐俊杰 <paco.xu@daocloud.io>
|
||||
慕陶 <jihui.xjh@alibaba-inc.com>
|
||||
搏通 <yufeng.pyf@alibaba-inc.com>
|
||||
|
16
vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
generated
vendored
@ -5,24 +5,8 @@ import (
"os"
"path/filepath"
"strings"

"github.com/docker/docker/pkg/idtools"
)

// GetStatic returns the home directory for the current user without calling
// os/user.Current(). This is useful for static-linked binary on glibc-based
// system, because a call to os/user.Current() in a static binary leads to
// segfault due to a glibc issue that won't be fixed in a short term.
// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
func GetStatic() (string, error) {
uid := os.Getuid()
usr, err := idtools.LookupUID(uid)
if err != nil {
return "", err
}
return usr.Home, nil
}

// GetRuntimeDir returns XDG_RUNTIME_DIR.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
7
vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !linux
// +build !linux

package homedir // import "github.com/docker/docker/pkg/homedir"
@ -6,12 +7,6 @@ import (
"errors"
)

// GetStatic is not needed for non-linux systems.
// (Precisely, it is needed only for glibc-based linux systems.)
func GetStatic() (string, error) {
return "", errors.New("homedir.GetStatic() is not supported on this system")
}

// GetRuntimeDir is unsupported on non-linux system.
func GetRuntimeDir() (string, error) {
return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
13
vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
generated
vendored
@ -1,11 +1,11 @@
//go:build !windows
// +build !windows

package homedir // import "github.com/docker/docker/pkg/homedir"

import (
"os"

"github.com/opencontainers/runc/libcontainer/user"
"os/user"
)

// Key returns the env var name for the user's home dir based on
@ -17,11 +17,16 @@ func Key() string {
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
//
// If linking statically with cgo enabled against glibc, ensure the
// osusergo build tag is used.
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
func Get() string {
home := os.Getenv(Key())
if home == "" {
if u, err := user.CurrentUser(); err == nil {
return u.Home
if u, err := user.Current(); err == nil {
return u.HomeDir
}
}
return home
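The hunk above shows the vendored homedir package switching from libcontainer's user lookup to the standard library's os/user. As a rough, hypothetical sketch (not the vendored code itself, the function names are illustrative), the resulting lookup order is: environment variable first, then os/user.Current():

// Minimal sketch of the lookup order used by the updated homedir.Get():
// prefer $HOME (or %USERPROFILE% on Windows) and fall back to os/user.
// Illustrative only, not the vendored implementation.
package main

import (
	"fmt"
	"os"
	"os/user"
	"runtime"
)

func homeDir() string {
	key := "HOME"
	if runtime.GOOS == "windows" {
		key = "USERPROFILE"
	}
	if home := os.Getenv(key); home != "" {
		return home
	}
	// Fallback: ask the standard library. When linking statically with cgo
	// against glibc, the osusergo build tag keeps this lookup in pure Go.
	if u, err := user.Current(); err == nil {
		return u.HomeDir
	}
	return ""
}

func main() {
	fmt.Println(homeDir())
}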
264
vendor/github.com/docker/docker/pkg/idtools/idtools.go
generated
vendored
@ -1,264 +0,0 @@
|
||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// IDMap contains a single entry for user namespace range remapping. An array
|
||||
// of IDMap entries represents the structure that will be provided to the Linux
|
||||
// kernel for creating a user namespace.
|
||||
type IDMap struct {
|
||||
ContainerID int `json:"container_id"`
|
||||
HostID int `json:"host_id"`
|
||||
Size int `json:"size"`
|
||||
}
|
||||
|
||||
type subIDRange struct {
|
||||
Start int
|
||||
Length int
|
||||
}
|
||||
|
||||
type ranges []subIDRange
|
||||
|
||||
func (e ranges) Len() int { return len(e) }
|
||||
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
|
||||
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
|
||||
|
||||
const (
|
||||
subuidFileName = "/etc/subuid"
|
||||
subgidFileName = "/etc/subgid"
|
||||
)
|
||||
|
||||
// MkdirAllAndChown creates a directory (include any along the path) and then modifies
|
||||
// ownership to the requested uid/gid. If the directory already exists, this
|
||||
// function will still change ownership to the requested uid/gid pair.
|
||||
func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
|
||||
return mkdirAs(path, mode, owner, true, true)
|
||||
}
|
||||
|
||||
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
|
||||
// If the directory already exists, this function still changes ownership.
|
||||
// Note that unlike os.Mkdir(), this function does not return IsExist error
|
||||
// in case path already exists.
|
||||
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
|
||||
return mkdirAs(path, mode, owner, false, true)
|
||||
}
|
||||
|
||||
// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies
|
||||
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
||||
// directories along the path exist, no change of ownership will be performed
|
||||
func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
|
||||
return mkdirAs(path, mode, owner, true, false)
|
||||
}
|
||||
|
||||
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
|
||||
// If the maps are empty, then the root uid/gid will default to "real" 0/0
|
||||
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
|
||||
uid, err := toHost(0, uidMap)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
gid, err := toHost(0, gidMap)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
return uid, gid, nil
|
||||
}
|
||||
|
||||
// toContainer takes an id mapping, and uses it to translate a
|
||||
// host ID to the remapped ID. If no map is provided, then the translation
|
||||
// assumes a 1-to-1 mapping and returns the passed in id
|
||||
func toContainer(hostID int, idMap []IDMap) (int, error) {
|
||||
if idMap == nil {
|
||||
return hostID, nil
|
||||
}
|
||||
for _, m := range idMap {
|
||||
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
|
||||
contID := m.ContainerID + (hostID - m.HostID)
|
||||
return contID, nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
|
||||
}
|
||||
|
||||
// toHost takes an id mapping and a remapped ID, and translates the
|
||||
// ID to the mapped host ID. If no map is provided, then the translation
|
||||
// assumes a 1-to-1 mapping and returns the passed in id #
|
||||
func toHost(contID int, idMap []IDMap) (int, error) {
|
||||
if idMap == nil {
|
||||
return contID, nil
|
||||
}
|
||||
for _, m := range idMap {
|
||||
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
|
||||
hostID := m.HostID + (contID - m.ContainerID)
|
||||
return hostID, nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
|
||||
}
|
||||
|
||||
// Identity is either a UID and GID pair or a SID (but not both)
|
||||
type Identity struct {
|
||||
UID int
|
||||
GID int
|
||||
SID string
|
||||
}
|
||||
|
||||
// IdentityMapping contains a mappings of UIDs and GIDs
|
||||
type IdentityMapping struct {
|
||||
uids []IDMap
|
||||
gids []IDMap
|
||||
}
|
||||
|
||||
// NewIdentityMapping takes a requested user and group name and
|
||||
// using the data from /etc/sub{uid,gid} ranges, creates the
|
||||
// proper uid and gid remapping ranges for that user/group pair
|
||||
func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) {
|
||||
subuidRanges, err := parseSubuid(username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
subgidRanges, err := parseSubgid(groupname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(subuidRanges) == 0 {
|
||||
return nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
||||
}
|
||||
if len(subgidRanges) == 0 {
|
||||
return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
||||
}
|
||||
|
||||
return &IdentityMapping{
|
||||
uids: createIDMap(subuidRanges),
|
||||
gids: createIDMap(subgidRanges),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewIDMappingsFromMaps creates a new mapping from two slices
|
||||
// Deprecated: this is a temporary shim while transitioning to IDMapping
|
||||
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping {
|
||||
return &IdentityMapping{uids: uids, gids: gids}
|
||||
}
|
||||
|
||||
// RootPair returns a uid and gid pair for the root user. The error is ignored
|
||||
// because a root user always exists, and the defaults are correct when the uid
|
||||
// and gid maps are empty.
|
||||
func (i *IdentityMapping) RootPair() Identity {
|
||||
uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
|
||||
return Identity{UID: uid, GID: gid}
|
||||
}
|
||||
|
||||
// ToHost returns the host UID and GID for the container uid, gid.
|
||||
// Remapping is only performed if the ids aren't already the remapped root ids
|
||||
func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) {
|
||||
var err error
|
||||
target := i.RootPair()
|
||||
|
||||
if pair.UID != target.UID {
|
||||
target.UID, err = toHost(pair.UID, i.uids)
|
||||
if err != nil {
|
||||
return target, err
|
||||
}
|
||||
}
|
||||
|
||||
if pair.GID != target.GID {
|
||||
target.GID, err = toHost(pair.GID, i.gids)
|
||||
}
|
||||
return target, err
|
||||
}
|
||||
|
||||
// ToContainer returns the container UID and GID for the host uid and gid
|
||||
func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) {
|
||||
uid, err := toContainer(pair.UID, i.uids)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
gid, err := toContainer(pair.GID, i.gids)
|
||||
return uid, gid, err
|
||||
}
|
||||
|
||||
// Empty returns true if there are no id mappings
|
||||
func (i *IdentityMapping) Empty() bool {
|
||||
return len(i.uids) == 0 && len(i.gids) == 0
|
||||
}
|
||||
|
||||
// UIDs return the UID mapping
|
||||
// TODO: remove this once everything has been refactored to use pairs
|
||||
func (i *IdentityMapping) UIDs() []IDMap {
|
||||
return i.uids
|
||||
}
|
||||
|
||||
// GIDs return the UID mapping
|
||||
// TODO: remove this once everything has been refactored to use pairs
|
||||
func (i *IdentityMapping) GIDs() []IDMap {
|
||||
return i.gids
|
||||
}
|
||||
|
||||
func createIDMap(subidRanges ranges) []IDMap {
|
||||
idMap := []IDMap{}
|
||||
|
||||
containerID := 0
|
||||
for _, idrange := range subidRanges {
|
||||
idMap = append(idMap, IDMap{
|
||||
ContainerID: containerID,
|
||||
HostID: idrange.Start,
|
||||
Size: idrange.Length,
|
||||
})
|
||||
containerID = containerID + idrange.Length
|
||||
}
|
||||
return idMap
|
||||
}
|
||||
|
||||
func parseSubuid(username string) (ranges, error) {
|
||||
return parseSubidFile(subuidFileName, username)
|
||||
}
|
||||
|
||||
func parseSubgid(username string) (ranges, error) {
|
||||
return parseSubidFile(subgidFileName, username)
|
||||
}
|
||||
|
||||
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
|
||||
// and return all found ranges for a specified username. If the special value
|
||||
// "ALL" is supplied for username, then all ranges in the file will be returned
|
||||
func parseSubidFile(path, username string) (ranges, error) {
|
||||
var rangeList ranges
|
||||
|
||||
subidFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return rangeList, err
|
||||
}
|
||||
defer subidFile.Close()
|
||||
|
||||
s := bufio.NewScanner(subidFile)
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return rangeList, err
|
||||
}
|
||||
|
||||
text := strings.TrimSpace(s.Text())
|
||||
if text == "" || strings.HasPrefix(text, "#") {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(text, ":")
|
||||
if len(parts) != 3 {
|
||||
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
|
||||
}
|
||||
if parts[0] == username || username == "ALL" {
|
||||
startid, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
||||
}
|
||||
length, err := strconv.Atoi(parts[2])
|
||||
if err != nil {
|
||||
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
||||
}
|
||||
rangeList = append(rangeList, subIDRange{startid, length})
|
||||
}
|
||||
}
|
||||
return rangeList, nil
|
||||
}
|
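For context, the deleted idtools.go above held the user-namespace ID translation helpers. A minimal sketch of the container-to-host translation it performed, with a hypothetical subordinate range rather than any real faasd configuration, could look like:

// Illustrative sketch of translating a container ID to a host ID across
// user-namespace ranges, mirroring the removed toHost helper. The sample
// mapping below is hypothetical.
package main

import "fmt"

type idMap struct {
	ContainerID int
	HostID      int
	Size        int
}

func toHost(contID int, maps []idMap) (int, error) {
	if len(maps) == 0 {
		return contID, nil // no remapping configured: 1-to-1
	}
	for _, m := range maps {
		if contID >= m.ContainerID && contID <= m.ContainerID+m.Size-1 {
			return m.HostID + (contID - m.ContainerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}

func main() {
	maps := []idMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	hostID, err := toHost(0, maps)
	fmt.Println(hostID, err) // container root maps to host UID 100000
}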
231
vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
@ -1,231 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/opencontainers/runc/libcontainer/user"
|
||||
)
|
||||
|
||||
var (
|
||||
entOnce sync.Once
|
||||
getentCmd string
|
||||
)
|
||||
|
||||
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
|
||||
// make an array containing the original path asked for, plus (for mkAll == true)
|
||||
// all path components leading up to the complete path that don't exist before we MkdirAll
|
||||
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
|
||||
// chown the full directory path if it exists
|
||||
|
||||
var paths []string
|
||||
|
||||
stat, err := system.Stat(path)
|
||||
if err == nil {
|
||||
if !stat.IsDir() {
|
||||
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
|
||||
}
|
||||
if !chownExisting {
|
||||
return nil
|
||||
}
|
||||
|
||||
// short-circuit--we were called with an existing directory and chown was requested
|
||||
return lazyChown(path, owner.UID, owner.GID, stat)
|
||||
}
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
paths = []string{path}
|
||||
}
|
||||
|
||||
if mkAll {
|
||||
// walk back to "/" looking for directories which do not exist
|
||||
// and add them to the paths array for chown after creation
|
||||
dirPath := path
|
||||
for {
|
||||
dirPath = filepath.Dir(dirPath)
|
||||
if dirPath == "/" {
|
||||
break
|
||||
}
|
||||
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
|
||||
paths = append(paths, dirPath)
|
||||
}
|
||||
}
|
||||
if err := system.MkdirAll(path, mode, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// even if it existed, we will chown the requested path + any subpaths that
|
||||
// didn't exist when we called MkdirAll
|
||||
for _, pathComponent := range paths {
|
||||
if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
|
||||
// if that uid, gid pair has access (execute bit) to the directory
|
||||
func CanAccess(path string, pair Identity) bool {
|
||||
statInfo, err := system.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
fileMode := os.FileMode(statInfo.Mode())
|
||||
permBits := fileMode.Perm()
|
||||
return accessible(statInfo.UID() == uint32(pair.UID),
|
||||
statInfo.GID() == uint32(pair.GID), permBits)
|
||||
}
|
||||
|
||||
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
|
||||
if isOwner && (perms&0100 == 0100) {
|
||||
return true
|
||||
}
|
||||
if isGroup && (perms&0010 == 0010) {
|
||||
return true
|
||||
}
|
||||
if perms&0001 == 0001 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
|
||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||
func LookupUser(username string) (user.User, error) {
|
||||
// first try a local system files lookup using existing capabilities
|
||||
usr, err := user.LookupUser(username)
|
||||
if err == nil {
|
||||
return usr, nil
|
||||
}
|
||||
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
|
||||
usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
|
||||
if err != nil {
|
||||
return user.User{}, err
|
||||
}
|
||||
return usr, nil
|
||||
}
|
||||
|
||||
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
|
||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||
func LookupUID(uid int) (user.User, error) {
|
||||
// first try a local system files lookup using existing capabilities
|
||||
usr, err := user.LookupUid(uid)
|
||||
if err == nil {
|
||||
return usr, nil
|
||||
}
|
||||
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
|
||||
return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
|
||||
}
|
||||
|
||||
func getentUser(args string) (user.User, error) {
|
||||
reader, err := callGetent(args)
|
||||
if err != nil {
|
||||
return user.User{}, err
|
||||
}
|
||||
users, err := user.ParsePasswd(reader)
|
||||
if err != nil {
|
||||
return user.User{}, err
|
||||
}
|
||||
if len(users) == 0 {
|
||||
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
|
||||
}
|
||||
return users[0], nil
|
||||
}
|
||||
|
||||
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
|
||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||
func LookupGroup(groupname string) (user.Group, error) {
|
||||
// first try a local system files lookup using existing capabilities
|
||||
group, err := user.LookupGroup(groupname)
|
||||
if err == nil {
|
||||
return group, nil
|
||||
}
|
||||
// local files lookup failed; attempt to call `getent` to query configured group dbs
|
||||
return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
|
||||
}
|
||||
|
||||
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
|
||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||
func LookupGID(gid int) (user.Group, error) {
|
||||
// first try a local system files lookup using existing capabilities
|
||||
group, err := user.LookupGid(gid)
|
||||
if err == nil {
|
||||
return group, nil
|
||||
}
|
||||
// local files lookup failed; attempt to call `getent` to query configured group dbs
|
||||
return getentGroup(fmt.Sprintf("%s %d", "group", gid))
|
||||
}
|
||||
|
||||
func getentGroup(args string) (user.Group, error) {
|
||||
reader, err := callGetent(args)
|
||||
if err != nil {
|
||||
return user.Group{}, err
|
||||
}
|
||||
groups, err := user.ParseGroup(reader)
|
||||
if err != nil {
|
||||
return user.Group{}, err
|
||||
}
|
||||
if len(groups) == 0 {
|
||||
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
|
||||
}
|
||||
return groups[0], nil
|
||||
}
|
||||
|
||||
func callGetent(args string) (io.Reader, error) {
|
||||
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
|
||||
// if no `getent` command on host, can't do anything else
|
||||
if getentCmd == "" {
|
||||
return nil, fmt.Errorf("")
|
||||
}
|
||||
out, err := execCmd(getentCmd, args)
|
||||
if err != nil {
|
||||
exitCode, errC := system.GetExitCode(err)
|
||||
if errC != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch exitCode {
|
||||
case 1:
|
||||
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
|
||||
case 2:
|
||||
terms := strings.Split(args, " ")
|
||||
return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
|
||||
case 3:
|
||||
return nil, fmt.Errorf("getent database doesn't support enumeration")
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
return bytes.NewReader(out), nil
|
||||
}
|
||||
|
||||
// lazyChown performs a chown only if the uid/gid don't match what's requested
|
||||
// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
|
||||
// dir is on an NFS share, so don't call chown unless we absolutely must.
|
||||
func lazyChown(p string, uid, gid int, stat *system.StatT) error {
|
||||
if stat == nil {
|
||||
var err error
|
||||
stat, err = system.Stat(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
|
||||
return nil
|
||||
}
|
||||
return os.Chown(p, uid, gid)
|
||||
}
|
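The removed idtools_unix.go above falls back to the host's getent binary when the file-based libcontainer lookup misses. A simplified, hypothetical sketch of that two-step lookup, using the standard library in place of libcontainer, is:

// Simplified sketch of a passwd lookup with a getent fallback, in the
// spirit of the removed LookupUser/callGetent. Error handling is reduced
// for brevity; the helper name is illustrative.
package main

import (
	"fmt"
	"os/exec"
	"os/user"
	"strings"
)

func lookupUID(name string) (string, error) {
	// Try the standard library first (local files or nss, depending on build).
	if u, err := user.Lookup(name); err == nil {
		return u.Uid, nil
	}
	// Fall back to `getent passwd <name>`, which consults the databases
	// configured in nsswitch.conf.
	out, err := exec.Command("getent", "passwd", name).Output()
	if err != nil {
		return "", fmt.Errorf("getent failed for %q: %w", name, err)
	}
	fields := strings.Split(strings.TrimSpace(string(out)), ":")
	if len(fields) < 3 {
		return "", fmt.Errorf("unexpected getent output for %q", name)
	}
	return fields[2], nil // name:x:uid:gid:... -> uid
}

func main() {
	uid, err := lookupUID("root")
	fmt.Println(uid, err)
}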
25
vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
generated
vendored
@ -1,25 +0,0 @@
package idtools // import "github.com/docker/docker/pkg/idtools"

import (
"os"

"github.com/docker/docker/pkg/system"
)

// This is currently a wrapper around MkdirAll, however, since currently
// permissions aren't set through this path, the identity isn't utilized.
// Ownership is handled elsewhere, but in the future could be support here
// too.
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
if err := system.MkdirAll(path, mode, ""); err != nil {
return err
}
return nil
}

// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
// Windows does not require/support this function, so always return true
func CanAccess(path string, identity Identity) bool {
return true
}
164
vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
generated
vendored
@ -1,164 +0,0 @@
|
||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
|
||||
// Linux distribution commands:
|
||||
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
|
||||
// useradd -r -s /bin/false <username>
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
userCommand string
|
||||
|
||||
cmdTemplates = map[string]string{
|
||||
"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
|
||||
"useradd": "-r -s /bin/false %s",
|
||||
"usermod": "-%s %d-%d %s",
|
||||
}
|
||||
|
||||
idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
|
||||
// default length for a UID/GID subordinate range
|
||||
defaultRangeLen = 65536
|
||||
defaultRangeStart = 100000
|
||||
userMod = "usermod"
|
||||
)
|
||||
|
||||
// AddNamespaceRangesUser takes a username and uses the standard system
|
||||
// utility to create a system user/group pair used to hold the
|
||||
// /etc/sub{uid,gid} ranges which will be used for user namespace
|
||||
// mapping ranges in containers.
|
||||
func AddNamespaceRangesUser(name string) (int, int, error) {
|
||||
if err := addUser(name); err != nil {
|
||||
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
|
||||
}
|
||||
|
||||
// Query the system for the created uid and gid pair
|
||||
out, err := execCmd("id", name)
|
||||
if err != nil {
|
||||
return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
|
||||
}
|
||||
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
|
||||
if len(matches) != 3 {
|
||||
return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
|
||||
}
|
||||
uid, err := strconv.Atoi(matches[1])
|
||||
if err != nil {
|
||||
return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
|
||||
}
|
||||
gid, err := strconv.Atoi(matches[2])
|
||||
if err != nil {
|
||||
return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
|
||||
}
|
||||
|
||||
// Now we need to create the subuid/subgid ranges for our new user/group (system users
|
||||
// do not get auto-created ranges in subuid/subgid)
|
||||
|
||||
if err := createSubordinateRanges(name); err != nil {
|
||||
return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
|
||||
}
|
||||
return uid, gid, nil
|
||||
}
|
||||
|
||||
func addUser(userName string) error {
|
||||
once.Do(func() {
|
||||
// set up which commands are used for adding users/groups dependent on distro
|
||||
if _, err := resolveBinary("adduser"); err == nil {
|
||||
userCommand = "adduser"
|
||||
} else if _, err := resolveBinary("useradd"); err == nil {
|
||||
userCommand = "useradd"
|
||||
}
|
||||
})
|
||||
if userCommand == "" {
|
||||
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
|
||||
}
|
||||
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
|
||||
out, err := execCmd(userCommand, args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createSubordinateRanges(name string) error {
|
||||
|
||||
// first, we should verify that ranges weren't automatically created
|
||||
// by the distro tooling
|
||||
ranges, err := parseSubuid(name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
|
||||
}
|
||||
if len(ranges) == 0 {
|
||||
// no UID ranges; let's create one
|
||||
startID, err := findNextUIDRange()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Can't find available subuid range: %v", err)
|
||||
}
|
||||
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
|
||||
}
|
||||
}
|
||||
|
||||
ranges, err = parseSubgid(name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
|
||||
}
|
||||
if len(ranges) == 0 {
|
||||
// no GID ranges; let's create one
|
||||
startID, err := findNextGIDRange()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Can't find available subgid range: %v", err)
|
||||
}
|
||||
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func findNextUIDRange() (int, error) {
|
||||
ranges, err := parseSubuid("ALL")
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
|
||||
}
|
||||
sort.Sort(ranges)
|
||||
return findNextRangeStart(ranges)
|
||||
}
|
||||
|
||||
func findNextGIDRange() (int, error) {
|
||||
ranges, err := parseSubgid("ALL")
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
|
||||
}
|
||||
sort.Sort(ranges)
|
||||
return findNextRangeStart(ranges)
|
||||
}
|
||||
|
||||
func findNextRangeStart(rangeList ranges) (int, error) {
|
||||
startID := defaultRangeStart
|
||||
for _, arange := range rangeList {
|
||||
if wouldOverlap(arange, startID) {
|
||||
startID = arange.Start + arange.Length
|
||||
}
|
||||
}
|
||||
return startID, nil
|
||||
}
|
||||
|
||||
func wouldOverlap(arange subIDRange, ID int) bool {
|
||||
low := ID
|
||||
high := ID + defaultRangeLen
|
||||
if (low >= arange.Start && low <= arange.Start+arange.Length) ||
|
||||
(high <= arange.Start+arange.Length && high >= arange.Start) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
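The deleted usergroupadd_linux.go above scanned /etc/subuid and /etc/subgid for the next free subordinate range. A simplified sketch of that scan, assuming the same default start (100000) and length (65536) and using made-up existing ranges, follows:

// Simplified sketch of choosing the next free subordinate ID range,
// mirroring findNextRangeStart/wouldOverlap from the removed file.
// The existing ranges below are hypothetical examples.
package main

import (
	"fmt"
	"sort"
)

type subIDRange struct{ Start, Length int }

const (
	defaultRangeStart = 100000
	defaultRangeLen   = 65536
)

func nextRangeStart(ranges []subIDRange) int {
	sort.Slice(ranges, func(i, j int) bool { return ranges[i].Start < ranges[j].Start })
	start := defaultRangeStart
	for _, r := range ranges {
		// If the candidate window [start, start+defaultRangeLen] overlaps an
		// existing range, slide the candidate past that range.
		if start <= r.Start+r.Length && start+defaultRangeLen >= r.Start {
			start = r.Start + r.Length
		}
	}
	return start
}

func main() {
	existing := []subIDRange{{100000, 65536}, {165536, 65536}}
	fmt.Println(nextRangeStart(existing)) // 231072
}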
12
vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
generated
vendored
@ -1,12 +0,0 @@
// +build !linux

package idtools // import "github.com/docker/docker/pkg/idtools"

import "fmt"

// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}
32
vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
generated
vendored
@ -1,32 +0,0 @@
// +build !windows

package idtools // import "github.com/docker/docker/pkg/idtools"

import (
"fmt"
"os/exec"
"path/filepath"
"strings"
)

func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
return "", err
}
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
if err != nil {
return "", err
}
//only return no error if the final resolved binary basename
//matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}

func execCmd(cmd, args string) ([]byte, error) {
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
return execCmd.CombinedOutput()
}
137
vendor/github.com/docker/docker/pkg/mount/flags.go
generated
vendored
@ -1,137 +0,0 @@
|
||||
package mount // import "github.com/docker/docker/pkg/mount"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var flags = map[string]struct {
|
||||
clear bool
|
||||
flag int
|
||||
}{
|
||||
"defaults": {false, 0},
|
||||
"ro": {false, RDONLY},
|
||||
"rw": {true, RDONLY},
|
||||
"suid": {true, NOSUID},
|
||||
"nosuid": {false, NOSUID},
|
||||
"dev": {true, NODEV},
|
||||
"nodev": {false, NODEV},
|
||||
"exec": {true, NOEXEC},
|
||||
"noexec": {false, NOEXEC},
|
||||
"sync": {false, SYNCHRONOUS},
|
||||
"async": {true, SYNCHRONOUS},
|
||||
"dirsync": {false, DIRSYNC},
|
||||
"remount": {false, REMOUNT},
|
||||
"mand": {false, MANDLOCK},
|
||||
"nomand": {true, MANDLOCK},
|
||||
"atime": {true, NOATIME},
|
||||
"noatime": {false, NOATIME},
|
||||
"diratime": {true, NODIRATIME},
|
||||
"nodiratime": {false, NODIRATIME},
|
||||
"bind": {false, BIND},
|
||||
"rbind": {false, RBIND},
|
||||
"unbindable": {false, UNBINDABLE},
|
||||
"runbindable": {false, RUNBINDABLE},
|
||||
"private": {false, PRIVATE},
|
||||
"rprivate": {false, RPRIVATE},
|
||||
"shared": {false, SHARED},
|
||||
"rshared": {false, RSHARED},
|
||||
"slave": {false, SLAVE},
|
||||
"rslave": {false, RSLAVE},
|
||||
"relatime": {false, RELATIME},
|
||||
"norelatime": {true, RELATIME},
|
||||
"strictatime": {false, STRICTATIME},
|
||||
"nostrictatime": {true, STRICTATIME},
|
||||
}
|
||||
|
||||
var validFlags = map[string]bool{
|
||||
"": true,
|
||||
"size": true,
|
||||
"mode": true,
|
||||
"uid": true,
|
||||
"gid": true,
|
||||
"nr_inodes": true,
|
||||
"nr_blocks": true,
|
||||
"mpol": true,
|
||||
}
|
||||
|
||||
var propagationFlags = map[string]bool{
|
||||
"bind": true,
|
||||
"rbind": true,
|
||||
"unbindable": true,
|
||||
"runbindable": true,
|
||||
"private": true,
|
||||
"rprivate": true,
|
||||
"shared": true,
|
||||
"rshared": true,
|
||||
"slave": true,
|
||||
"rslave": true,
|
||||
}
|
||||
|
||||
// MergeTmpfsOptions merge mount options to make sure there is no duplicate.
|
||||
func MergeTmpfsOptions(options []string) ([]string, error) {
|
||||
// We use collisions maps to remove duplicates.
|
||||
// For flag, the key is the flag value (the key for propagation flag is -1)
|
||||
// For data=value, the key is the data
|
||||
flagCollisions := map[int]bool{}
|
||||
dataCollisions := map[string]bool{}
|
||||
|
||||
var newOptions []string
|
||||
// We process in reverse order
|
||||
for i := len(options) - 1; i >= 0; i-- {
|
||||
option := options[i]
|
||||
if option == "defaults" {
|
||||
continue
|
||||
}
|
||||
if f, ok := flags[option]; ok && f.flag != 0 {
|
||||
// There is only one propagation mode
|
||||
key := f.flag
|
||||
if propagationFlags[option] {
|
||||
key = -1
|
||||
}
|
||||
// Check to see if there is collision for flag
|
||||
if !flagCollisions[key] {
|
||||
// We prepend the option and add to collision map
|
||||
newOptions = append([]string{option}, newOptions...)
|
||||
flagCollisions[key] = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
opt := strings.SplitN(option, "=", 2)
|
||||
if len(opt) != 2 || !validFlags[opt[0]] {
|
||||
return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
|
||||
}
|
||||
if !dataCollisions[opt[0]] {
|
||||
// We prepend the option and add to collision map
|
||||
newOptions = append([]string{option}, newOptions...)
|
||||
dataCollisions[opt[0]] = true
|
||||
}
|
||||
}
|
||||
|
||||
return newOptions, nil
|
||||
}
|
||||
|
||||
// Parse fstab type mount options into mount() flags
|
||||
// and device specific data
|
||||
func parseOptions(options string) (int, string) {
|
||||
var (
|
||||
flag int
|
||||
data []string
|
||||
)
|
||||
|
||||
for _, o := range strings.Split(options, ",") {
|
||||
// If the option does not exist in the flags table or the flag
|
||||
// is not supported on the platform,
|
||||
// then it is a data value for a specific fs type
|
||||
if f, exists := flags[o]; exists && f.flag != 0 {
|
||||
if f.clear {
|
||||
flag &= ^f.flag
|
||||
} else {
|
||||
flag |= f.flag
|
||||
}
|
||||
} else {
|
||||
data = append(data, o)
|
||||
}
|
||||
}
|
||||
return flag, strings.Join(data, ",")
|
||||
}
|
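The deleted mount/flags.go above converted fstab-style option strings into mount(2) flag bits plus filesystem-specific data. A condensed, Linux-only sketch of that split, modelling just a few of the flags, might read:

// Condensed sketch of splitting fstab-style mount options into flag bits
// and fs-specific data, in the spirit of the removed parseOptions. Only a
// handful of flags are modelled here for illustration; Linux-only because
// of the MS_* constants.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

var flagTable = map[string]struct {
	clear bool
	flag  int
}{
	"defaults": {false, 0},
	"ro":       {false, unix.MS_RDONLY},
	"rw":       {true, unix.MS_RDONLY},
	"nosuid":   {false, unix.MS_NOSUID},
	"noexec":   {false, unix.MS_NOEXEC},
}

func parseOptions(options string) (int, string) {
	var (
		flag int
		data []string
	)
	for _, o := range strings.Split(options, ",") {
		if f, ok := flagTable[o]; ok && f.flag != 0 {
			if f.clear {
				flag &^= f.flag
			} else {
				flag |= f.flag
			}
		} else {
			data = append(data, o) // not a known flag: pass through as fs data
		}
	}
	return flag, strings.Join(data, ",")
}

func main() {
	flag, data := parseOptions("ro,nosuid,size=64m")
	fmt.Printf("flags=%#x data=%q\n", flag, data)
}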
49
vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
generated
vendored
@ -1,49 +0,0 @@
|
||||
// +build freebsd,cgo
|
||||
|
||||
package mount // import "github.com/docker/docker/pkg/mount"
|
||||
|
||||
/*
|
||||
#include <sys/mount.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
const (
|
||||
// RDONLY will mount the filesystem as read-only.
|
||||
RDONLY = C.MNT_RDONLY
|
||||
|
||||
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
|
||||
// take effect.
|
||||
NOSUID = C.MNT_NOSUID
|
||||
|
||||
// NOEXEC will not allow execution of any binaries on the mounted file system.
|
||||
NOEXEC = C.MNT_NOEXEC
|
||||
|
||||
// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
|
||||
SYNCHRONOUS = C.MNT_SYNCHRONOUS
|
||||
|
||||
// NOATIME will not update the file access time when reading from a file.
|
||||
NOATIME = C.MNT_NOATIME
|
||||
)
|
||||
|
||||
// These flags are unsupported.
|
||||
const (
|
||||
BIND = 0
|
||||
DIRSYNC = 0
|
||||
MANDLOCK = 0
|
||||
NODEV = 0
|
||||
NODIRATIME = 0
|
||||
UNBINDABLE = 0
|
||||
RUNBINDABLE = 0
|
||||
PRIVATE = 0
|
||||
RPRIVATE = 0
|
||||
SHARED = 0
|
||||
RSHARED = 0
|
||||
SLAVE = 0
|
||||
RSLAVE = 0
|
||||
RBIND = 0
|
||||
RELATIVE = 0
|
||||
RELATIME = 0
|
||||
REMOUNT = 0
|
||||
STRICTATIME = 0
|
||||
mntDetach = 0
|
||||
)
|
87
vendor/github.com/docker/docker/pkg/mount/flags_linux.go
generated
vendored
@ -1,87 +0,0 @@
|
||||
package mount // import "github.com/docker/docker/pkg/mount"
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
// RDONLY will mount the file system read-only.
|
||||
RDONLY = unix.MS_RDONLY
|
||||
|
||||
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
|
||||
// take effect.
|
||||
NOSUID = unix.MS_NOSUID
|
||||
|
||||
// NODEV will not interpret character or block special devices on the file
|
||||
// system.
|
||||
NODEV = unix.MS_NODEV
|
||||
|
||||
// NOEXEC will not allow execution of any binaries on the mounted file system.
|
||||
NOEXEC = unix.MS_NOEXEC
|
||||
|
||||
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
|
||||
SYNCHRONOUS = unix.MS_SYNCHRONOUS
|
||||
|
||||
// DIRSYNC will force all directory updates within the file system to be done
|
||||
// synchronously. This affects the following system calls: create, link,
|
||||
// unlink, symlink, mkdir, rmdir, mknod and rename.
|
||||
DIRSYNC = unix.MS_DIRSYNC
|
||||
|
||||
// REMOUNT will attempt to remount an already-mounted file system. This is
|
||||
// commonly used to change the mount flags for a file system, especially to
|
||||
// make a readonly file system writeable. It does not change device or mount
|
||||
// point.
|
||||
REMOUNT = unix.MS_REMOUNT
|
||||
|
||||
// MANDLOCK will force mandatory locks on a filesystem.
|
||||
MANDLOCK = unix.MS_MANDLOCK
|
||||
|
||||
// NOATIME will not update the file access time when reading from a file.
|
||||
NOATIME = unix.MS_NOATIME
|
||||
|
||||
// NODIRATIME will not update the directory access time.
|
||||
NODIRATIME = unix.MS_NODIRATIME
|
||||
|
||||
// BIND remounts a subtree somewhere else.
|
||||
BIND = unix.MS_BIND
|
||||
|
||||
// RBIND remounts a subtree and all possible submounts somewhere else.
|
||||
RBIND = unix.MS_BIND | unix.MS_REC
|
||||
|
||||
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
|
||||
UNBINDABLE = unix.MS_UNBINDABLE
|
||||
|
||||
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
|
||||
RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC
|
||||
|
||||
// PRIVATE creates a mount which carries no propagation abilities.
|
||||
PRIVATE = unix.MS_PRIVATE
|
||||
|
||||
// RPRIVATE marks the entire mount tree as PRIVATE.
|
||||
RPRIVATE = unix.MS_PRIVATE | unix.MS_REC
|
||||
|
||||
// SLAVE creates a mount which receives propagation from its master, but not
|
||||
// vice versa.
|
||||
SLAVE = unix.MS_SLAVE
|
||||
|
||||
// RSLAVE marks the entire mount tree as SLAVE.
|
||||
RSLAVE = unix.MS_SLAVE | unix.MS_REC
|
||||
|
||||
// SHARED creates a mount which provides the ability to create mirrors of
|
||||
// that mount such that mounts and unmounts within any of the mirrors
|
||||
// propagate to the other mirrors.
|
||||
SHARED = unix.MS_SHARED
|
||||
|
||||
// RSHARED marks the entire mount tree as SHARED.
|
||||
RSHARED = unix.MS_SHARED | unix.MS_REC
|
||||
|
||||
// RELATIME updates inode access times relative to modify or change time.
|
||||
RELATIME = unix.MS_RELATIME
|
||||
|
||||
// STRICTATIME allows to explicitly request full atime updates. This makes
|
||||
// it possible for the kernel to default to relatime or noatime but still
|
||||
// allow userspace to override it.
|
||||
STRICTATIME = unix.MS_STRICTATIME
|
||||
|
||||
mntDetach = unix.MNT_DETACH
|
||||
)
|
31
vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
generated
vendored
@ -1,31 +0,0 @@
|
||||
// +build !linux,!freebsd freebsd,!cgo
|
||||
|
||||
package mount // import "github.com/docker/docker/pkg/mount"
|
||||
|
||||
// These flags are unsupported.
|
||||
const (
|
||||
BIND = 0
|
||||
DIRSYNC = 0
|
||||
MANDLOCK = 0
|
||||
NOATIME = 0
|
||||
NODEV = 0
|
||||
NODIRATIME = 0
|
||||
NOEXEC = 0
|
||||
NOSUID = 0
|
||||
UNBINDABLE = 0
|
||||
RUNBINDABLE = 0
|
||||
PRIVATE = 0
|
||||
RPRIVATE = 0
|
||||
SHARED = 0
|
||||
RSHARED = 0
|
||||
SLAVE = 0
|
||||
RSLAVE = 0
|
||||
RBIND = 0
|
||||
RELATIME = 0
|
||||
RELATIVE = 0
|
||||
REMOUNT = 0
|
||||
STRICTATIME = 0
|
||||
SYNCHRONOUS = 0
|
||||
RDONLY = 0
|
||||
mntDetach = 0
|
||||
)
|
159 vendor/github.com/docker/docker/pkg/mount/mount.go generated vendored
@@ -1,159 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"sort"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
)

// mountError records an error from mount or unmount operation
type mountError struct {
	op             string
	source, target string
	flags          uintptr
	data           string
	err            error
}

func (e *mountError) Error() string {
	out := e.op + " "

	if e.source != "" {
		out += e.source + ":" + e.target
	} else {
		out += e.target
	}

	if e.flags != uintptr(0) {
		out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
	}
	if e.data != "" {
		out += ", data: " + e.data
	}

	out += ": " + e.err.Error()
	return out
}

// Cause returns the underlying cause of the error
func (e *mountError) Cause() error {
	return e.err
}

// FilterFunc is a type defining a callback function
// to filter out unwanted entries. It takes a pointer
// to an Info struct (not fully populated, currently
// only Mountpoint is filled in), and returns two booleans:
//  - skip: true if the entry should be skipped
//  - stop: true if parsing should be stopped after the entry
type FilterFunc func(*Info) (skip, stop bool)

// PrefixFilter discards all entries whose mount points
// do not start with a prefix specified
func PrefixFilter(prefix string) FilterFunc {
	return func(m *Info) (bool, bool) {
		skip := !strings.HasPrefix(m.Mountpoint, prefix)
		return skip, false
	}
}

// SingleEntryFilter looks for a specific entry
func SingleEntryFilter(mp string) FilterFunc {
	return func(m *Info) (bool, bool) {
		if m.Mountpoint == mp {
			return false, true // don't skip, stop now
		}
		return true, false // skip, keep going
	}
}

// ParentsFilter returns all entries whose mount points
// can be parents of a path specified, discarding others.
// For example, given `/var/lib/docker/something`, entries
// like `/var/lib/docker`, `/var` and `/` are returned.
func ParentsFilter(path string) FilterFunc {
	return func(m *Info) (bool, bool) {
		skip := !strings.HasPrefix(path, m.Mountpoint)
		return skip, false
	}
}

// GetMounts retrieves a list of mounts for the current running process,
// with an optional filter applied (use nil for no filter).
func GetMounts(f FilterFunc) ([]*Info, error) {
	return parseMountTable(f)
}

// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo.
func Mounted(mountpoint string) (bool, error) {
	entries, err := GetMounts(SingleEntryFilter(mountpoint))
	if err != nil {
		return false, err
	}

	return len(entries) > 0, nil
}

// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	if flag&REMOUNT != REMOUNT {
		if mounted, err := Mounted(target); err != nil || mounted {
			return err
		}
	}
	return mount(device, target, mType, uintptr(flag), data)
}

// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	return mount(device, target, mType, uintptr(flag), data)
}

// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
func Unmount(target string) error {
	return unmount(target, mntDetach)
}

// RecursiveUnmount unmounts the target and all mounts underneath, starting with
// the deepsest mount first.
func RecursiveUnmount(target string) error {
	mounts, err := parseMountTable(PrefixFilter(target))
	if err != nil {
		return err
	}

	// Make the deepest mount be first
	sort.Slice(mounts, func(i, j int) bool {
		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
	})

	for i, m := range mounts {
		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
		err = unmount(m.Mountpoint, mntDetach)
		if err != nil {
			if i == len(mounts)-1 { // last mount
				if mounted, e := Mounted(m.Mountpoint); e != nil || mounted {
					return err
				}
			} else {
				// This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem
				logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
			}
		}

		logrus.Debugf("Unmounted %s", m.Mountpoint)
	}
	return nil
}
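For context on the package being removed above, a minimal usage sketch of this mount helper API, assuming the pre-removal vendored package; the paths and option string here are hypothetical examples, not taken from faasd:

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Bind-mount a directory read-only; options use the fstab-style
	// comma-separated form that Mount parses via parseOptions.
	if err := mount.Mount("/srv/data", "/mnt/data", "none", "bind,ro"); err != nil {
		log.Fatalf("mount failed: %v", err)
	}

	// Unmount lazily (MNT_DETACH) when finished.
	if err := mount.Unmount("/mnt/data"); err != nil {
		log.Fatalf("unmount failed: %v", err)
	}
}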
59 vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go generated vendored
@@ -1,59 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"

import (
	"strings"
	"syscall"
	"unsafe"
)

func allocateIOVecs(options []string) []C.struct_iovec {
	out := make([]C.struct_iovec, len(options))
	for i, option := range options {
		out[i].iov_base = unsafe.Pointer(C.CString(option))
		out[i].iov_len = C.size_t(len(option) + 1)
	}
	return out
}

func mount(device, target, mType string, flag uintptr, data string) error {
	isNullFS := false

	xs := strings.Split(data, ",")
	for _, x := range xs {
		if x == "bind" {
			isNullFS = true
		}
	}

	options := []string{"fspath", target}
	if isNullFS {
		options = append(options, "fstype", "nullfs", "target", device)
	} else {
		options = append(options, "fstype", mType, "from", device)
	}
	rawOptions := allocateIOVecs(options)
	for _, rawOption := range rawOptions {
		defer C.free(rawOption.iov_base)
	}

	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
		return &mountError{
			op:     "mount",
			source: device,
			target: target,
			flags:  flag,
			err:    syscall.Errno(errno),
		}
	}
	return nil
}
73 vendor/github.com/docker/docker/pkg/mount/mounter_linux.go generated vendored
@@ -1,73 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"golang.org/x/sys/unix"
)

const (
	// ptypes is the set propagation types.
	ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE

	// pflags is the full set valid flags for a change propagation call.
	pflags = ptypes | unix.MS_REC | unix.MS_SILENT

	// broflags is the combination of bind and read only
	broflags = unix.MS_BIND | unix.MS_RDONLY
)

// isremount returns true if either device name or flags identify a remount request, false otherwise.
func isremount(device string, flags uintptr) bool {
	switch {
	// We treat device "" and "none" as a remount request to provide compatibility with
	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
	case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
		return true
	default:
		return false
	}
}

func mount(device, target, mType string, flags uintptr, data string) error {
	oflags := flags &^ ptypes
	if !isremount(device, flags) || data != "" {
		// Initial call applying all non-propagation flags for mount
		// or remount with changed data
		if err := unix.Mount(device, target, mType, oflags, data); err != nil {
			return &mountError{
				op:     "mount",
				source: device,
				target: target,
				flags:  oflags,
				data:   data,
				err:    err,
			}
		}
	}

	if flags&ptypes != 0 {
		// Change the propagation type.
		if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
			return &mountError{
				op:     "remount",
				target: target,
				flags:  flags & pflags,
				err:    err,
			}
		}
	}

	if oflags&broflags == broflags {
		// Remount the bind to apply read only.
		if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil {
			return &mountError{
				op:     "remount-ro",
				target: target,
				flags:  oflags | unix.MS_REMOUNT,
				err:    err,
			}
		}
	}

	return nil
}
7 vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go generated vendored
@@ -1,7 +0,0 @@
// +build !linux,!freebsd freebsd,!cgo

package mount // import "github.com/docker/docker/pkg/mount"

func mount(device, target, mType string, flag uintptr, data string) error {
	panic("Not implemented")
}
40 vendor/github.com/docker/docker/pkg/mount/mountinfo.go generated vendored
@@ -1,40 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type Info struct {
	// ID is a unique identifier of the mount (may be reused after umount).
	ID int

	// Parent indicates the ID of the mount parent (or of self for the top of the
	// mount tree).
	Parent int

	// Major indicates one half of the device ID which identifies the device class.
	Major int

	// Minor indicates one half of the device ID which identifies a specific
	// instance of device.
	Minor int

	// Root of the mount within the filesystem.
	Root string

	// Mountpoint indicates the mount point relative to the process's root.
	Mountpoint string

	// Opts represents mount-specific options.
	Opts string

	// Optional represents optional fields.
	Optional string

	// Fstype indicates the type of filesystem, such as EXT3.
	Fstype string

	// Source indicates filesystem specific information or "none".
	Source string

	// VfsOpts represents per super block options.
	VfsOpts string
}
55 vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go generated vendored
@@ -1,55 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"

import (
	"fmt"
	"reflect"
	"unsafe"
)

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts.
func parseMountTable(filter FilterFunc) ([]*Info, error) {
	var rawEntries *C.struct_statfs

	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
	if count == 0 {
		return nil, fmt.Errorf("Failed to call getmntinfo")
	}

	var entries []C.struct_statfs
	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
	header.Cap = count
	header.Len = count
	header.Data = uintptr(unsafe.Pointer(rawEntries))

	var out []*Info
	for _, entry := range entries {
		var mountinfo Info
		var skip, stop bool
		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])

		if filter != nil {
			// filter out entries we're not interested in
			skip, stop = filter(p)
			if skip {
				continue
			}
		}

		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])

		out = append(out, &mountinfo)
		if stop {
			break
		}
	}
	return out, nil
}
144 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go generated vendored
@@ -1,144 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"

	"github.com/pkg/errors"
)

func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
	s := bufio.NewScanner(r)
	out := []*Info{}
	var err error
	for s.Scan() {
		if err = s.Err(); err != nil {
			return nil, err
		}
		/*
		   See http://man7.org/linux/man-pages/man5/proc.5.html

		   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
		   (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)

		   (1) mount ID: unique identifier of the mount (may be reused after umount)
		   (2) parent ID: ID of parent (or of self for the top of the mount tree)
		   (3) major:minor: value of st_dev for files on filesystem
		   (4) root: root of the mount within the filesystem
		   (5) mount point: mount point relative to the process's root
		   (6) mount options: per mount options
		   (7) optional fields: zero or more fields of the form "tag[:value]"
		   (8) separator: marks the end of the optional fields
		   (9) filesystem type: name of filesystem of the form "type[.subtype]"
		   (10) mount source: filesystem specific information or "none"
		   (11) super options: per super block options
		*/

		text := s.Text()
		fields := strings.Split(text, " ")
		numFields := len(fields)
		if numFields < 10 {
			// should be at least 10 fields
			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
		}

		p := &Info{}
		// ignore any numbers parsing errors, as there should not be any
		p.ID, _ = strconv.Atoi(fields[0])
		p.Parent, _ = strconv.Atoi(fields[1])
		mm := strings.Split(fields[2], ":")
		if len(mm) != 2 {
			return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm)
		}
		p.Major, _ = strconv.Atoi(mm[0])
		p.Minor, _ = strconv.Atoi(mm[1])

		p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
		if err != nil {
			return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3])
		}

		p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
		if err != nil {
			return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4])
		}
		p.Opts = fields[5]

		var skip, stop bool
		if filter != nil {
			// filter out entries we're not interested in
			skip, stop = filter(p)
			if skip {
				continue
			}
		}

		// one or more optional fields, when a separator (-)
		i := 6
		for ; i < numFields && fields[i] != "-"; i++ {
			switch i {
			case 6:
				p.Optional = fields[6]
			default:
				/* NOTE there might be more optional fields before the such as
				   fields[7]...fields[N] (where N < sepIndex), although
				   as of Linux kernel 4.15 the only known ones are
				   mount propagation flags in fields[6]. The correct
				   behavior is to ignore any unknown optional fields.
				*/
				break
			}
		}
		if i == numFields {
			return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text)
		}

		// There should be 3 fields after the separator...
		if i+4 > numFields {
			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text)
		}
		// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
		// (like "//serv/My Documents") _may_ end up having a space in the last field
		// of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
		// option unc= is ignored, so a space should not appear. In here we ignore
		// those "extra" fields caused by extra spaces.
		p.Fstype = fields[i+1]
		p.Source = fields[i+2]
		p.VfsOpts = fields[i+3]

		out = append(out, p)
		if stop {
			break
		}
	}
	return out, nil
}

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts
func parseMountTable(filter FilterFunc) ([]*Info, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseInfoFile(f, filter)
}

// PidMountInfo collects the mounts for a specific process ID. If the process
// ID is unknown, it is better to use `GetMounts` which will inspect
// "/proc/self/mountinfo" instead.
func PidMountInfo(pid int) ([]*Info, error) {
	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseInfoFile(f, nil)
}
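As an illustration of how the mountinfo parser above was consumed, a minimal sketch listing mounts under a prefix; the prefix path is an arbitrary example and not something faasd itself uses:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// GetMounts reads /proc/self/mountinfo on Linux; PrefixFilter keeps
	// only entries whose mount point starts with the given prefix.
	infos, err := mount.GetMounts(mount.PrefixFilter("/var/lib/docker"))
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range infos {
		fmt.Printf("%s on %s type %s (%s)\n", m.Source, m.Mountpoint, m.Fstype, m.Opts)
	}
}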
12 vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go generated vendored
@@ -1,12 +0,0 @@
// +build !windows,!linux,!freebsd freebsd,!cgo

package mount // import "github.com/docker/docker/pkg/mount"

import (
	"fmt"
	"runtime"
)

func parseMountTable(f FilterFunc) ([]*Info, error) {
	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
6 vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go generated vendored
@@ -1,6 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

func parseMountTable(f FilterFunc) ([]*Info, error) {
	// Do NOT return an error!
	return nil, nil
}
71 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go generated vendored
@@ -1,71 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, SHARED)
}

// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, RSHARED)
}

// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
// See the supported options in flags.go for further reference.
func MakePrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, PRIVATE)
}

// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeRPrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, RPRIVATE)
}

// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, SLAVE)
}

// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, RSLAVE)
}

// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, UNBINDABLE)
}

// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
// option enabled. See the supported options in flags.go for further reference.
func MakeRUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, RUNBINDABLE)
}

// MakeMount ensures that the file or directory given is a mount point,
// bind mounting it to itself it case it is not.
func MakeMount(mnt string) error {
	mounted, err := Mounted(mnt)
	if err != nil {
		return err
	}
	if mounted {
		return nil
	}

	return mount(mnt, mnt, "none", uintptr(BIND), "")
}

func ensureMountedAs(mnt string, flags int) error {
	if err := MakeMount(mnt); err != nil {
		return err
	}

	return mount("", mnt, "none", uintptr(flags), "")
}
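A short sketch of how these shared-subtree helpers were typically called, assuming the vendored package above; the mount point is a hypothetical example directory:

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// MakeRShared first bind-mounts the path onto itself if it is not
	// already a mount point, then switches the whole subtree to rshared
	// so new mounts beneath it propagate into other namespaces.
	if err := mount.MakeRShared("/var/lib/example"); err != nil {
		log.Fatalf("make rshared: %v", err)
	}
}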
22 vendor/github.com/docker/docker/pkg/mount/unmount_unix.go generated vendored
@@ -1,22 +0,0 @@
// +build !windows

package mount // import "github.com/docker/docker/pkg/mount"

import "golang.org/x/sys/unix"

func unmount(target string, flags int) error {
	err := unix.Unmount(target, flags)
	if err == nil || err == unix.EINVAL {
		// Ignore "not mounted" error here. Note the same error
		// can be returned if flags are invalid, so this code
		// assumes that the flags value is always correct.
		return nil
	}

	return &mountError{
		op:     "umount",
		target: target,
		flags:  uintptr(flags),
		err:    err,
	}
}
7 vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go generated vendored
@@ -1,7 +0,0 @@
// +build windows

package mount // import "github.com/docker/docker/pkg/mount"

func unmount(target string, flag int) error {
	panic("Not implemented")
}
16 vendor/github.com/docker/docker/pkg/system/args_windows.go generated vendored
@@ -1,16 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"strings"

	"golang.org/x/sys/windows"
)

// EscapeArgs makes a Windows-style escaped command line from a set of arguments
func EscapeArgs(args []string) string {
	escapedArgs := make([]string, len(args))
	for i, a := range args {
		escapedArgs[i] = windows.EscapeArg(a)
	}
	return strings.Join(escapedArgs, " ")
}
31 vendor/github.com/docker/docker/pkg/system/chtimes.go generated vendored
@@ -1,31 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"os"
	"time"
)

// Chtimes changes the access time and modified time of a file at the given path
func Chtimes(name string, atime time.Time, mtime time.Time) error {
	unixMinTime := time.Unix(0, 0)
	unixMaxTime := maxTime

	// If the modified time is prior to the Unix Epoch, or after the
	// end of Unix Time, os.Chtimes has undefined behavior
	// default to Unix Epoch in this case, just in case

	if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
		atime = unixMinTime
	}

	if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
		mtime = unixMinTime
	}

	if err := os.Chtimes(name, atime, mtime); err != nil {
		return err
	}

	// Take platform specific action for setting create time.
	return setCTime(name, mtime)
}
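For illustration, a small sketch of the clamping behaviour documented above: a pre-epoch timestamp is silently replaced with the Unix epoch before os.Chtimes is called. The file name is a hypothetical example:

package main

import (
	"log"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// The year 1800 is before the Unix epoch, so Chtimes will clamp the
	// access time to time.Unix(0, 0) rather than passing it through.
	old := time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC)
	if err := system.Chtimes("/tmp/example.txt", old, time.Now()); err != nil {
		log.Fatal(err)
	}
}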
14 vendor/github.com/docker/docker/pkg/system/chtimes_unix.go generated vendored
@@ -1,14 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

import (
	"time"
)

//setCTime will set the create time on a file. On Unix, the create
//time is updated as a side effect of setting the modified time, so
//no action is required.
func setCTime(path string, ctime time.Time) error {
	return nil
}
26 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go generated vendored
@@ -1,26 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"time"

	"golang.org/x/sys/windows"
)

//setCTime will set the create time on a file. On Windows, this requires
//calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
	ctimespec := windows.NsecToTimespec(ctime.UnixNano())
	pathp, e := windows.UTF16PtrFromString(path)
	if e != nil {
		return e
	}
	h, e := windows.CreateFile(pathp,
		windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
		windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
	if e != nil {
		return e
	}
	defer windows.Close(h)
	c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
	return windows.SetFileTime(h, &c, nil, nil)
}
13 vendor/github.com/docker/docker/pkg/system/errors.go generated vendored
@@ -1,13 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"errors"
)

var (
	// ErrNotSupportedPlatform means the platform is not supported.
	ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")

	// ErrNotSupportedOperatingSystem means the operating system is not supported.
	ErrNotSupportedOperatingSystem = errors.New("operating system is not supported")
)
19 vendor/github.com/docker/docker/pkg/system/exitcode.go generated vendored
@@ -1,19 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"fmt"
	"os/exec"
	"syscall"
)

// GetExitCode returns the ExitStatus of the specified error if its type is
// exec.ExitError, returns 0 and an error otherwise.
func GetExitCode(err error) (int, error) {
	exitCode := 0
	if exiterr, ok := err.(*exec.ExitError); ok {
		if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
			return procExit.ExitStatus(), nil
		}
	}
	return exitCode, fmt.Errorf("failed to get exit code")
}
67 vendor/github.com/docker/docker/pkg/system/filesys.go generated vendored
@@ -1,67 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
	return MkdirAll(path, perm, sddl)
}

// MkdirAll creates a directory named path along with any necessary parents,
// with permission specified by attribute perm for all dir created.
func MkdirAll(path string, perm os.FileMode, sddl string) error {
	return os.MkdirAll(path, perm)
}

// IsAbs is a platform-specific wrapper for filepath.IsAbs.
func IsAbs(path string) bool {
	return filepath.IsAbs(path)
}

// The functions below here are wrappers for the equivalents in the os and ioutils packages.
// They are passthrough on Unix platforms, and only relevant on Windows.

// CreateSequential creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func CreateSequential(name string) (*os.File, error) {
	return os.Create(name)
}

// OpenSequential opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func OpenSequential(name string) (*os.File, error) {
	return os.Open(name)
}

// OpenFileSequential is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// If there is an error, it will be of type *PathError.
func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
	return os.OpenFile(name, flag, perm)
}

// TempFileSequential creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
	return ioutil.TempFile(dir, prefix)
}
294 vendor/github.com/docker/docker/pkg/system/filesys_windows.go generated vendored
@@ -1,294 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"
	"unsafe"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

const (
	// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
	SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
)

// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
// with an appropriate SDDL defined ACL.
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
	return mkdirall(path, true, sddl)
}

// MkdirAll implementation that is volume path aware for Windows.
func MkdirAll(path string, _ os.FileMode, sddl string) error {
	return mkdirall(path, false, sddl)
}

// mkdirall is a custom version of os.MkdirAll modified for use on Windows
// so that it is both volume path aware, and can create a directory with
// a DACL.
func mkdirall(path string, applyACL bool, sddl string) error {
	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
		return nil
	}

	// The rest of this method is largely copied from os.MkdirAll and should be kept
	// as-is to ensure compatibility.

	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	dir, err := os.Stat(path)
	if err == nil {
		if dir.IsDir() {
			return nil
		}
		return &os.PathError{
			Op:   "mkdir",
			Path: path,
			Err:  syscall.ENOTDIR,
		}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	i := len(path)
	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
		i--
	}

	j := i
	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
		j--
	}

	if j > 1 {
		// Create parent
		err = mkdirall(path[0:j-1], false, sddl)
		if err != nil {
			return err
		}
	}

	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
	if applyACL {
		err = mkdirWithACL(path, sddl)
	} else {
		err = os.Mkdir(path, 0)
	}

	if err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := os.Lstat(path)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}
	return nil
}

// mkdirWithACL creates a new directory. If there is an error, it will be of
// type *PathError. .
//
// This is a modified and combined version of os.Mkdir and windows.Mkdir
// in golang to cater for creating a directory am ACL permitting full
// access, with inheritance, to any subfolder/file for Built-in Administrators
// and Local System.
func mkdirWithACL(name string, sddl string) error {
	sa := windows.SecurityAttributes{Length: 0}
	sd, err := winio.SddlToSecurityDescriptor(sddl)
	if err != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: err}
	}
	sa.Length = uint32(unsafe.Sizeof(sa))
	sa.InheritHandle = 1
	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))

	namep, err := windows.UTF16PtrFromString(name)
	if err != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: err}
	}

	e := windows.CreateDirectory(namep, &sa)
	if e != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: e}
	}
	return nil
}

// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
// as it doesn't start with a drive-letter/colon combination. However, in
// docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon. This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
	if !filepath.IsAbs(path) {
		if !strings.HasPrefix(path, string(os.PathSeparator)) {
			return false
		}
	}
	return true
}

// The origin of the functions below here are the golang OS and windows packages,
// slightly modified to only cope with files, not directories due to the
// specific use case.
//
// The alteration is to allow a file on Windows to be opened with
// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
// the standby list, particularly when accessing large files such as layer.tar.

// CreateSequential creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func CreateSequential(name string) (*os.File, error) {
	return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
}

// OpenSequential opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func OpenSequential(name string) (*os.File, error) {
	return OpenFileSequential(name, os.O_RDONLY, 0)
}

// OpenFileSequential is the generalized open call; most users will use Open
// or Create instead.
// If there is an error, it will be of type *PathError.
func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
	if name == "" {
		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
	}
	r, errf := windowsOpenFileSequential(name, flag, 0)
	if errf == nil {
		return r, nil
	}
	return nil, &os.PathError{Op: "open", Path: name, Err: errf}
}

func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
	r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
	if e != nil {
		return nil, e
	}
	return os.NewFile(uintptr(r), name), nil
}

func makeInheritSa() *windows.SecurityAttributes {
	var sa windows.SecurityAttributes
	sa.Length = uint32(unsafe.Sizeof(sa))
	sa.InheritHandle = 1
	return &sa
}

func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
	if len(path) == 0 {
		return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
	}
	pathp, err := windows.UTF16PtrFromString(path)
	if err != nil {
		return windows.InvalidHandle, err
	}
	var access uint32
	switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
	case windows.O_RDONLY:
		access = windows.GENERIC_READ
	case windows.O_WRONLY:
		access = windows.GENERIC_WRITE
	case windows.O_RDWR:
		access = windows.GENERIC_READ | windows.GENERIC_WRITE
	}
	if mode&windows.O_CREAT != 0 {
		access |= windows.GENERIC_WRITE
	}
	if mode&windows.O_APPEND != 0 {
		access &^= windows.GENERIC_WRITE
		access |= windows.FILE_APPEND_DATA
	}
	sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
	var sa *windows.SecurityAttributes
	if mode&windows.O_CLOEXEC == 0 {
		sa = makeInheritSa()
	}
	var createmode uint32
	switch {
	case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
		createmode = windows.CREATE_NEW
	case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
		createmode = windows.CREATE_ALWAYS
	case mode&windows.O_CREAT == windows.O_CREAT:
		createmode = windows.OPEN_ALWAYS
	case mode&windows.O_TRUNC == windows.O_TRUNC:
		createmode = windows.TRUNCATE_EXISTING
	default:
		createmode = windows.OPEN_EXISTING
	}
	// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
	//https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
	const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
	h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
	return h, e
}

// Helpers for TempFileSequential
var rand uint32
var randmu sync.Mutex

func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextSuffix() string {
	randmu.Lock()
	r := rand
	if r == 0 {
		r = reseed()
	}
	r = r*1664525 + 1013904223 // constants from Numerical Recipes
	rand = r
	randmu.Unlock()
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}

// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
// file access. Below is the original comment from golang:
// TempFile creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
	if dir == "" {
		dir = os.TempDir()
	}

	nconflict := 0
	for i := 0; i < 10000; i++ {
		name := filepath.Join(dir, prefix+nextSuffix())
		f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if os.IsExist(err) {
			if nconflict++; nconflict > 10 {
				randmu.Lock()
				rand = reseed()
				randmu.Unlock()
			}
			continue
		}
		break
	}
	return
}
22 vendor/github.com/docker/docker/pkg/system/init.go generated vendored
@@ -1,22 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"syscall"
	"time"
	"unsafe"
)

// Used by chtimes
var maxTime time.Time

func init() {
	// chtimes initialization
	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
		// This is a 64 bit timespec
		// os.Chtimes limits time to the following
		maxTime = time.Unix(0, 1<<63-1)
	} else {
		// This is a 32 bit timespec
		maxTime = time.Unix(1<<31-1, 0)
	}
}
12 vendor/github.com/docker/docker/pkg/system/init_unix.go generated vendored
@@ -1,12 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

// InitLCOW does nothing since LCOW is a windows only feature
func InitLCOW(experimental bool) {
}

// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
func ContainerdRuntimeSupported(_ bool, _ string) bool {
	return true
}
41 vendor/github.com/docker/docker/pkg/system/init_windows.go generated vendored
@@ -1,41 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"os"

	"github.com/Microsoft/hcsshim/osversion"
	"github.com/sirupsen/logrus"
)

var (
	// lcowSupported determines if Linux Containers on Windows are supported.
	lcowSupported = false

	// containerdRuntimeSupported determines if ContainerD should be the runtime.
	// As of March 2019, this is an experimental feature.
	containerdRuntimeSupported = false
)

// InitLCOW sets whether LCOW is supported or not. Requires RS5+
func InitLCOW(experimental bool) {
	v := GetOSVersion()
	if experimental && v.Build >= osversion.RS5 {
		lcowSupported = true
	}
}

// InitContainerdRuntime sets whether to use ContainerD for runtime
// on Windows. This is an experimental feature still in development, and
// also requires an environment variable to be set (so as not to turn the
// feature on from simply experimental which would also mean LCOW.
func InitContainerdRuntime(experimental bool, cdPath string) {
	if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 {
		logrus.Warnf("Using ContainerD runtime. This feature is experimental")
		containerdRuntimeSupported = true
	}
}

// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
func ContainerdRuntimeSupported() bool {
	return containerdRuntimeSupported
}
32 vendor/github.com/docker/docker/pkg/system/lcow.go generated vendored
@@ -1,32 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"runtime"
	"strings"

	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// IsOSSupported determines if an operating system is supported by the host
func IsOSSupported(os string) bool {
	if strings.EqualFold(runtime.GOOS, os) {
		return true
	}
	if LCOWSupported() && strings.EqualFold(os, "linux") {
		return true
	}
	return false
}

// ValidatePlatform determines if a platform structure is valid.
// TODO This is a temporary windows-only function, should be replaced by
// comparison of worker capabilities
func ValidatePlatform(platform specs.Platform) error {
	if runtime.GOOS == "windows" {
		if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
			return errors.Errorf("unsupported os %s", platform.OS)
		}
	}
	return nil
}
8 vendor/github.com/docker/docker/pkg/system/lcow_unix.go generated vendored
@@ -1,8 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
	return false
}
6 vendor/github.com/docker/docker/pkg/system/lcow_windows.go generated vendored
@@ -1,6 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
	return lcowSupported
}
20 vendor/github.com/docker/docker/pkg/system/lstat_unix.go generated vendored
@@ -1,20 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

import (
	"os"
	"syscall"
)

// Lstat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Lstat(path string) (*StatT, error) {
	s := &syscall.Stat_t{}
	if err := syscall.Lstat(path, s); err != nil {
		return nil, &os.PathError{Op: "Lstat", Path: path, Err: err}
	}
	return fromStatT(s)
}
14 vendor/github.com/docker/docker/pkg/system/lstat_windows.go generated vendored
@@ -1,14 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import "os"

// Lstat calls os.Lstat to get a fileinfo interface back.
// This is then copied into our own locally defined structure.
func Lstat(path string) (*StatT, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}

	return fromStatT(&fi)
}
17 vendor/github.com/docker/docker/pkg/system/meminfo.go generated vendored
@@ -1,17 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

// MemInfo contains memory statistics of the host system.
type MemInfo struct {
	// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
	// kernel binary code).
	MemTotal int64

	// Amount of free memory.
	MemFree int64

	// Total amount of swap space available.
	SwapTotal int64

	// Amount of swap space that is currently unused.
	SwapFree int64
}
65 vendor/github.com/docker/docker/pkg/system/meminfo_linux.go generated vendored
@@ -1,65 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"bufio"
	"io"
	"os"
	"strconv"
	"strings"

	"github.com/docker/go-units"
)

// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
	file, err := os.Open("/proc/meminfo")
	if err != nil {
		return nil, err
	}
	defer file.Close()
	return parseMemInfo(file)
}

// parseMemInfo parses the /proc/meminfo file into
// a MemInfo object given an io.Reader to the file.
// Throws error if there are problems reading from the file
func parseMemInfo(reader io.Reader) (*MemInfo, error) {
	meminfo := &MemInfo{}
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		// Expected format: ["MemTotal:", "1234", "kB"]
		parts := strings.Fields(scanner.Text())

		// Sanity checks: Skip malformed entries.
		if len(parts) < 3 || parts[2] != "kB" {
			continue
		}

		// Convert to bytes.
		size, err := strconv.Atoi(parts[1])
		if err != nil {
			continue
		}
		bytes := int64(size) * units.KiB

		switch parts[0] {
		case "MemTotal:":
			meminfo.MemTotal = bytes
		case "MemFree:":
			meminfo.MemFree = bytes
		case "SwapTotal:":
			meminfo.SwapTotal = bytes
		case "SwapFree:":
			meminfo.SwapFree = bytes
		}

	}

	// Handle errors that may have occurred during the reading of the file.
	if err := scanner.Err(); err != nil {
		return nil, err
	}

	return meminfo, nil
}
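A minimal usage sketch of the meminfo reader above, assuming the vendored package; it simply prints the totals in MiB:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// ReadMemInfo parses /proc/meminfo on Linux and reports sizes in bytes.
	mi, err := system.ReadMemInfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("RAM: %d MiB total, %d MiB free\n", mi.MemTotal/(1024*1024), mi.MemFree/(1024*1024))
	fmt.Printf("Swap: %d MiB total, %d MiB free\n", mi.SwapTotal/(1024*1024), mi.SwapFree/(1024*1024))
}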
8 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go generated vendored
@@ -1,8 +0,0 @@
// +build !linux,!windows

package system // import "github.com/docker/docker/pkg/system"

// ReadMemInfo is not supported on platforms other than linux and windows.
func ReadMemInfo() (*MemInfo, error) {
	return nil, ErrNotSupportedPlatform
}
45 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go generated vendored
@@ -1,45 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")

	procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
)

// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
type memorystatusex struct {
	dwLength                uint32
	dwMemoryLoad            uint32
	ullTotalPhys            uint64
	ullAvailPhys            uint64
	ullTotalPageFile        uint64
	ullAvailPageFile        uint64
	ullTotalVirtual         uint64
	ullAvailVirtual         uint64
	ullAvailExtendedVirtual uint64
}

// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
	msi := &memorystatusex{
		dwLength: 64,
	}
	r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
	if r1 == 0 {
		return &MemInfo{}, nil
	}
	return &MemInfo{
		MemTotal:  int64(msi.ullTotalPhys),
		MemFree:   int64(msi.ullAvailPhys),
		SwapTotal: int64(msi.ullTotalPageFile),
		SwapFree:  int64(msi.ullAvailPageFile),
	}, nil
}
22 vendor/github.com/docker/docker/pkg/system/mknod.go generated vendored
@@ -1,22 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

import (
	"golang.org/x/sys/unix"
)

// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev int) error {
	return unix.Mknod(path, mode, dev)
}

// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor.
func Mkdev(major int64, minor int64) uint32 {
	return uint32(unix.Mkdev(uint32(major), uint32(minor)))
}
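A worked illustration of the device-number layout described in the Mkdev comment above, under the assumption of the standard Linux encoding (lower 8 bits of the minor, 12 bits of the major, top 12 bits of the minor); the device path is a hypothetical example:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// major 8, minor 1 is conventionally /dev/sda1 on Linux.
	// (1 & 0xff) | (8 << 8) | ((1 >> 8) << 20) = 0x801 = 2049.
	dev := unix.Mkdev(8, 1)
	fmt.Printf("dev number: %#x (%d)\n", dev, dev)

	// Creating the node would then look like (requires CAP_MKNOD):
	// _ = unix.Mknod("/tmp/example-sda1", unix.S_IFBLK|0600, int(dev))
}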
11 vendor/github.com/docker/docker/pkg/system/mknod_windows.go generated vendored
@@ -1,11 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

// Mknod is not implemented on Windows.
func Mknod(path string, mode uint32, dev int) error {
	return ErrNotSupportedPlatform
}

// Mkdev is not implemented on Windows.
func Mkdev(major int64, minor int64) uint32 {
	panic("Mkdev not implemented on Windows.")
}
60 vendor/github.com/docker/docker/pkg/system/path.go generated vendored
@@ -1,60 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"fmt"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/containerd/continuity/pathdriver"
)

const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

// DefaultPathEnv is unix style list of directories to search for
// executables. Each directory is separated from the next by a colon
// ':' character .
func DefaultPathEnv(os string) string {
	if runtime.GOOS == "windows" {
		if os != runtime.GOOS {
			return defaultUnixPathEnv
		}
		// Deliberately empty on Windows containers on Windows as the default path will be set by
		// the container. Docker has no context of what the default path should be.
		return ""
	}
	return defaultUnixPathEnv

}

// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
// is the system drive.
// On Linux: this is a no-op.
// On Windows: this does the following>
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
// This is used, for example, when validating a user provided path in docker cp.
// If a drive letter is supplied, it must be the system drive. The drive letter
// is always removed. Also, it translates it to OS semantics (IOW / to \). We
// need the path in this syntax so that it can ultimately be concatenated with
// a Windows long-path which doesn't support drive-letters. Examples:
// C:			--> Fail
// C:\			--> \
// a			--> a
// /a			--> \a
// d:\			--> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
	if runtime.GOOS != "windows" || LCOWSupported() {
		return path, nil
	}

	if len(path) == 2 && string(path[1]) == ":" {
		return "", fmt.Errorf("No relative path specified in %q", path)
	}
	if !driver.IsAbs(path) || len(path) < 2 {
		return filepath.FromSlash(path), nil
	}
	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
	}
	return filepath.FromSlash(path[2:]), nil
}
10 vendor/github.com/docker/docker/pkg/system/path_unix.go generated vendored
@@ -1,10 +0,0 @@ file deleted: no-op GetLongPathName for non-Windows platforms.
24 vendor/github.com/docker/docker/pkg/system/path_windows.go generated vendored
@@ -1,24 +0,0 @@ file deleted: GetLongPathName implemented via syscall.GetLongPathName.
24 vendor/github.com/docker/docker/pkg/system/process_unix.go generated vendored
@@ -1,24 +0,0 @@ file deleted: IsProcessAlive and KillProcess using unix.Kill.
18 vendor/github.com/docker/docker/pkg/system/process_windows.go generated vendored
@@ -1,18 +0,0 @@ file deleted: IsProcessAlive and KillProcess using os.FindProcess.
80 vendor/github.com/docker/docker/pkg/system/rm.go generated vendored
@@ -1,80 +0,0 @@ file deleted: EnsureRemoveAll, which retries os.RemoveAll and unmounts busy paths before giving up.
13 vendor/github.com/docker/docker/pkg/system/stat_darwin.go generated vendored
@@ -1,13 +0,0 @@ file deleted: fromStatT conversion for Darwin.
13 vendor/github.com/docker/docker/pkg/system/stat_freebsd.go generated vendored
@@ -1,13 +0,0 @@ file deleted: fromStatT conversion for FreeBSD.
19 vendor/github.com/docker/docker/pkg/system/stat_linux.go generated vendored
@@ -1,19 +0,0 @@ file deleted: fromStatT and exported FromStatT conversions for Linux.
13 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go generated vendored
@@ -1,13 +0,0 @@ file deleted: fromStatT conversion for OpenBSD.
13 vendor/github.com/docker/docker/pkg/system/stat_solaris.go generated vendored
@@ -1,13 +0,0 @@ file deleted: fromStatT conversion for Solaris.
66 vendor/github.com/docker/docker/pkg/system/stat_unix.go generated vendored
@@ -1,66 +0,0 @@ file deleted: the Unix StatT type, its accessor methods, and Stat.
49 vendor/github.com/docker/docker/pkg/system/stat_windows.go generated vendored
@@ -1,49 +0,0 @@ file deleted: the Windows StatT type, its accessors, Stat and fromStatT.
17 vendor/github.com/docker/docker/pkg/system/syscall_unix.go generated vendored
@@ -1,17 +0,0 @@ file deleted: Unmount wrapper and a pass-through CommandLineToArgv for Unix.
193 vendor/github.com/docker/docker/pkg/system/syscall_windows.go generated vendored
@@ -1,193 +0,0 @@ file deleted: Windows security-information constants, OSVersion and GetOSVersion, the IsWindowsClient and IsIoTCore licensing checks, a no-op Unmount, CommandLineToArgv, HasWin32KSupport, SetNamedSecurityInfo and GetSecurityDescriptorDacl.
13 vendor/github.com/docker/docker/pkg/system/umask.go generated vendored
@@ -1,13 +0,0 @@ file deleted: Umask via unix.Umask.
7 vendor/github.com/docker/docker/pkg/system/umask_windows.go generated vendored
@@ -1,7 +0,0 @@ file deleted: unsupported Umask stub for Windows.
24 vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go generated vendored
@@ -1,24 +0,0 @@ file deleted: LUtimesNano via the SYS_LUTIMES syscall.
25 vendor/github.com/docker/docker/pkg/system/utimes_linux.go generated vendored
@@ -1,25 +0,0 @@ file deleted: LUtimesNano via SYS_UTIMENSAT with AT_SYMLINK_NOFOLLOW.
10 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go generated vendored
@@ -1,10 +0,0 @@ file deleted: unsupported LUtimesNano stub for other platforms.
29 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go generated vendored
@@ -1,29 +0,0 @@ file deleted: Lgetxattr and Lsetxattr wrappers for Linux.
13 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go generated vendored
@@ -1,13 +0,0 @@ file deleted: unsupported Lgetxattr and Lsetxattr stubs.
70 vendor/github.com/docker/go-units/size.go generated vendored
@@ -2,7 +2,6 @@ drops the "regexp" import.
@@ -26,16 +25,17 @@ changes unitMap keys from string to byte, removes sizeRegex, and groups the abbreviation slices in a var block.
@@ -89,20 +89,66 @@ rewrites parseSize to split the number from its suffix with strings.LastIndexAny (with or without a space separator) instead of the regexp, rejects negative sizes, and validates the b/ib suffix forms by hand.
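For context, a minimal sketch of how a caller might exercise the rewritten parser through units.RAMInBytes; the input strings are chosen here purely for illustration:

package example

import (
	"fmt"

	units "github.com/docker/go-units"
)

// printSizes parses human-readable sizes with RAMInBytes, which now relies on
// the manual suffix handling summarised above rather than the removed regexp.
// Suffixes may appear with or without a space, and k/m/g/t/p are treated as
// powers of 1024 by this function.
func printSizes() {
	for _, s := range []string{"64MiB", "64 MB", "1.5g"} {
		n, err := units.RAMInBytes(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", n, "bytes")
	}
}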
118 vendor/github.com/google/uuid/null.go generated vendored Normal file
@@ -0,0 +1,118 @@ new file: NullUUID, a UUID that may be NULL, with Scan and Value for database/sql plus binary, text and JSON (un)marshalling.
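A minimal sketch of the database/sql usage that the NullUUID documentation describes; the table and column names are invented for illustration:

package example

import (
	"database/sql"

	"github.com/google/uuid"
)

// scanOwner scans a column that may be NULL into the new uuid.NullUUID type.
// Valid is false when the column was NULL; otherwise UUID holds the value.
func scanOwner(db *sql.DB, id int) (uuid.UUID, bool, error) {
	var owner uuid.NullUUID
	if err := db.QueryRow("SELECT owner_id FROM items WHERE id = ?", id).Scan(&owner); err != nil {
		return uuid.Nil, false, err
	}
	return owner.UUID, owner.Valid, nil
}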
45 vendor/github.com/google/uuid/uuid.go generated vendored
@@ -12,6 +12,7 @@ adds the "sync" import.
@@ -33,7 +34,15 @@ adds randPoolSize plus the poolEnabled, poolMu, poolPos and pool variables alongside rander.
@@ -41,6 +50,12 @@ adds IsInvalidLengthError for matching invalidLengthError.
@@ -249,3 +264,31 @@ adds EnableRandPool and DisableRandPool to toggle the batched randomness pool; neither is thread-safe with concurrent Version 4 generation.
27 vendor/github.com/google/uuid/version4.go generated vendored
@@ -27,6 +27,8 @@ documents that NewRandom uses the randomness pool when enabled.
@@ -35,7 +37,10 @@ makes NewRandom dispatch to newRandomFromPool when the pool is enabled.
@@ -49,3 +54,23 @@ adds newRandomFromPool, which refills the 256-byte pool from rander under poolMu and stamps the version and variant bits.
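A sketch of the new pool toggle, assuming the caller can guarantee no concurrent UUID generation while the pool is switched on or off, as the package comments require:

package example

import "github.com/google/uuid"

// bulkIDs enables the randomness pool so Version 4 UUIDs are drawn from
// batched crypto/rand reads, then disables it again once the batch is done.
func bulkIDs(n int) []uuid.UUID {
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()

	ids := make([]uuid.UUID, n)
	for i := range ids {
		ids[i] = uuid.New() // uses newRandomFromPool while the pool is enabled
	}
	return ids
}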
2 vendor/github.com/klauspost/compress/.gitattributes generated vendored Normal file
@@ -0,0 +1,2 @@ new file: "* -text" and "*.bin -text -diff".
32 vendor/github.com/klauspost/compress/.gitignore generated vendored Normal file
@@ -0,0 +1,32 @@ new file: standard Go build and test ignore patterns plus the s2 sfx binary, perf data files and gdb history.
141 vendor/github.com/klauspost/compress/.goreleaser.yml generated vendored Normal file
@@ -0,0 +1,141 @@ new file: goreleaser configuration building the s2c, s2d and s2sx binaries with garble across many GOOS/GOARCH targets, plus archive, checksum, snapshot, changelog and nfpm packaging settings.
276 vendor/github.com/klauspost/compress/LICENSE generated vendored
@@ -26,3 +26,279 @@ appends third-party notices: Apache License 2.0 for gzhttp/* (The New York Times Company), MIT for s2/cmd/internal/readahead/* (Klaus Post), the BSD-style Snappy-Go license for snappy/* and internal/snapref/*, and MIT for s2/cmd/internal/filepathx/* (The filepathx Authors).
550
vendor/github.com/klauspost/compress/README.md
generated
vendored
Normal file
550
vendor/github.com/klauspost/compress/README.md
generated
vendored
Normal file
@ -0,0 +1,550 @@
|
||||
# compress
|
||||
|
||||
This package provides various compression algorithms.
|
||||
|
||||
* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go (a usage sketch follows this list).
|
||||
* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
|
||||
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
|
||||
* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
|
||||
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
|
||||
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
|
||||
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
|
||||
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
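As a quick orientation for the list above, here is a minimal zstd round-trip. This is a short sketch using the `zstd.NewWriter` and `zstd.NewReader` entry points from the zstd subpackage:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Compress a payload into a buffer.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello, zstandard")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// Decompress it again.
	dec, err := zstd.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```

Both the encoder and decoder are intended to be reused (via `Reset`) rather than recreated for every payload; see the zstd subpackage README for the concurrency options.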
|
||||
|
||||
[Go Reference](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
|
||||
[Go build status](https://github.com/klauspost/compress/actions/workflows/go.yml)
|
||||
[Sourcegraph](https://sourcegraph.com/github.com/klauspost/compress?badge)
|
||||
|
||||
# changelog
|
||||
|
||||
* June 29, 2022 (v1.15.7)
|
||||
|
||||
* s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
|
||||
* zip: Merge upstream https://github.com/klauspost/compress/pull/631
|
||||
* zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
|
||||
* zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
|
||||
* flate: Faster histograms https://github.com/klauspost/compress/pull/620
|
||||
* deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
|
||||
|
||||
* June 3, 2022 (v1.15.6)
|
||||
* s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
|
||||
* s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
|
||||
* zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
|
||||
* zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
|
||||
* zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
|
||||
* gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
|
||||
* s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
|
||||
* s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
|
||||
* snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
|
||||
* s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
|
||||
|
||||
* May 25, 2022 (v1.15.5)
|
||||
* s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
|
||||
* s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
|
||||
* huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
|
||||
* zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
|
||||
* zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
|
||||
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
|
||||
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
|
||||
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
|
||||
* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
|
||||
|
||||
|
||||
* May 11, 2022 (v1.15.4)
|
||||
* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
|
||||
* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
|
||||
* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
|
||||
* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
|
||||
|
||||
* May 5, 2022 (v1.15.3)
|
||||
* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
|
||||
* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
|
||||
|
||||
* Apr 26, 2022 (v1.15.2)
|
||||
* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
|
||||
* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
|
||||
* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
|
||||
* Minimum version is Go 1.16, added CI test on 1.18.
|
||||
|
||||
* Mar 11, 2022 (v1.15.1)
|
||||
* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
|
||||
* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
|
||||
* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
|
||||
* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
|
||||
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
|
||||
|
||||
* Mar 3, 2022 (v1.15.0)
|
||||
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
|
||||
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
|
||||
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
|
||||
* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
|
||||
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
|
||||
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
|
||||
|
||||
<details>
|
||||
<summary>See Details</summary>
|
||||
Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
|
||||
|
||||
Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. Typical streams will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
|
||||
|
||||
While the release has been extensively tested, it is recommended to test when upgrading.
|
||||
</details>
|
||||
|
||||
* Feb 22, 2022 (v1.14.4)
|
||||
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
|
||||
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
|
||||
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
|
||||
* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
|
||||
|
||||
* Feb 17, 2022 (v1.14.3)
|
||||
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
|
||||
* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
|
||||
* s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
|
||||
|
||||
* Jan 25, 2022 (v1.14.2)
|
||||
* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
|
||||
* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
|
||||
* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
|
||||
* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
|
||||
* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
|
||||
* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
|
||||
|
||||
* Jan 11, 2022 (v1.14.1)
|
||||
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
|
||||
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
|
||||
* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
|
||||
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
|
||||
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.13.x</summary>
|
||||
|
||||
* Aug 30, 2021 (v1.13.5)
|
||||
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
|
||||
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
|
||||
* zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
|
||||
* Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
|
||||
|
||||
* Aug 12, 2021 (v1.13.4)
|
||||
* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
|
||||
* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
|
||||
|
||||
* Aug 3, 2021 (v1.13.3)
|
||||
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
|
||||
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
|
||||
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
|
||||
* s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399)
|
||||
* zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401)
|
||||
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
|
||||
|
||||
* Jun 14, 2021 (v1.13.1)
|
||||
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
|
||||
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
|
||||
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
|
||||
* s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395)
|
||||
|
||||
* Jun 3, 2021 (v1.13.0)
|
||||
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
|
||||
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
|
||||
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
|
||||
</details>
|
||||
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.12.x</summary>
|
||||
|
||||
* May 25, 2021 (v1.12.3)
|
||||
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
|
||||
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
|
||||
* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
|
||||
|
||||
* Apr 27, 2021 (v1.12.2)
|
||||
* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
|
||||
* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
|
||||
* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
|
||||
* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358)
|
||||
* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
|
||||
* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
|
||||
|
||||
* Apr 14, 2021 (v1.12.1)
|
||||
* snappy package removed. Upstream added as dependency.
|
||||
* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
|
||||
* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
|
||||
* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
|
||||
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
|
||||
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
|
||||
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.11.x</summary>
|
||||
|
||||
* Mar 26, 2021 (v1.11.13)
|
||||
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
|
||||
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
|
||||
* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
|
||||
* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
|
||||
|
||||
* Mar 5, 2021 (v1.11.12)
|
||||
* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
|
||||
* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
|
||||
|
||||
* Mar 1, 2021 (v1.11.9)
|
||||
* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
|
||||
* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
|
||||
* s2: Fix binaries.
|
||||
|
||||
* Feb 25, 2021 (v1.11.8)
|
||||
* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
|
||||
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
|
||||
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
|
||||
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
|
||||
* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
|
||||
|
||||
* Jan 14, 2021 (v1.11.7)
|
||||
* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
|
||||
* s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
|
||||
* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
|
||||
* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
|
||||
* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
|
||||
|
||||
* Jan 7, 2021 (v1.11.6)
|
||||
* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
|
||||
* zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
|
||||
|
||||
* Dec 20, 2020 (v1.11.4)
|
||||
* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
|
||||
* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
|
||||
* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
|
||||
* Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
|
||||
* zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
|
||||
|
||||
* Nov 15, 2020 (v1.11.3)
|
||||
* inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293)
|
||||
* zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
|
||||
|
||||
* Oct 11, 2020 (v1.11.2)
|
||||
* s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
|
||||
|
||||
* Oct 1, 2020 (v1.11.1)
|
||||
* zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
|
||||
|
||||
* Sept 8, 2020 (v1.11.0)
|
||||
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
|
||||
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
|
||||
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.10.x</summary>
|
||||
|
||||
* July 8, 2020 (v1.10.11)
|
||||
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
|
||||
* huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
|
||||
|
||||
* June 23, 2020 (v1.10.10)
|
||||
* zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
|
||||
|
||||
* June 16, 2020 (v1.10.9):
|
||||
* zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
|
||||
* zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
|
||||
* Fuzzit tests removed. The service has been purchased and is no longer available.
|
||||
|
||||
* June 5, 2020 (v1.10.8):
|
||||
* 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
|
||||
|
||||
* June 1, 2020 (v1.10.7):
|
||||
* Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
|
||||
* Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259)
|
||||
* Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
|
||||
|
||||
* May 21, 2020: (v1.10.6)
|
||||
* zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
|
||||
* zstd: Stricter decompression checks.
|
||||
|
||||
* April 12, 2020: (v1.10.5)
|
||||
* s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
|
||||
|
||||
* Apr 8, 2020: (v1.10.4)
|
||||
* zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247)
|
||||
* Mar 11, 2020: (v1.10.3)
|
||||
* s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
|
||||
* s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
|
||||
* zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
|
||||
* zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
|
||||
* zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
|
||||
|
||||
* Feb 27, 2020: (v1.10.2)
|
||||
* Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
|
||||
* Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
|
||||
|
||||
* Feb 18, 2020: (v1.10.1)
|
||||
* Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
|
||||
* deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
|
||||
* Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
|
||||
|
||||
* Feb 4, 2020: (v1.10.0)
|
||||
* Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
|
||||
* Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218)
|
||||
* Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
|
||||
* Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes prior to v1.10.0</summary>
|
||||
|
||||
* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
|
||||
* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
|
||||
* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
|
||||
* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
|
||||
* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
|
||||
* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
|
||||
* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
|
||||
* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
|
||||
* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
|
||||
* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
|
||||
* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
|
||||
* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
|
||||
* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
|
||||
* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
|
||||
* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
|
||||
* Nov 28, 2019 (v1.9.3) Fewer allocations in stateless deflate.
|
||||
* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
|
||||
* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
|
||||
* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
|
||||
* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
|
||||
* Nov 11, 2019: Reduce inflate memory use by 1KB.
|
||||
* Nov 10, 2019: Fewer allocations in the deflate bit writer.
|
||||
* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
|
||||
* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
|
||||
* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
|
||||
* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
|
||||
* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes prior to v1.9.0</summary>
|
||||
|
||||
* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
|
||||
* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
|
||||
* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools)
|
||||
* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
|
||||
* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
|
||||
* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
|
||||
* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option.
|
||||
* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
|
||||
* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
|
||||
* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
|
||||
* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy.
|
||||
* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
|
||||
* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
|
||||
* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147)
|
||||
* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
|
||||
* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
|
||||
* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
|
||||
* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder.
|
||||
* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder.
|
||||
* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
|
||||
* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix.
|
||||
* June 17, 2019: zstd decompression bugfix.
|
||||
* June 17, 2019: fix 32 bit builds.
|
||||
* June 17, 2019: Easier use in modules (less dependencies).
|
||||
* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
|
||||
* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
|
||||
* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
|
||||
* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
|
||||
* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
|
||||
* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
|
||||
* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
|
||||
* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
|
||||
* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
|
||||
* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
|
||||
* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
|
||||
* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
|
||||
* May 28, 2017: Reduce allocations when resetting decoder.
|
||||
* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
|
||||
* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
|
||||
* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
|
||||
* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
|
||||
* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
|
||||
* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
|
||||
* Mar 24, 2016: Small speedup for level 1-3.
|
||||
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
|
||||
* Feb 19, 2016: Handle small payloads faster in level 1-3.
|
||||
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
|
||||
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
|
||||
* Feb 14, 2016: Snappy: Merge upstream changes.
|
||||
* Feb 14, 2016: Snappy: Fix aggressive skipping.
|
||||
* Feb 14, 2016: Snappy: Update benchmark.
|
||||
* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
|
||||
* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
|
||||
* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
|
||||
* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
|
||||
* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
|
||||
* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
|
||||
* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
|
||||
* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
|
||||
* Dec 8 2015: Fixed rare [one byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
|
||||
* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
|
||||
* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
|
||||
* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
|
||||
* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
|
||||
* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
|
||||
* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
|
||||
|
||||
</details>
|
||||
|
||||
# deflate usage
|
||||
|
||||
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
|
||||
|
||||
| old import | new import | Documentation
|
||||
|--------------------|-----------------------------------------|--------------------|
|
||||
| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
|
||||
| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
|
||||
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
|
||||
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
|
||||
|
||||
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
|
||||
|
||||
You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
|
||||
|
||||
The packages contain the same functionality as the standard library, so you can use its godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
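For example, a small sketch that compresses with this package and decompresses with the standard library; only the import path differs from stdlib usage, so the rest of the code is unchanged:

```go
package main

import (
	"bytes"
	stdgzip "compress/gzip"
	"fmt"
	"io"

	"github.com/klauspost/compress/gzip" // drop-in for compress/gzip
)

func main() {
	var buf bytes.Buffer

	// Compress with this package; the API matches the standard library.
	zw, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	if err != nil {
		panic(err)
	}
	if _, err := zw.Write([]byte("hello, gzip")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress with the standard library to show the output is compatible.
	zr, err := stdgzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```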
|
||||
|
||||
Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
|
||||
|
||||
Memory usage is typically 1MB for a Writer; the stdlib is in the same range.
|
||||
If you expect to have a lot of concurrently allocated Writers, consider using
|
||||
the stateless compression described below.
|
||||
|
||||
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
|
||||
|
||||
# Stateless compression
|
||||
|
||||
This package offers stateless compression as a special option for gzip/deflate.
|
||||
It will do compression but without maintaining any state between Write calls.
|
||||
|
||||
This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
|
||||
|
||||
This is only relevant in cases where you expect to run many thousands of compressors concurrently,
|
||||
but with very little activity. This is *not* intended for regular web servers serving individual requests.
|
||||
|
||||
Because of this, the size of actual Write calls will affect output size.
|
||||
|
||||
In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
|
||||
|
||||
For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter)
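A short sketch of the direct deflate path, assuming `NewStatelessWriter` wraps an `io.Writer` as an `io.WriteCloser` as documented. Since no state is kept, each `Write` call is compressed on its own:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// Stateless deflate: nothing is retained between Write calls,
	// so each Write is compressed independently of the previous ones.
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write([]byte("stateless deflate payload")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("compressed to %d bytes\n", buf.Len())
}
```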
|
||||
|
||||
A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
|
||||
|
||||
```
|
||||
// replace 'ioutil.Discard' with your output.
|
||||
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer gzw.Close()
|
||||
|
||||
w := bufio.NewWriterSize(gzw, 4096)
|
||||
defer w.Flush()
|
||||
|
||||
// Write to 'w'
|
||||
```
|
||||
|
||||
This will only use up to 4KB in memory when the writer is idle.
|
||||
|
||||
Compression is almost always worse than the fastest compression level
|
||||
and each write will allocate (a little) memory.
|
||||
|
||||
# Performance Update 2018
|
||||
|
||||
It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
|
||||
|
||||
The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
|
||||
|
||||
The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* - the loss (or gain) in compression as a percentage difference of the input.
|
||||
|
||||
The `gzstd` (standard library gzip) and `gzkp` (this package's gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
|
||||
|
||||
|
||||
## Overall differences.
|
||||
|
||||
There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
|
||||
|
||||
The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
|
||||
|
||||
This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" means "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, but levels 5-9 offer no significant gains, often spending a lot more time than can be justified by the achieved compression.
|
||||
|
||||
There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
|
||||
|
||||
## Web Content
|
||||
|
||||
This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, and CSS.
|
||||
|
||||
Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
|
||||
|
||||
Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
|
||||
|
||||
## Object files
|
||||
|
||||
This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
|
||||
|
||||
The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
|
||||
|
||||
The standard library seems suboptimal on levels 3 and 4, offering both worse compression and speed than levels 6 and 7 of this package respectively.
|
||||
|
||||
## Highly Compressible File
|
||||
|
||||
This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
|
||||
|
||||
It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
|
||||
|
||||
So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
|
||||
|
||||
## Medium-High Compressible
|
||||
|
||||
This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump from Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
|
||||
|
||||
We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
|
||||
|
||||
## Medium Compressible
|
||||
|
||||
I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
|
||||
|
||||
The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
|
||||
|
||||
|
||||
## Un-compressible Content
|
||||
|
||||
This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
|
||||
|
||||
|
||||
## Huffman only compression
|
||||
|
||||
This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
|
||||
|
||||
This means that frequently used characters, like 'e' and ' ' (space) in text, are represented with the fewest bits, and rare characters like '¤' take more bits. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
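A short sketch of enabling this mode through this package's `flate` writer, assuming the `HuffmanOnly` level constant (mirroring the standard library's):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// HuffmanOnly disables match searching entirely; only entropy
	// coding of single bytes is done, so compression is near linear
	// time but the ratio can never beat 8:1.
	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		panic(err)
	}
	in := bytes.Repeat([]byte("example text "), 100)
	if _, err := w.Write(in); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes in, %d bytes out\n", len(in), buf.Len())
}
```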
|
||||
|
||||
Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
|
||||
|
||||
The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
|
||||
|
||||
The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
|
||||
|
||||
For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
|
||||
|
||||
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
|
||||
|
||||
# Other packages
|
||||
|
||||
Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
|
||||
|
||||
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
|
||||
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
|
||||
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
|
||||
|
||||
# license
|
||||
|
||||
This code is licensed under the same conditions as the original Go code. See LICENSE file.
|
85
vendor/github.com/klauspost/compress/compressible.go
generated
vendored
Normal file
85
vendor/github.com/klauspost/compress/compressible.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
package compress
|
||||
|
||||
import "math"
|
||||
|
||||
// Estimate returns a normalized compressibility estimate of block b.
|
||||
// Values close to zero are likely uncompressible.
|
||||
// Values above 0.1 are likely to be compressible.
|
||||
// Values above 0.5 are very compressible.
|
||||
// Very small lengths will return 0.
|
||||
func Estimate(b []byte) float64 {
|
||||
if len(b) < 16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Correctly predicted order 1
|
||||
hits := 0
|
||||
lastMatch := false
|
||||
var o1 [256]byte
|
||||
var hist [256]int
|
||||
c1 := byte(0)
|
||||
for _, c := range b {
|
||||
if c == o1[c1] {
|
||||
// We only count a hit if there was two correct predictions in a row.
|
||||
if lastMatch {
|
||||
hits++
|
||||
}
|
||||
lastMatch = true
|
||||
} else {
|
||||
lastMatch = false
|
||||
}
|
||||
o1[c1] = c
|
||||
c1 = c
|
||||
hist[c]++
|
||||
}
|
||||
|
||||
// Use x^0.6 to give better spread
|
||||
prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
|
||||
|
||||
// Calculate histogram distribution
|
||||
variance := float64(0)
|
||||
avg := float64(len(b)) / 256
|
||||
|
||||
for _, v := range hist {
|
||||
Δ := float64(v) - avg
|
||||
variance += Δ * Δ
|
||||
}
|
||||
|
||||
stddev := math.Sqrt(float64(variance)) / float64(len(b))
|
||||
exp := math.Sqrt(1 / float64(len(b)))
|
||||
|
||||
// Subtract expected stddev
|
||||
stddev -= exp
|
||||
if stddev < 0 {
|
||||
stddev = 0
|
||||
}
|
||||
stddev *= 1 + exp
|
||||
|
||||
// Use x^0.4 to give better spread
|
||||
entropy := math.Pow(stddev, 0.4)
|
||||
|
||||
// 50/50 weight between prediction and histogram distribution
|
||||
return math.Pow((prediction+entropy)/2, 0.9)
|
||||
}
|
||||
|
||||
// ShannonEntropyBits returns the number of bits minimum required to represent
|
||||
// an entropy encoding of the input bytes.
|
||||
// https://en.wiktionary.org/wiki/Shannon_entropy
|
||||
func ShannonEntropyBits(b []byte) int {
|
||||
if len(b) == 0 {
|
||||
return 0
|
||||
}
|
||||
var hist [256]int
|
||||
for _, c := range b {
|
||||
hist[c]++
|
||||
}
|
||||
shannon := float64(0)
|
||||
invTotal := 1.0 / float64(len(b))
|
||||
for _, v := range hist[:] {
|
||||
if v > 0 {
|
||||
n := float64(v)
|
||||
shannon += math.Ceil(-math.Log2(n*invTotal) * n)
|
||||
}
|
||||
}
|
||||
return int(math.Ceil(shannon))
|
||||
}
|
1
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
1
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
@ -92,7 +92,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTra
|
||||
im := int32((nbBitsOut << 16) - first.deltaNbBits)
|
||||
lu := (im >> nbBitsOut) + first.deltaFindState
|
||||
c.state = c.stateTable[lu]
|
||||
return
|
||||
}
|
||||
|
||||
// encode the output symbol provided and write it to the bitstream.
|
||||
|
4
vendor/github.com/klauspost/compress/gen.sh
generated
vendored
Normal file
4
vendor/github.com/klauspost/compress/gen.sh
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
#!/bin/sh
|
||||
|
||||
cd s2/cmd/_s2sx/ || exit 1
|
||||
go generate .
|
122 vendor/github.com/klauspost/compress/huff0/bitreader.go generated vendored
@ -8,115 +8,10 @@ package huff0
import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
	in       []byte
	off      uint // next byte to read is at in[off - 1]
	value    uint64
	bitsRead uint8
}

// init initializes and resets the bit reader.
func (b *bitReader) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		return errors.New("corrupt stream, did not find end of stream")
	}
	b.bitsRead = 64
	b.value = 0
	if len(in) >= 8 {
		b.fillFastStart()
	} else {
		b.fill()
		b.fill()
	}
	b.bitsRead += 8 - uint8(highBit32(uint32(v)))
	return nil
}

// peekBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) peekBitsFast(n uint8) uint16 {
	const regMask = 64 - 1
	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	return v
}

// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}

	// 2 bounds checks.
	v := b.in[b.off-4 : b.off]
	v = v[:4]
	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	b.value = (b.value << 32) | uint64(low)
	b.bitsRead -= 32
	b.off -= 4
}

func (b *bitReader) advance(n uint8) {
	b.bitsRead += n
}

// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
	// Do single re-slice to avoid bounds checks.
	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
	b.bitsRead = 0
	b.off -= 8
}

// fill() will make sure at least 32 bits are available.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.off > 4 {
		v := b.in[b.off-4:]
		v = v[:4]
		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
		b.value = (b.value << 32) | uint64(low)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	for b.off > 0 {
		b.value = (b.value << 8) | uint64(b.in[b.off-1])
		b.bitsRead -= 8
		b.off--
	}
}

// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
	return b.off == 0 && b.bitsRead >= 64
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
	// Release reference.
	b.in = nil
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool {
	return b.off == 0 && b.bitsRead >= 64
}

func (b *bitReaderBytes) remaining() uint {
	return b.off*8 + uint(64-b.bitsRead)
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderBytes) close() error {
	// Release reference.
	b.in = nil
	if b.remaining() > 0 {
		return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
	}
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
@ -313,15 +215,17 @@ func (b *bitReaderShifted) fill() {
	}
}

// finished returns true if all bits have been read from the bit stream.
func (b *bitReaderShifted) finished() bool {
	return b.off == 0 && b.bitsRead >= 64
func (b *bitReaderShifted) remaining() uint {
	return b.off*8 + uint(64-b.bitsRead)
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderShifted) close() error {
	// Release reference.
	b.in = nil
	if b.remaining() > 0 {
		return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
	}
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
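The bitReader removed in the first hunk reads the stream from the end toward the start, keeping up to 64 bits in a register and topping it up as bits are consumed; the remaining() helpers added for bitReaderBytes and bitReaderShifted count how many bits are still unread so close() can flag corrupt input. A simplified sketch of the reverse-read pattern, assuming byte-at-a-time refills and omitting the final-byte alignment handling; this is an illustration, not the vendored type:

package main

import "fmt"

// revBitReader is a simplified illustration of the reverse bit reader in the
// diff above: bytes are consumed from the end of the slice and bits are
// served out of a 64-bit container.
type revBitReader struct {
	in       []byte
	off      uint   // next byte to read is in[off-1]
	value    uint64 // bit container
	bitsRead uint8  // how many of the 64 container bits are already consumed
}

func newRevBitReader(in []byte) *revBitReader {
	return &revBitReader{in: in, off: uint(len(in)), bitsRead: 64}
}

// fill tops up the container one byte at a time while input remains.
func (b *revBitReader) fill() {
	for b.bitsRead >= 8 && b.off > 0 {
		b.value = (b.value << 8) | uint64(b.in[b.off-1])
		b.bitsRead -= 8
		b.off--
	}
}

// peek returns the next n bits (1 <= n <= 56) without consuming them.
func (b *revBitReader) peek(n uint8) uint64 {
	b.fill()
	const regMask = 64 - 1
	return (b.value << (b.bitsRead & regMask)) >> ((64 - n) & regMask)
}

// advance consumes n bits.
func (b *revBitReader) advance(n uint8) { b.bitsRead += n }

func main() {
	r := newRevBitReader([]byte{0xAB, 0xCD})
	v := r.peek(8)
	r.advance(8)
	fmt.Printf("top byte: %#x\n", v) // prints 0xcd: the last byte is read first
}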
115 vendor/github.com/klauspost/compress/huff0/bitwriter.go generated vendored
@ -5,8 +5,6 @@

package huff0

import "fmt"

// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF} /* up to 16 bits */

// addBits16NC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
	b.nBits += bits
}

// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
	b.nBits += encA.nBits + encB.nBits
}

// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
// This is fastest if bits can be zero.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
	if bits == 0 {
		return
	}
	value <<= (16 - bits) & 15
	value >>= (16 - bits) & 15
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
	v := b.nBits >> 3
	switch v {
	case 0:
		return
	case 1:
		b.out = append(b.out,
			byte(b.bitContainer),
		)
		b.bitContainer >>= 1 << 3
	case 2:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
		)
		b.bitContainer >>= 2 << 3
	case 3:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
		)
		b.bitContainer >>= 3 << 3
	case 4:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
		)
		b.bitContainer >>= 4 << 3
	case 5:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
		)
		b.bitContainer >>= 5 << 3
	case 6:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
		)
		b.bitContainer >>= 6 << 3
	case 7:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
		)
		b.bitContainer >>= 7 << 3
	case 8:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
			byte(b.bitContainer>>56),
		)
		b.bitContainer = 0
		b.nBits = 0
		return
	default:
		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
	}
	b.nBits &= 7
}

// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {
@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
	b.flushAlign()
	return nil
}

// reset and continue writing by appending to out.
func (b *bitWriter) reset(out []byte) {
	b.bitContainer = 0
	b.nBits = 0
	b.out = out
}
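The bitWriter code removed above packs bits LSB-first into a 64-bit container and appends whole bytes to the output slice when flushing. A compact sketch of that add/flush pattern, with a single align-and-flush helper standing in for the unrolled flush() switch; the type and function names here are illustrative only:

package main

import "fmt"

// miniBitWriter illustrates the LSB-first bit packing used by the writer in
// the diff: bits accumulate in bitContainer and whole bytes are appended to out.
type miniBitWriter struct {
	bitContainer uint64
	nBits        uint8
	out          []byte
}

// addBits adds up to 16 bits; value must not have bits set above `bits`.
func (b *miniBitWriter) addBits(value uint16, bits uint8) {
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// flushAlign writes out all pending bits, padding the last byte with zeros.
func (b *miniBitWriter) flushAlign() {
	nbBytes := (b.nBits + 7) >> 3
	for i := uint8(0); i < nbBytes; i++ {
		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
	}
	b.bitContainer = 0
	b.nBits = 0
}

func main() {
	var w miniBitWriter
	w.addBits(0b101, 3) // three bits, LSB first
	w.addBits(0b11, 2)  // two more bits
	w.flushAlign()
	fmt.Printf("%08b\n", w.out) // [00011101]: five bits packed into one byte
}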
10 vendor/github.com/klauspost/compress/huff0/bytereader.go generated vendored
@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
	b.off = 0
}

// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
	b.off += int(n)
}

// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
	v3 := int32(b.b[b.off+3])
@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}

// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
	return b.b[b.off:]
}

// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
	return len(b.b) - b.off
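byteReader assembles little-endian integers by shifting individual bytes; the standard library's encoding/binary produces the same result. A small comparison sketch, illustrative only and not part of the vendored code:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := []byte{0x78, 0x56, 0x34, 0x12}

	// Manual assembly, as in the vendored byteReader.Uint32.
	v0 := uint32(buf[0])
	v1 := uint32(buf[1])
	v2 := uint32(buf[2])
	v3 := uint32(buf[3])
	manual := (v3 << 24) | (v2 << 16) | (v1 << 8) | v0

	// Equivalent result via the standard library.
	std := binary.LittleEndian.Uint32(buf)

	fmt.Printf("%#x %#x\n", manual, std) // both 0x12345678
}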
75 vendor/github.com/klauspost/compress/huff0/compress.go generated vendored
@ -2,6 +2,7 @@ package huff0

import (
	"fmt"
	"math"
	"runtime"
	"sync"
)
@ -161,6 +162,70 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
	return s.Out, false, nil
}

// EstimateSizes will estimate the data sizes
func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
	s, err = s.prepare(in)
	if err != nil {
		return 0, 0, 0, err
	}

	// Create histogram, if none was provided.
	tableSz, dataSz, reuseSz = -1, -1, -1
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}

	// We want the output size to be less than this:
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}

	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return 0, 0, 0, ErrIncompressible
		}
		// One symbol, use RLE
		return 0, 0, 0, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return 0, 0, 0, ErrIncompressible
	}

	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return 0, 0, 0, err
	}

	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}

	tableSz, err = s.cTable.estTableSize(s)
	if err != nil {
		return 0, 0, 0, err
	}
	if canReuse {
		reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
	}
	dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])

	// Restore
	return tableSz, dataSz, reuseSz, nil
}

func (s *Scratch) compress1X(src []byte) ([]byte, error) {
	return s.compress1xDo(s.Out, src)
}
@ -225,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
	if err != nil {
		return nil, err
	}
	if len(s.Out)-idx > math.MaxUint16 {
		// We cannot store the size in the jump table
		return nil, ErrIncompressible
	}
	// Write compressed length as little endian before block.
	if i < 3 {
		// Last length is not written.
@ -268,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
		return nil, errs[i]
	}
	o := s.tmpOut[i]
	if len(o) > math.MaxUint16 {
		// We cannot store the size in the jump table
		return nil, ErrIncompressible
	}
	// Write compressed length as little endian before block.
	if i < 3 {
		// Last length is not written.
@ -331,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
	return true
}

//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool {
	if len(c) < int(s.symbolLen) {
		return false
@ -536,7 +610,6 @@ func (s *Scratch) huffSort() {
	}
	nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
}
	return
}

func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
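EstimateSizes, added in this file, is exported, so callers can ask huff0 for the expected table, data, and table-reuse sizes before deciding whether entropy coding is worthwhile. A usage sketch: the function signature and the error values come from this diff, while the surrounding program and sample input are hypothetical.

package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	data := []byte("some reasonably repetitive payload payload payload")

	var s huff0.Scratch
	tableSz, dataSz, reuseSz, err := huff0.EstimateSizes(data, &s)
	switch err {
	case nil:
		fmt.Printf("table=%d data=%d reuse=%d (input %d bytes)\n",
			tableSz, dataSz, reuseSz, len(data))
	case huff0.ErrIncompressible, huff0.ErrUseRLE:
		// Entropy coding will not help here; fall back to raw or RLE storage.
		fmt.Println("skip huff0:", err)
	default:
		fmt.Println("estimate failed:", err)
	}
}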
959 vendor/github.com/klauspost/compress/huff0/decompress.go generated vendored
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff