mirror of
https://github.com/openfaas/faas.git
synced 2025-06-21 14:23:25 +00:00
Update vendoring via vndr
This commit is contained in:
2625
gateway/vendor/github.com/docker/distribution/registry/handlers/api_test.go
generated
vendored
2625
gateway/vendor/github.com/docker/distribution/registry/handlers/api_test.go
generated
vendored
File diff suppressed because it is too large
Load Diff
1046
gateway/vendor/github.com/docker/distribution/registry/handlers/app.go
generated
vendored
1046
gateway/vendor/github.com/docker/distribution/registry/handlers/app.go
generated
vendored
File diff suppressed because it is too large
Load Diff
279
gateway/vendor/github.com/docker/distribution/registry/handlers/app_test.go
generated
vendored
279
gateway/vendor/github.com/docker/distribution/registry/handlers/app_test.go
generated
vendored
@ -1,279 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution/configuration"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
_ "github.com/docker/distribution/registry/auth/silly"
|
||||
"github.com/docker/distribution/registry/storage"
|
||||
memorycache "github.com/docker/distribution/registry/storage/cache/memory"
|
||||
"github.com/docker/distribution/registry/storage/driver/testdriver"
|
||||
)
|
||||
|
||||
// TestAppDispatcher builds an application with a test dispatcher and ensures
|
||||
// that requests are properly dispatched and the handlers are constructed.
|
||||
// This only tests the dispatch mechanism. The underlying dispatchers must be
|
||||
// tested individually.
|
||||
func TestAppDispatcher(t *testing.T) {
|
||||
driver := testdriver.New()
|
||||
ctx := context.Background()
|
||||
registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating registry: %v", err)
|
||||
}
|
||||
app := &App{
|
||||
Config: &configuration.Configuration{},
|
||||
Context: ctx,
|
||||
router: v2.Router(),
|
||||
driver: driver,
|
||||
registry: registry,
|
||||
}
|
||||
server := httptest.NewServer(app)
|
||||
defer server.Close()
|
||||
router := v2.Router()
|
||||
|
||||
serverURL, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing server url: %v", err)
|
||||
}
|
||||
|
||||
varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc {
|
||||
return func(ctx *Context, r *http.Request) http.Handler {
|
||||
// Always checks the same name context
|
||||
if ctx.Repository.Named().Name() != getName(ctx) {
|
||||
t.Fatalf("unexpected name: %q != %q", ctx.Repository.Named().Name(), "foo/bar")
|
||||
}
|
||||
|
||||
// Check that we have all that is expected
|
||||
for expectedK, expectedV := range expectedVars {
|
||||
if ctx.Value(expectedK) != expectedV {
|
||||
t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that we only have variables that are expected
|
||||
for k, v := range ctx.Value("vars").(map[string]string) {
|
||||
_, ok := expectedVars[k]
|
||||
|
||||
if !ok { // name is checked on context
|
||||
// We have an unexpected key, fail
|
||||
t.Fatalf("unexpected key %q in vars with value %q", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// unflatten a list of variables, suitable for gorilla/mux, to a map[string]string
|
||||
unflatten := func(vars []string) map[string]string {
|
||||
m := make(map[string]string)
|
||||
for i := 0; i < len(vars)-1; i = i + 2 {
|
||||
m[vars[i]] = vars[i+1]
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
for _, testcase := range []struct {
|
||||
endpoint string
|
||||
vars []string
|
||||
}{
|
||||
{
|
||||
endpoint: v2.RouteNameManifest,
|
||||
vars: []string{
|
||||
"name", "foo/bar",
|
||||
"reference", "sometag",
|
||||
},
|
||||
},
|
||||
{
|
||||
endpoint: v2.RouteNameTags,
|
||||
vars: []string{
|
||||
"name", "foo/bar",
|
||||
},
|
||||
},
|
||||
{
|
||||
endpoint: v2.RouteNameBlobUpload,
|
||||
vars: []string{
|
||||
"name", "foo/bar",
|
||||
},
|
||||
},
|
||||
{
|
||||
endpoint: v2.RouteNameBlobUploadChunk,
|
||||
vars: []string{
|
||||
"name", "foo/bar",
|
||||
"uuid", "theuuid",
|
||||
},
|
||||
},
|
||||
} {
|
||||
app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
|
||||
route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
|
||||
u, err := route.URL(testcase.vars...)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := http.Get(u.String())
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewApp covers the creation of an application via NewApp with a
|
||||
// configuration.
|
||||
func TestNewApp(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
config := configuration.Configuration{
|
||||
Storage: configuration.Storage{
|
||||
"testdriver": nil,
|
||||
"maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
|
||||
"enabled": false,
|
||||
}},
|
||||
},
|
||||
Auth: configuration.Auth{
|
||||
// For now, we simply test that new auth results in a viable
|
||||
// application.
|
||||
"silly": {
|
||||
"realm": "realm-test",
|
||||
"service": "service-test",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Mostly, with this test, given a sane configuration, we are simply
|
||||
// ensuring that NewApp doesn't panic. We might want to tweak this
|
||||
// behavior.
|
||||
app := NewApp(ctx, &config)
|
||||
|
||||
server := httptest.NewServer(app)
|
||||
defer server.Close()
|
||||
builder, err := v2.NewURLBuilderFromString(server.URL, false)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating urlbuilder: %v", err)
|
||||
}
|
||||
|
||||
baseURL, err := builder.BuildBaseURL()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating baseURL: %v", err)
|
||||
}
|
||||
|
||||
// TODO(stevvooe): The rest of this test might belong in the API tests.
|
||||
|
||||
// Just hit the app and make sure we get a 401 Unauthorized error.
|
||||
req, err := http.Get(baseURL)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error during GET: %v", err)
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
if req.StatusCode != http.StatusUnauthorized {
|
||||
t.Fatalf("unexpected status code during request: %v", err)
|
||||
}
|
||||
|
||||
if req.Header.Get("Content-Type") != "application/json; charset=utf-8" {
|
||||
t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8")
|
||||
}
|
||||
|
||||
expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\""
|
||||
if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a {
|
||||
t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a)
|
||||
}
|
||||
|
||||
var errs errcode.Errors
|
||||
dec := json.NewDecoder(req.Body)
|
||||
if err := dec.Decode(&errs); err != nil {
|
||||
t.Fatalf("error decoding error response: %v", err)
|
||||
}
|
||||
|
||||
err2, ok := errs[0].(errcode.ErrorCoder)
|
||||
if !ok {
|
||||
t.Fatalf("not an ErrorCoder: %#v", errs[0])
|
||||
}
|
||||
if err2.ErrorCode() != errcode.ErrorCodeUnauthorized {
|
||||
t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized)
|
||||
}
|
||||
}
|
||||
|
||||
// Test the access record accumulator
|
||||
func TestAppendAccessRecords(t *testing.T) {
|
||||
repo := "testRepo"
|
||||
|
||||
expectedResource := auth.Resource{
|
||||
Type: "repository",
|
||||
Name: repo,
|
||||
}
|
||||
|
||||
expectedPullRecord := auth.Access{
|
||||
Resource: expectedResource,
|
||||
Action: "pull",
|
||||
}
|
||||
expectedPushRecord := auth.Access{
|
||||
Resource: expectedResource,
|
||||
Action: "push",
|
||||
}
|
||||
expectedDeleteRecord := auth.Access{
|
||||
Resource: expectedResource,
|
||||
Action: "delete",
|
||||
}
|
||||
|
||||
records := []auth.Access{}
|
||||
result := appendAccessRecords(records, "GET", repo)
|
||||
expectedResult := []auth.Access{expectedPullRecord}
|
||||
if ok := reflect.DeepEqual(result, expectedResult); !ok {
|
||||
t.Fatalf("Actual access record differs from expected")
|
||||
}
|
||||
|
||||
records = []auth.Access{}
|
||||
result = appendAccessRecords(records, "HEAD", repo)
|
||||
expectedResult = []auth.Access{expectedPullRecord}
|
||||
if ok := reflect.DeepEqual(result, expectedResult); !ok {
|
||||
t.Fatalf("Actual access record differs from expected")
|
||||
}
|
||||
|
||||
records = []auth.Access{}
|
||||
result = appendAccessRecords(records, "POST", repo)
|
||||
expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
|
||||
if ok := reflect.DeepEqual(result, expectedResult); !ok {
|
||||
t.Fatalf("Actual access record differs from expected")
|
||||
}
|
||||
|
||||
records = []auth.Access{}
|
||||
result = appendAccessRecords(records, "PUT", repo)
|
||||
expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
|
||||
if ok := reflect.DeepEqual(result, expectedResult); !ok {
|
||||
t.Fatalf("Actual access record differs from expected")
|
||||
}
|
||||
|
||||
records = []auth.Access{}
|
||||
result = appendAccessRecords(records, "PATCH", repo)
|
||||
expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
|
||||
if ok := reflect.DeepEqual(result, expectedResult); !ok {
|
||||
t.Fatalf("Actual access record differs from expected")
|
||||
}
|
||||
|
||||
records = []auth.Access{}
|
||||
result = appendAccessRecords(records, "DELETE", repo)
|
||||
expectedResult = []auth.Access{expectedDeleteRecord}
|
||||
if ok := reflect.DeepEqual(result, expectedResult); !ok {
|
||||
t.Fatalf("Actual access record differs from expected")
|
||||
}
|
||||
|
||||
}
|
11
gateway/vendor/github.com/docker/distribution/registry/handlers/basicauth.go
generated
vendored
11
gateway/vendor/github.com/docker/distribution/registry/handlers/basicauth.go
generated
vendored
@ -1,11 +0,0 @@
|
||||
// +build go1.4
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func basicAuth(r *http.Request) (username, password string, ok bool) {
|
||||
return r.BasicAuth()
|
||||
}
|
41
gateway/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go
generated
vendored
41
gateway/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go
generated
vendored
@ -1,41 +0,0 @@
|
||||
// +build !go1.4
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we
|
||||
// can compile on go1.3 and earlier.
|
||||
|
||||
// BasicAuth returns the username and password provided in the request's
|
||||
// Authorization header, if the request uses HTTP Basic Authentication.
|
||||
// See RFC 2617, Section 2.
|
||||
func basicAuth(r *http.Request) (username, password string, ok bool) {
|
||||
auth := r.Header.Get("Authorization")
|
||||
if auth == "" {
|
||||
return
|
||||
}
|
||||
return parseBasicAuth(auth)
|
||||
}
|
||||
|
||||
// parseBasicAuth parses an HTTP Basic Authentication string.
|
||||
// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
|
||||
func parseBasicAuth(auth string) (username, password string, ok bool) {
|
||||
if !strings.HasPrefix(auth, "Basic ") {
|
||||
return
|
||||
}
|
||||
c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cs := string(c)
|
||||
s := strings.IndexByte(cs, ':')
|
||||
if s < 0 {
|
||||
return
|
||||
}
|
||||
return cs[:s], cs[s+1:], true
|
||||
}
|
99
gateway/vendor/github.com/docker/distribution/registry/handlers/blob.go
generated
vendored
99
gateway/vendor/github.com/docker/distribution/registry/handlers/blob.go
generated
vendored
@ -1,99 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// blobDispatcher uses the request context to build a blobHandler.
|
||||
func blobDispatcher(ctx *Context, r *http.Request) http.Handler {
|
||||
dgst, err := getDigest(ctx)
|
||||
if err != nil {
|
||||
|
||||
if err == errDigestNotAvailable {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
|
||||
})
|
||||
}
|
||||
|
||||
blobHandler := &blobHandler{
|
||||
Context: ctx,
|
||||
Digest: dgst,
|
||||
}
|
||||
|
||||
mhandler := handlers.MethodHandler{
|
||||
"GET": http.HandlerFunc(blobHandler.GetBlob),
|
||||
"HEAD": http.HandlerFunc(blobHandler.GetBlob),
|
||||
}
|
||||
|
||||
if !ctx.readOnly {
|
||||
mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob)
|
||||
}
|
||||
|
||||
return mhandler
|
||||
}
|
||||
|
||||
// blobHandler serves http blob requests.
|
||||
type blobHandler struct {
|
||||
*Context
|
||||
|
||||
Digest digest.Digest
|
||||
}
|
||||
|
||||
// GetBlob fetches the binary data from backend storage returns it in the
|
||||
// response.
|
||||
func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) {
|
||||
context.GetLogger(bh).Debug("GetBlob")
|
||||
blobs := bh.Repository.Blobs(bh)
|
||||
desc, err := blobs.Stat(bh, bh.Digest)
|
||||
if err != nil {
|
||||
if err == distribution.ErrBlobUnknown {
|
||||
bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest))
|
||||
} else {
|
||||
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil {
|
||||
context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err)
|
||||
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteBlob deletes a layer blob
|
||||
func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) {
|
||||
context.GetLogger(bh).Debug("DeleteBlob")
|
||||
|
||||
blobs := bh.Repository.Blobs(bh)
|
||||
err := blobs.Delete(bh, bh.Digest)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case distribution.ErrUnsupported:
|
||||
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported)
|
||||
return
|
||||
case distribution.ErrBlobUnknown:
|
||||
bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown)
|
||||
return
|
||||
default:
|
||||
bh.Errors = append(bh.Errors, err)
|
||||
context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Length", "0")
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
}
|
368
gateway/vendor/github.com/docker/distribution/registry/handlers/blobupload.go
generated
vendored
368
gateway/vendor/github.com/docker/distribution/registry/handlers/blobupload.go
generated
vendored
@ -1,368 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
ctxu "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/storage"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// blobUploadDispatcher constructs and returns the blob upload handler for the
|
||||
// given request context.
|
||||
func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
|
||||
buh := &blobUploadHandler{
|
||||
Context: ctx,
|
||||
UUID: getUploadUUID(ctx),
|
||||
}
|
||||
|
||||
handler := handlers.MethodHandler{
|
||||
"GET": http.HandlerFunc(buh.GetUploadStatus),
|
||||
"HEAD": http.HandlerFunc(buh.GetUploadStatus),
|
||||
}
|
||||
|
||||
if !ctx.readOnly {
|
||||
handler["POST"] = http.HandlerFunc(buh.StartBlobUpload)
|
||||
handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData)
|
||||
handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete)
|
||||
handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload)
|
||||
}
|
||||
|
||||
if buh.UUID != "" {
|
||||
state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
|
||||
if err != nil {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err)
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
|
||||
})
|
||||
}
|
||||
buh.State = state
|
||||
|
||||
if state.Name != ctx.Repository.Named().Name() {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name())
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
|
||||
})
|
||||
}
|
||||
|
||||
if state.UUID != buh.UUID {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID)
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
|
||||
})
|
||||
}
|
||||
|
||||
blobs := ctx.Repository.Blobs(buh)
|
||||
upload, err := blobs.Resume(buh, buh.UUID)
|
||||
if err != nil {
|
||||
ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err)
|
||||
if err == distribution.ErrBlobUploadUnknown {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err))
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
})
|
||||
}
|
||||
buh.Upload = upload
|
||||
|
||||
if size := upload.Size(); size != buh.State.Offset {
|
||||
defer upload.Close()
|
||||
ctxu.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset)
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
|
||||
upload.Cancel(buh)
|
||||
})
|
||||
}
|
||||
return closeResources(handler, buh.Upload)
|
||||
}
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// blobUploadHandler handles the http blob upload process.
|
||||
type blobUploadHandler struct {
|
||||
*Context
|
||||
|
||||
// UUID identifies the upload instance for the current request. Using UUID
|
||||
// to key blob writers since this implementation uses UUIDs.
|
||||
UUID string
|
||||
|
||||
Upload distribution.BlobWriter
|
||||
|
||||
State blobUploadState
|
||||
}
|
||||
|
||||
// StartBlobUpload begins the blob upload process and allocates a server-side
|
||||
// blob writer session, optionally mounting the blob from a separate repository.
|
||||
func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) {
|
||||
var options []distribution.BlobCreateOption
|
||||
|
||||
fromRepo := r.FormValue("from")
|
||||
mountDigest := r.FormValue("mount")
|
||||
|
||||
if mountDigest != "" && fromRepo != "" {
|
||||
opt, err := buh.createBlobMountOption(fromRepo, mountDigest)
|
||||
if opt != nil && err == nil {
|
||||
options = append(options, opt)
|
||||
}
|
||||
}
|
||||
|
||||
blobs := buh.Repository.Blobs(buh)
|
||||
upload, err := blobs.Create(buh, options...)
|
||||
|
||||
if err != nil {
|
||||
if ebm, ok := err.(distribution.ErrBlobMounted); ok {
|
||||
if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
}
|
||||
} else if err == distribution.ErrUnsupported {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
|
||||
} else {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
buh.Upload = upload
|
||||
|
||||
if err := buh.blobUploadResponse(w, r, true); err != nil {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Docker-Upload-UUID", buh.Upload.ID())
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
}
|
||||
|
||||
// GetUploadStatus returns the status of a given upload, identified by id.
|
||||
func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) {
|
||||
if buh.Upload == nil {
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(dmcgowan): Set last argument to false in blobUploadResponse when
|
||||
// resumable upload is supported. This will enable returning a non-zero
|
||||
// range for clients to begin uploading at an offset.
|
||||
if err := buh.blobUploadResponse(w, r, true); err != nil {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Docker-Upload-UUID", buh.UUID)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// PatchBlobData writes data to an upload.
|
||||
func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) {
|
||||
if buh.Upload == nil {
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
|
||||
return
|
||||
}
|
||||
|
||||
ct := r.Header.Get("Content-Type")
|
||||
if ct != "" && ct != "application/octet-stream" {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type")))
|
||||
// TODO(dmcgowan): encode error
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(dmcgowan): support Content-Range header to seek and write range
|
||||
|
||||
if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil {
|
||||
// copyFullPayload reports the error if necessary
|
||||
return
|
||||
}
|
||||
|
||||
if err := buh.blobUploadResponse(w, r, false); err != nil {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
}
|
||||
|
||||
// PutBlobUploadComplete takes the final request of a blob upload. The
|
||||
// request may include all the blob data or no blob data. Any data
|
||||
// provided is received and verified. If successful, the blob is linked
|
||||
// into the blob store and 201 Created is returned with the canonical
|
||||
// url of the blob.
|
||||
func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) {
|
||||
if buh.Upload == nil {
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
|
||||
return
|
||||
}
|
||||
|
||||
dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters!
|
||||
|
||||
if dgstStr == "" {
|
||||
// no digest? return error, but allow retry.
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing"))
|
||||
return
|
||||
}
|
||||
|
||||
dgst, err := digest.Parse(dgstStr)
|
||||
if err != nil {
|
||||
// no digest? return error, but allow retry.
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed"))
|
||||
return
|
||||
}
|
||||
|
||||
if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil {
|
||||
// copyFullPayload reports the error if necessary
|
||||
return
|
||||
}
|
||||
|
||||
desc, err := buh.Upload.Commit(buh, distribution.Descriptor{
|
||||
Digest: dgst,
|
||||
|
||||
// TODO(stevvooe): This isn't wildly important yet, but we should
|
||||
// really set the mediatype. For now, we can let the backend take care
|
||||
// of this.
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case distribution.ErrBlobInvalidDigest:
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
|
||||
case errcode.Error:
|
||||
buh.Errors = append(buh.Errors, err)
|
||||
default:
|
||||
switch err {
|
||||
case distribution.ErrAccessDenied:
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied)
|
||||
case distribution.ErrUnsupported:
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
|
||||
case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported:
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
|
||||
default:
|
||||
ctxu.GetLogger(buh).Errorf("unknown error completing upload: %v", err)
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Clean up the backend blob data if there was an error.
|
||||
if err := buh.Upload.Cancel(buh); err != nil {
|
||||
// If the cleanup fails, all we can do is observe and report.
|
||||
ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
if err := buh.writeBlobCreatedHeaders(w, desc); err != nil {
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// CancelBlobUpload cancels an in-progress upload of a blob.
|
||||
func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) {
|
||||
if buh.Upload == nil {
|
||||
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Docker-Upload-UUID", buh.UUID)
|
||||
if err := buh.Upload.Cancel(buh); err != nil {
|
||||
ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err)
|
||||
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// blobUploadResponse provides a standard request for uploading blobs and
|
||||
// chunk responses. This sets the correct headers but the response status is
|
||||
// left to the caller. The fresh argument is used to ensure that new blob
|
||||
// uploads always start at a 0 offset. This allows disabling resumable push by
|
||||
// always returning a 0 offset on check status.
|
||||
func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error {
|
||||
// TODO(stevvooe): Need a better way to manage the upload state automatically.
|
||||
buh.State.Name = buh.Repository.Named().Name()
|
||||
buh.State.UUID = buh.Upload.ID()
|
||||
buh.Upload.Close()
|
||||
buh.State.Offset = buh.Upload.Size()
|
||||
buh.State.StartedAt = buh.Upload.StartedAt()
|
||||
|
||||
token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State)
|
||||
if err != nil {
|
||||
ctxu.GetLogger(buh).Infof("error building upload state token: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL(
|
||||
buh.Repository.Named(), buh.Upload.ID(),
|
||||
url.Values{
|
||||
"_state": []string{token},
|
||||
})
|
||||
if err != nil {
|
||||
ctxu.GetLogger(buh).Infof("error building upload url: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
endRange := buh.Upload.Size()
|
||||
if endRange > 0 {
|
||||
endRange = endRange - 1
|
||||
}
|
||||
|
||||
w.Header().Set("Docker-Upload-UUID", buh.UUID)
|
||||
w.Header().Set("Location", uploadURL)
|
||||
|
||||
w.Header().Set("Content-Length", "0")
|
||||
w.Header().Set("Range", fmt.Sprintf("0-%d", endRange))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// mountBlob attempts to mount a blob from another repository by its digest. If
|
||||
// successful, the blob is linked into the blob store and 201 Created is
|
||||
// returned with the canonical url of the blob.
|
||||
func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) {
|
||||
dgst, err := digest.Parse(mountDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ref, err := reference.WithName(fromRepo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
canonical, err := reference.WithDigest(ref, dgst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return storage.WithMountFrom(canonical), nil
|
||||
}
|
||||
|
||||
// writeBlobCreatedHeaders writes the standard headers describing a newly
|
||||
// created blob. A 201 Created is written as well as the canonical URL and
|
||||
// blob digest.
|
||||
func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error {
|
||||
ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blobURL, err := buh.urlBuilder.BuildBlobURL(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.Header().Set("Location", blobURL)
|
||||
w.Header().Set("Content-Length", "0")
|
||||
w.Header().Set("Docker-Content-Digest", desc.Digest.String())
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
return nil
|
||||
}
|
98
gateway/vendor/github.com/docker/distribution/registry/handlers/catalog.go
generated
vendored
98
gateway/vendor/github.com/docker/distribution/registry/handlers/catalog.go
generated
vendored
@ -1,98 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/gorilla/handlers"
|
||||
)
|
||||
|
||||
const maximumReturnedEntries = 100
|
||||
|
||||
func catalogDispatcher(ctx *Context, r *http.Request) http.Handler {
|
||||
catalogHandler := &catalogHandler{
|
||||
Context: ctx,
|
||||
}
|
||||
|
||||
return handlers.MethodHandler{
|
||||
"GET": http.HandlerFunc(catalogHandler.GetCatalog),
|
||||
}
|
||||
}
|
||||
|
||||
type catalogHandler struct {
|
||||
*Context
|
||||
}
|
||||
|
||||
type catalogAPIResponse struct {
|
||||
Repositories []string `json:"repositories"`
|
||||
}
|
||||
|
||||
func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) {
|
||||
var moreEntries = true
|
||||
|
||||
q := r.URL.Query()
|
||||
lastEntry := q.Get("last")
|
||||
maxEntries, err := strconv.Atoi(q.Get("n"))
|
||||
if err != nil || maxEntries < 0 {
|
||||
maxEntries = maximumReturnedEntries
|
||||
}
|
||||
|
||||
repos := make([]string, maxEntries)
|
||||
|
||||
filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry)
|
||||
_, pathNotFound := err.(driver.PathNotFoundError)
|
||||
|
||||
if err == io.EOF || pathNotFound {
|
||||
moreEntries = false
|
||||
} else if err != nil {
|
||||
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
// Add a link header if there are more entries to retrieve
|
||||
if moreEntries {
|
||||
lastEntry = repos[len(repos)-1]
|
||||
urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry)
|
||||
if err != nil {
|
||||
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return
|
||||
}
|
||||
w.Header().Set("Link", urlStr)
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
if err := enc.Encode(catalogAPIResponse{
|
||||
Repositories: repos[0:filled],
|
||||
}); err != nil {
|
||||
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Use the original URL from the request to create a new URL for
|
||||
// the link header
|
||||
func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) {
|
||||
calledURL, err := url.Parse(origURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Add("n", strconv.Itoa(maxEntries))
|
||||
v.Add("last", lastEntry)
|
||||
|
||||
calledURL.RawQuery = v.Encode()
|
||||
|
||||
calledURL.Fragment = ""
|
||||
urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String())
|
||||
|
||||
return urlStr, nil
|
||||
}
|
92
gateway/vendor/github.com/docker/distribution/registry/handlers/context.go
generated
vendored
92
gateway/vendor/github.com/docker/distribution/registry/handlers/context.go
generated
vendored
@ -1,92 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
ctxu "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Context should contain the request specific context for use in across
|
||||
// handlers. Resources that don't need to be shared across handlers should not
|
||||
// be on this object.
|
||||
type Context struct {
|
||||
// App points to the application structure that created this context.
|
||||
*App
|
||||
context.Context
|
||||
|
||||
// Repository is the repository for the current request. All requests
|
||||
// should be scoped to a single repository. This field may be nil.
|
||||
Repository distribution.Repository
|
||||
|
||||
// Errors is a collection of errors encountered during the request to be
|
||||
// returned to the client API. If errors are added to the collection, the
|
||||
// handler *must not* start the response via http.ResponseWriter.
|
||||
Errors errcode.Errors
|
||||
|
||||
urlBuilder *v2.URLBuilder
|
||||
|
||||
// TODO(stevvooe): The goal is too completely factor this context and
|
||||
// dispatching out of the web application. Ideally, we should lean on
|
||||
// context.Context for injection of these resources.
|
||||
}
|
||||
|
||||
// Value overrides context.Context.Value to ensure that calls are routed to
|
||||
// correct context.
|
||||
func (ctx *Context) Value(key interface{}) interface{} {
|
||||
return ctx.Context.Value(key)
|
||||
}
|
||||
|
||||
func getName(ctx context.Context) (name string) {
|
||||
return ctxu.GetStringValue(ctx, "vars.name")
|
||||
}
|
||||
|
||||
func getReference(ctx context.Context) (reference string) {
|
||||
return ctxu.GetStringValue(ctx, "vars.reference")
|
||||
}
|
||||
|
||||
var errDigestNotAvailable = fmt.Errorf("digest not available in context")
|
||||
|
||||
func getDigest(ctx context.Context) (dgst digest.Digest, err error) {
|
||||
dgstStr := ctxu.GetStringValue(ctx, "vars.digest")
|
||||
|
||||
if dgstStr == "" {
|
||||
ctxu.GetLogger(ctx).Errorf("digest not available")
|
||||
return "", errDigestNotAvailable
|
||||
}
|
||||
|
||||
d, err := digest.Parse(dgstStr)
|
||||
if err != nil {
|
||||
ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func getUploadUUID(ctx context.Context) (uuid string) {
|
||||
return ctxu.GetStringValue(ctx, "vars.uuid")
|
||||
}
|
||||
|
||||
// getUserName attempts to resolve a username from the context and request. If
|
||||
// a username cannot be resolved, the empty string is returned.
|
||||
func getUserName(ctx context.Context, r *http.Request) string {
|
||||
username := ctxu.GetStringValue(ctx, auth.UserNameKey)
|
||||
|
||||
// Fallback to request user with basic auth
|
||||
if username == "" {
|
||||
var ok bool
|
||||
uname, _, ok := basicAuth(r)
|
||||
if ok {
|
||||
username = uname
|
||||
}
|
||||
}
|
||||
|
||||
return username
|
||||
}
|
210
gateway/vendor/github.com/docker/distribution/registry/handlers/health_test.go
generated
vendored
210
gateway/vendor/github.com/docker/distribution/registry/handlers/health_test.go
generated
vendored
@ -1,210 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/configuration"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/health"
|
||||
)
|
||||
|
||||
func TestFileHealthCheck(t *testing.T) {
|
||||
interval := time.Second
|
||||
|
||||
tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck")
|
||||
if err != nil {
|
||||
t.Fatalf("could not create temporary file: %v", err)
|
||||
}
|
||||
defer tmpfile.Close()
|
||||
|
||||
config := &configuration.Configuration{
|
||||
Storage: configuration.Storage{
|
||||
"inmemory": configuration.Parameters{},
|
||||
"maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
|
||||
"enabled": false,
|
||||
}},
|
||||
},
|
||||
Health: configuration.Health{
|
||||
FileCheckers: []configuration.FileChecker{
|
||||
{
|
||||
Interval: interval,
|
||||
File: tmpfile.Name(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
app := NewApp(ctx, config)
|
||||
healthRegistry := health.NewRegistry()
|
||||
app.RegisterHealthChecks(healthRegistry)
|
||||
|
||||
// Wait for health check to happen
|
||||
<-time.After(2 * interval)
|
||||
|
||||
status := healthRegistry.CheckStatus()
|
||||
if len(status) != 1 {
|
||||
t.Fatal("expected 1 item in health check results")
|
||||
}
|
||||
if status[tmpfile.Name()] != "file exists" {
|
||||
t.Fatal(`did not get "file exists" result for health check`)
|
||||
}
|
||||
|
||||
os.Remove(tmpfile.Name())
|
||||
|
||||
<-time.After(2 * interval)
|
||||
if len(healthRegistry.CheckStatus()) != 0 {
|
||||
t.Fatal("expected 0 items in health check results")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTCPHealthCheck(t *testing.T) {
|
||||
interval := time.Second
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("could not create listener: %v", err)
|
||||
}
|
||||
addrStr := ln.Addr().String()
|
||||
|
||||
// Start accepting
|
||||
go func() {
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
// listener was closed
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
config := &configuration.Configuration{
|
||||
Storage: configuration.Storage{
|
||||
"inmemory": configuration.Parameters{},
|
||||
"maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
|
||||
"enabled": false,
|
||||
}},
|
||||
},
|
||||
Health: configuration.Health{
|
||||
TCPCheckers: []configuration.TCPChecker{
|
||||
{
|
||||
Interval: interval,
|
||||
Addr: addrStr,
|
||||
Timeout: 500 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
app := NewApp(ctx, config)
|
||||
healthRegistry := health.NewRegistry()
|
||||
app.RegisterHealthChecks(healthRegistry)
|
||||
|
||||
// Wait for health check to happen
|
||||
<-time.After(2 * interval)
|
||||
|
||||
if len(healthRegistry.CheckStatus()) != 0 {
|
||||
t.Fatal("expected 0 items in health check results")
|
||||
}
|
||||
|
||||
ln.Close()
|
||||
<-time.After(2 * interval)
|
||||
|
||||
// Health check should now fail
|
||||
status := healthRegistry.CheckStatus()
|
||||
if len(status) != 1 {
|
||||
t.Fatal("expected 1 item in health check results")
|
||||
}
|
||||
if status[addrStr] != "connection to "+addrStr+" failed" {
|
||||
t.Fatal(`did not get "connection failed" result for health check`)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPHealthCheck(t *testing.T) {
|
||||
interval := time.Second
|
||||
threshold := 3
|
||||
|
||||
stopFailing := make(chan struct{})
|
||||
|
||||
checkedServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != "HEAD" {
|
||||
t.Fatalf("expected HEAD request, got %s", r.Method)
|
||||
}
|
||||
select {
|
||||
case <-stopFailing:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
}))
|
||||
|
||||
config := &configuration.Configuration{
|
||||
Storage: configuration.Storage{
|
||||
"inmemory": configuration.Parameters{},
|
||||
"maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
|
||||
"enabled": false,
|
||||
}},
|
||||
},
|
||||
Health: configuration.Health{
|
||||
HTTPCheckers: []configuration.HTTPChecker{
|
||||
{
|
||||
Interval: interval,
|
||||
URI: checkedServer.URL,
|
||||
Threshold: threshold,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
app := NewApp(ctx, config)
|
||||
healthRegistry := health.NewRegistry()
|
||||
app.RegisterHealthChecks(healthRegistry)
|
||||
|
||||
for i := 0; ; i++ {
|
||||
<-time.After(interval)
|
||||
|
||||
status := healthRegistry.CheckStatus()
|
||||
|
||||
if i < threshold-1 {
|
||||
// definitely shouldn't have hit the threshold yet
|
||||
if len(status) != 0 {
|
||||
t.Fatal("expected 1 item in health check results")
|
||||
}
|
||||
continue
|
||||
}
|
||||
if i < threshold+1 {
|
||||
// right on the threshold - don't expect a failure yet
|
||||
continue
|
||||
}
|
||||
|
||||
if len(status) != 1 {
|
||||
t.Fatal("expected 1 item in health check results")
|
||||
}
|
||||
if status[checkedServer.URL] != "downstream service returned unexpected status: 500" {
|
||||
t.Fatal("did not get expected result for health check")
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// Signal HTTP handler to start returning 200
|
||||
close(stopFailing)
|
||||
|
||||
<-time.After(2 * interval)
|
||||
|
||||
if len(healthRegistry.CheckStatus()) != 0 {
|
||||
t.Fatal("expected 0 items in health check results")
|
||||
}
|
||||
}
|
66
gateway/vendor/github.com/docker/distribution/registry/handlers/helpers.go
generated
vendored
66
gateway/vendor/github.com/docker/distribution/registry/handlers/helpers.go
generated
vendored
@ -1,66 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
ctxu "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
)
|
||||
|
||||
// closeResources closes all the provided resources after running the target
|
||||
// handler.
|
||||
func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
for _, closer := range closers {
|
||||
defer closer.Close()
|
||||
}
|
||||
handler.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// copyFullPayload copies the payload of an HTTP request to destWriter. If it
|
||||
// receives less content than expected, and the client disconnected during the
|
||||
// upload, it avoids sending a 400 error to keep the logs cleaner.
|
||||
func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error {
|
||||
// Get a channel that tells us if the client disconnects
|
||||
var clientClosed <-chan bool
|
||||
if notifier, ok := responseWriter.(http.CloseNotifier); ok {
|
||||
clientClosed = notifier.CloseNotify()
|
||||
} else {
|
||||
ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter)
|
||||
}
|
||||
|
||||
// Read in the data, if any.
|
||||
copied, err := io.Copy(destWriter, r.Body)
|
||||
if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
|
||||
// Didn't receive as much content as expected. Did the client
|
||||
// disconnect during the request? If so, avoid returning a 400
|
||||
// error to keep the logs cleaner.
|
||||
select {
|
||||
case <-clientClosed:
|
||||
// Set the response code to "499 Client Closed Request"
|
||||
// Even though the connection has already been closed,
|
||||
// this causes the logger to pick up a 499 error
|
||||
// instead of showing 0 for the HTTP status.
|
||||
responseWriter.WriteHeader(499)
|
||||
|
||||
ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{
|
||||
"error": err,
|
||||
"copied": copied,
|
||||
"contentLength": r.ContentLength,
|
||||
}, "error", "copied", "contentLength").Error("client disconnected during " + action)
|
||||
return errors.New("client disconnected")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err)
|
||||
*errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
74
gateway/vendor/github.com/docker/distribution/registry/handlers/hmac.go
generated
vendored
74
gateway/vendor/github.com/docker/distribution/registry/handlers/hmac.go
generated
vendored
@ -1,74 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// blobUploadState captures the state serializable state of the blob upload.
|
||||
type blobUploadState struct {
|
||||
// name is the primary repository under which the blob will be linked.
|
||||
Name string
|
||||
|
||||
// UUID identifies the upload.
|
||||
UUID string
|
||||
|
||||
// offset contains the current progress of the upload.
|
||||
Offset int64
|
||||
|
||||
// StartedAt is the original start time of the upload.
|
||||
StartedAt time.Time
|
||||
}
|
||||
|
||||
type hmacKey string
|
||||
|
||||
var errInvalidSecret = fmt.Errorf("invalid secret")
|
||||
|
||||
// unpackUploadState unpacks and validates the blob upload state from the
|
||||
// token, using the hmacKey secret.
|
||||
func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) {
|
||||
var state blobUploadState
|
||||
|
||||
tokenBytes, err := base64.URLEncoding.DecodeString(token)
|
||||
if err != nil {
|
||||
return state, err
|
||||
}
|
||||
mac := hmac.New(sha256.New, []byte(secret))
|
||||
|
||||
if len(tokenBytes) < mac.Size() {
|
||||
return state, errInvalidSecret
|
||||
}
|
||||
|
||||
macBytes := tokenBytes[:mac.Size()]
|
||||
messageBytes := tokenBytes[mac.Size():]
|
||||
|
||||
mac.Write(messageBytes)
|
||||
if !hmac.Equal(mac.Sum(nil), macBytes) {
|
||||
return state, errInvalidSecret
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(messageBytes, &state); err != nil {
|
||||
return state, err
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// packUploadState packs the upload state signed with and hmac digest using
|
||||
// the hmacKey secret, encoding to url safe base64. The resulting token can be
|
||||
// used to share data with minimized risk of external tampering.
|
||||
func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) {
|
||||
mac := hmac.New(sha256.New, []byte(secret))
|
||||
p, err := json.Marshal(lus)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
mac.Write(p)
|
||||
|
||||
return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
|
||||
}
|
117
gateway/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go
generated
vendored
117
gateway/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go
generated
vendored
@ -1,117 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import "testing"
|
||||
|
||||
var blobUploadStates = []blobUploadState{
|
||||
{
|
||||
Name: "hello",
|
||||
UUID: "abcd-1234-qwer-0987",
|
||||
Offset: 0,
|
||||
},
|
||||
{
|
||||
Name: "hello-world",
|
||||
UUID: "abcd-1234-qwer-0987",
|
||||
Offset: 0,
|
||||
},
|
||||
{
|
||||
Name: "h3ll0_w0rld",
|
||||
UUID: "abcd-1234-qwer-0987",
|
||||
Offset: 1337,
|
||||
},
|
||||
{
|
||||
Name: "ABCDEFG",
|
||||
UUID: "ABCD-1234-QWER-0987",
|
||||
Offset: 1234567890,
|
||||
},
|
||||
{
|
||||
Name: "this-is-A-sort-of-Long-name-for-Testing",
|
||||
UUID: "dead-1234-beef-0987",
|
||||
Offset: 8675309,
|
||||
},
|
||||
}
|
||||
|
||||
var secrets = []string{
|
||||
"supersecret",
|
||||
"12345",
|
||||
"a",
|
||||
"SuperSecret",
|
||||
"Sup3r... S3cr3t!",
|
||||
"This is a reasonably long secret key that is used for the purpose of testing.",
|
||||
"\u2603+\u2744", // snowman+snowflake
|
||||
}
|
||||
|
||||
// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and
|
||||
// validates that the tokens can be used to reconstruct the proper upload state.
|
||||
func TestLayerUploadTokens(t *testing.T) {
|
||||
secret := hmacKey("supersecret")
|
||||
|
||||
for _, testcase := range blobUploadStates {
|
||||
token, err := secret.packUploadState(testcase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
lus, err := secret.unpackUploadState(token)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assertBlobUploadStateEquals(t, testcase, lus)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHMACValidate ensures that any HMAC token providers are compatible if and
|
||||
// only if they share the same secret.
|
||||
func TestHMACValidation(t *testing.T) {
|
||||
for _, secret := range secrets {
|
||||
secret1 := hmacKey(secret)
|
||||
secret2 := hmacKey(secret)
|
||||
badSecret := hmacKey("DifferentSecret")
|
||||
|
||||
for _, testcase := range blobUploadStates {
|
||||
token, err := secret1.packUploadState(testcase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
lus, err := secret2.unpackUploadState(token)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assertBlobUploadStateEquals(t, testcase, lus)
|
||||
|
||||
_, err = badSecret.unpackUploadState(token)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token)
|
||||
}
|
||||
|
||||
badToken, err := badSecret.packUploadState(lus)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = secret1.unpackUploadState(badToken)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
|
||||
}
|
||||
|
||||
_, err = secret2.unpackUploadState(badToken)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) {
|
||||
if expected.Name != received.Name {
|
||||
t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name)
|
||||
}
|
||||
if expected.UUID != received.UUID {
|
||||
t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID)
|
||||
}
|
||||
if expected.Offset != received.Offset {
|
||||
t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset)
|
||||
}
|
||||
}
|
53
gateway/vendor/github.com/docker/distribution/registry/handlers/hooks.go
generated
vendored
53
gateway/vendor/github.com/docker/distribution/registry/handlers/hooks.go
generated
vendored
@ -1,53 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// logHook is for hooking Panic in web application
|
||||
type logHook struct {
|
||||
LevelsParam []string
|
||||
Mail *mailer
|
||||
}
|
||||
|
||||
// Fire forwards an error to LogHook
|
||||
func (hook *logHook) Fire(entry *logrus.Entry) error {
|
||||
addr := strings.Split(hook.Mail.Addr, ":")
|
||||
if len(addr) != 2 {
|
||||
return errors.New("Invalid Mail Address")
|
||||
}
|
||||
host := addr[0]
|
||||
subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message)
|
||||
|
||||
html := `
|
||||
{{.Message}}
|
||||
|
||||
{{range $key, $value := .Data}}
|
||||
{{$key}}: {{$value}}
|
||||
{{end}}
|
||||
`
|
||||
b := bytes.NewBuffer(make([]byte, 0))
|
||||
t := template.Must(template.New("mail body").Parse(html))
|
||||
if err := t.Execute(b, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
body := fmt.Sprintf("%s", b)
|
||||
|
||||
return hook.Mail.sendMail(subject, body)
|
||||
}
|
||||
|
||||
// Levels contains hook levels to be catched
|
||||
func (hook *logHook) Levels() []logrus.Level {
|
||||
levels := []logrus.Level{}
|
||||
for _, v := range hook.LevelsParam {
|
||||
lv, _ := logrus.ParseLevel(v)
|
||||
levels = append(levels, lv)
|
||||
}
|
||||
return levels
|
||||
}
|
45
gateway/vendor/github.com/docker/distribution/registry/handlers/mail.go
generated
vendored
45
gateway/vendor/github.com/docker/distribution/registry/handlers/mail.go
generated
vendored
@ -1,45 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/smtp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// mailer provides fields of email configuration for sending.
|
||||
type mailer struct {
|
||||
Addr, Username, Password, From string
|
||||
Insecure bool
|
||||
To []string
|
||||
}
|
||||
|
||||
// sendMail allows users to send email, only if mail parameters is configured correctly.
|
||||
func (mail *mailer) sendMail(subject, message string) error {
|
||||
addr := strings.Split(mail.Addr, ":")
|
||||
if len(addr) != 2 {
|
||||
return errors.New("Invalid Mail Address")
|
||||
}
|
||||
host := addr[0]
|
||||
msg := []byte("To:" + strings.Join(mail.To, ";") +
|
||||
"\r\nFrom: " + mail.From +
|
||||
"\r\nSubject: " + subject +
|
||||
"\r\nContent-Type: text/plain\r\n\r\n" +
|
||||
message)
|
||||
auth := smtp.PlainAuth(
|
||||
"",
|
||||
mail.Username,
|
||||
mail.Password,
|
||||
host,
|
||||
)
|
||||
err := smtp.SendMail(
|
||||
mail.Addr,
|
||||
auth,
|
||||
mail.From,
|
||||
mail.To,
|
||||
[]byte(msg),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
476
gateway/vendor/github.com/docker/distribution/registry/handlers/manifests.go
generated
vendored
476
gateway/vendor/github.com/docker/distribution/registry/handlers/manifests.go
generated
vendored
@ -1,476 +0,0 @@
|
||||
package handlers

import (
	"bytes"
	"fmt"
	"net/http"
	"strings"

	"github.com/docker/distribution"
	ctxu "github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/api/v2"
	"github.com/docker/distribution/registry/auth"
	"github.com/gorilla/handlers"
	"github.com/opencontainers/go-digest"
)

// These constants determine which architecture and OS to choose from a
// manifest list when downconverting it to a schema1 manifest.
const (
	defaultArch = "amd64"
	defaultOS   = "linux"
)

// manifestDispatcher takes the request context and builds the
// appropriate handler for handling manifest requests.
func manifestDispatcher(ctx *Context, r *http.Request) http.Handler {
	manifestHandler := &manifestHandler{
		Context: ctx,
	}
	reference := getReference(ctx)
	dgst, err := digest.Parse(reference)
	if err != nil {
		// We just have a tag
		manifestHandler.Tag = reference
	} else {
		manifestHandler.Digest = dgst
	}

	mhandler := handlers.MethodHandler{
		"GET":  http.HandlerFunc(manifestHandler.GetManifest),
		"HEAD": http.HandlerFunc(manifestHandler.GetManifest),
	}

	if !ctx.readOnly {
		mhandler["PUT"] = http.HandlerFunc(manifestHandler.PutManifest)
		mhandler["DELETE"] = http.HandlerFunc(manifestHandler.DeleteManifest)
	}

	return mhandler
}
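Illustrative aside, not part of the vendored diff: the dispatcher distinguishes tags from digests purely by whether digest.Parse succeeds. The helper below mirrors that decision; it is a hypothetical sketch, not code from this repository.

package handlers

import "github.com/opencontainers/go-digest"

// classifyReference mirrors the tag-vs-digest split made by manifestDispatcher.
func classifyReference(ref string) string {
	if _, err := digest.Parse(ref); err == nil {
		return "digest" // well-formed digest reference, routed by digest
	}
	return "tag" // anything digest.Parse rejects is treated as a tag
}

// classifyReference("latest") -> "tag"
// classifyReference("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") -> "digest"
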
// manifestHandler handles http operations on image manifests.
type manifestHandler struct {
	*Context

	// One of tag or digest gets set, depending on what is present in context.
	Tag    string
	Digest digest.Digest
}

// GetManifest fetches the image manifest from the storage backend, if it exists.
func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) {
	ctxu.GetLogger(imh).Debug("GetImageManifest")
	manifests, err := imh.Repository.Manifests(imh)
	if err != nil {
		imh.Errors = append(imh.Errors, err)
		return
	}

	var manifest distribution.Manifest
	if imh.Tag != "" {
		tags := imh.Repository.Tags(imh)
		desc, err := tags.Get(imh, imh.Tag)
		if err != nil {
			if _, ok := err.(distribution.ErrTagUnknown); ok {
				imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))
			} else {
				imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
			}
			return
		}
		imh.Digest = desc.Digest
	}

	if etagMatch(r, imh.Digest.String()) {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	var options []distribution.ManifestServiceOption
	if imh.Tag != "" {
		options = append(options, distribution.WithTag(imh.Tag))
	}
	manifest, err = manifests.Get(imh, imh.Digest, options...)
	if err != nil {
		if _, ok := err.(distribution.ErrManifestUnknownRevision); ok {
			imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))
		} else {
			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
		}
		return
	}

	supportsSchema2 := false
	supportsManifestList := false
	// this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values
	// https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202
	for _, acceptHeader := range r.Header["Accept"] {
		// r.Header[...] is a slice in case the request contains the same header more than once
		// if the header isn't set, we'll get the zero value, which "range" will handle gracefully

		// we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616)
		// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
		for _, mediaType := range strings.Split(acceptHeader, ",") {
			// remove "; q=..." if present
			if i := strings.Index(mediaType, ";"); i >= 0 {
				mediaType = mediaType[:i]
			}

			// it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f")
			mediaType = strings.TrimSpace(mediaType)

			if mediaType == schema2.MediaTypeManifest {
				supportsSchema2 = true
			}
			if mediaType == manifestlist.MediaTypeManifestList {
				supportsManifestList = true
			}
		}
	}

	schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest)
	manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList)

	// Only rewrite schema2 manifests when they are being fetched by tag.
	// If they are being fetched by digest, we can't return something not
	// matching the digest.
	if imh.Tag != "" && isSchema2 && !supportsSchema2 {
		// Rewrite manifest in schema1 format
		ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String())

		manifest, err = imh.convertSchema2Manifest(schema2Manifest)
		if err != nil {
			return
		}
	} else if imh.Tag != "" && isManifestList && !supportsManifestList {
		// Rewrite manifest in schema1 format
		ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String())

		// Find the image manifest corresponding to the default
		// platform
		var manifestDigest digest.Digest
		for _, manifestDescriptor := range manifestList.Manifests {
			if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS {
				manifestDigest = manifestDescriptor.Digest
				break
			}
		}

		if manifestDigest == "" {
			imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)
			return
		}

		manifest, err = manifests.Get(imh, manifestDigest)
		if err != nil {
			if _, ok := err.(distribution.ErrManifestUnknownRevision); ok {
				imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))
			} else {
				imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
			}
			return
		}

		// If necessary, convert the image manifest
		if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 {
			manifest, err = imh.convertSchema2Manifest(schema2Manifest)
			if err != nil {
				return
			}
		}
	}

	ct, p, err := manifest.Payload()
	if err != nil {
		return
	}

	w.Header().Set("Content-Type", ct)
	w.Header().Set("Content-Length", fmt.Sprint(len(p)))
	w.Header().Set("Docker-Content-Digest", imh.Digest.String())
	w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest))
	w.Write(p)
}
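Illustrative aside, not part of the vendored diff: from the client side, the content negotiation above means a puller must advertise schema2 and manifest-list support in Accept, or tag pulls will be downconverted to schema1. A hedged sketch follows; the registry URL and repository name are placeholders.

package main

import (
	"net/http"

	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema2"
)

// buildManifestRequest shows the Accept headers a modern client would send so
// that GetManifest does not rewrite the response as schema1.
func buildManifestRequest() (*http.Request, error) {
	req, err := http.NewRequest("GET", "https://registry.example.com/v2/library/app/manifests/latest", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Accept", schema2.MediaTypeManifest)
	req.Header.Add("Accept", manifestlist.MediaTypeManifestList)
	return req, nil
}
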
func (imh *manifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) {
	targetDescriptor := schema2Manifest.Target()
	blobs := imh.Repository.Blobs(imh)
	configJSON, err := blobs.Get(imh, targetDescriptor.Digest)
	if err != nil {
		if err == distribution.ErrBlobUnknown {
			imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
		} else {
			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
		}
		return nil, err
	}

	ref := imh.Repository.Named()

	if imh.Tag != "" {
		ref, err = reference.WithTag(ref, imh.Tag)
		if err != nil {
			imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err))
			return nil, err
		}
	}

	builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON)
	for _, d := range schema2Manifest.Layers {
		if err := builder.AppendReference(d); err != nil {
			imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
			return nil, err
		}
	}
	manifest, err := builder.Build(imh)
	if err != nil {
		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
		return nil, err
	}
	imh.Digest = digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical)

	return manifest, nil
}

func etagMatch(r *http.Request, etag string) bool {
	for _, headerVal := range r.Header["If-None-Match"] {
		if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted
			return true
		}
	}
	return false
}
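Illustrative aside, not part of the vendored diff: etagMatch accepts the digest with or without quotes, so either form of If-None-Match short-circuits GetManifest to 304 Not Modified. A hedged sketch, assumed to sit alongside etagMatch in package handlers; the digest value is made up.

package handlers

import "net/http"

// exampleConditionalGet illustrates that etagMatch accepts the digest either
// quoted (exactly as served in the Etag header) or bare.
func exampleConditionalGet() (quoted, bare bool) {
	const d = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	r, _ := http.NewRequest("GET", "/v2/foo/bar/manifests/latest", nil)

	r.Header.Set("If-None-Match", `"`+d+`"`)
	quoted = etagMatch(r, d) // true

	r.Header.Set("If-None-Match", d)
	bare = etagMatch(r, d) // true

	return quoted, bare
}
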
// PutManifest validates and stores a manifest in the registry.
func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) {
	ctxu.GetLogger(imh).Debug("PutImageManifest")
	manifests, err := imh.Repository.Manifests(imh)
	if err != nil {
		imh.Errors = append(imh.Errors, err)
		return
	}

	var jsonBuf bytes.Buffer
	if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil {
		// copyFullPayload reports the error if necessary
		return
	}

	mediaType := r.Header.Get("Content-Type")
	manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())
	if err != nil {
		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
		return
	}

	if imh.Digest != "" {
		if desc.Digest != imh.Digest {
			ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", desc.Digest, imh.Digest)
			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
			return
		}
	} else if imh.Tag != "" {
		imh.Digest = desc.Digest
	} else {
		imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified"))
		return
	}

	var options []distribution.ManifestServiceOption
	if imh.Tag != "" {
		options = append(options, distribution.WithTag(imh.Tag))
	}

	if err := imh.applyResourcePolicy(manifest); err != nil {
		imh.Errors = append(imh.Errors, err)
		return
	}

	_, err = manifests.Put(imh, manifest, options...)
	if err != nil {
		// TODO(stevvooe): These error handling switches really need to be
		// handled by an app global mapper.
		if err == distribution.ErrUnsupported {
			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
			return
		}
		if err == distribution.ErrAccessDenied {
			imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied)
			return
		}
		switch err := err.(type) {
		case distribution.ErrManifestVerification:
			for _, verificationError := range err {
				switch verificationError := verificationError.(type) {
				case distribution.ErrManifestBlobUnknown:
					imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest))
				case distribution.ErrManifestNameInvalid:
					imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))
				case distribution.ErrManifestUnverified:
					imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)
				default:
					if verificationError == digest.ErrDigestInvalidFormat {
						imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
					} else {
						imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)
					}
				}
			}
		case errcode.Error:
			imh.Errors = append(imh.Errors, err)
		default:
			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
		}

		return
	}

	// Tag this manifest
	if imh.Tag != "" {
		tags := imh.Repository.Tags(imh)
		err = tags.Tag(imh, imh.Tag, desc)
		if err != nil {
			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
			return
		}
	}

	// Construct a canonical url for the uploaded manifest.
	ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)
	if err != nil {
		imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}

	location, err := imh.urlBuilder.BuildManifestURL(ref)
	if err != nil {
		// NOTE(stevvooe): Given the behavior above, this is absurdly unlikely to
		// happen. We'll log the error here but proceed as if it worked. Worst
		// case, we set an empty location header.
		ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err)
	}

	w.Header().Set("Location", location)
	w.Header().Set("Docker-Content-Digest", imh.Digest.String())
	w.WriteHeader(http.StatusCreated)
}

// applyResourcePolicy checks whether the resource class matches what has
// been authorized and allowed by the policy configuration.
func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) error {
	allowedClasses := imh.App.Config.Policy.Repository.Classes
	if len(allowedClasses) == 0 {
		return nil
	}

	var class string
	switch m := manifest.(type) {
	case *schema1.SignedManifest:
		class = "image"
	case *schema2.DeserializedManifest:
		switch m.Config.MediaType {
		case schema2.MediaTypeImageConfig:
			class = "image"
		case schema2.MediaTypePluginConfig:
			class = "plugin"
		default:
			message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType)
			return errcode.ErrorCodeDenied.WithMessage(message)
		}
	}

	if class == "" {
		return nil
	}

	// Check to see if class is allowed in registry
	var allowedClass bool
	for _, c := range allowedClasses {
		if class == c {
			allowedClass = true
			break
		}
	}
	if !allowedClass {
		message := fmt.Sprintf("registry does not allow %s manifest", class)
		return errcode.ErrorCodeDenied.WithMessage(message)
	}

	resources := auth.AuthorizedResources(imh)
	n := imh.Repository.Named().Name()

	var foundResource bool
	for _, r := range resources {
		if r.Name == n {
			if r.Class == "" {
				r.Class = "image"
			}
			if r.Class == class {
				return nil
			}
			foundResource = true
		}
	}

	// resource was found but no matching class was found
	if foundResource {
		message := fmt.Sprintf("repository not authorized for %s manifest", class)
		return errcode.ErrorCodeDenied.WithMessage(message)
	}

	return nil
}
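Illustrative aside, not part of the vendored diff: the class check above only fires when the registry configuration lists allowed classes. A minimal sketch of enabling it follows; the field path is taken from the code above, the configuration package layout is assumed, and the value is an example.

package main

import "github.com/docker/distribution/configuration"

// examplePolicy restricts the registry to plain image manifests; with this
// policy a plugin manifest PUT would be denied by applyResourcePolicy.
func examplePolicy() *configuration.Configuration {
	cfg := &configuration.Configuration{}
	cfg.Policy.Repository.Classes = []string{"image"}
	return cfg
}
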

// DeleteManifest removes the manifest with the given digest from the registry.
func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) {
	ctxu.GetLogger(imh).Debug("DeleteImageManifest")

	manifests, err := imh.Repository.Manifests(imh)
	if err != nil {
		imh.Errors = append(imh.Errors, err)
		return
	}

	err = manifests.Delete(imh, imh.Digest)
	if err != nil {
		switch err {
		case digest.ErrDigestUnsupported:
		case digest.ErrDigestInvalidFormat:
			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
			return
		case distribution.ErrBlobUnknown:
			imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)
			return
		case distribution.ErrUnsupported:
			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
			return
		default:
			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown)
			return
		}
	}

	tagService := imh.Repository.Tags(imh)
	referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest})
	if err != nil {
		imh.Errors = append(imh.Errors, err)
		return
	}

	for _, tag := range referencedTags {
		if err := tagService.Untag(imh, tag); err != nil {
			imh.Errors = append(imh.Errors, err)
			return
		}
	}

	w.WriteHeader(http.StatusAccepted)
}

62
gateway/vendor/github.com/docker/distribution/registry/handlers/tags.go
generated
vendored
@@ -1,62 +0,0 @@
package handlers

import (
	"encoding/json"
	"net/http"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/api/v2"
	"github.com/gorilla/handlers"
)

// tagsDispatcher constructs the tags handler api endpoint.
func tagsDispatcher(ctx *Context, r *http.Request) http.Handler {
	tagsHandler := &tagsHandler{
		Context: ctx,
	}

	return handlers.MethodHandler{
		"GET": http.HandlerFunc(tagsHandler.GetTags),
	}
}

// tagsHandler handles requests for lists of tags under a repository name.
type tagsHandler struct {
	*Context
}

type tagsAPIResponse struct {
	Name string   `json:"name"`
	Tags []string `json:"tags"`
}

// GetTags returns a json list of tags for a specific image name.
func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	tagService := th.Repository.Tags(th)
	tags, err := tagService.All(th)
	if err != nil {
		switch err := err.(type) {
		case distribution.ErrRepositoryUnknown:
			th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()}))
		case errcode.Error:
			th.Errors = append(th.Errors, err)
		default:
			th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
		}
		return
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	enc := json.NewEncoder(w)
	if err := enc.Encode(tagsAPIResponse{
		Name: th.Repository.Named().Name(),
		Tags: tags,
	}); err != nil {
		th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
}
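Illustrative aside, not part of the vendored diff: clients read this handler's output from GET /v2/<name>/tags/list. A hedged sketch of decoding the response into the same shape the handler encodes; the registry URL and repository name are placeholders.

package main

import (
	"encoding/json"
	"net/http"
)

// fetchTags decodes the tags-list response produced by GetTags above.
func fetchTags(registry, repo string) ([]string, error) {
	resp, err := http.Get(registry + "/v2/" + repo + "/tags/list")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var body struct {
		Name string   `json:"name"`
		Tags []string `json:"tags"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return nil, err
	}
	return body.Tags, nil
}

// Example: tags, err := fetchTags("https://registry.example.com", "library/app")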