openfaas/faas (mirror of https://github.com/openfaas/faas.git)
Commit: Vendoring with Glide and delete function handler
gateway/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go (new file, generated/vendored, 259 lines)
@@ -0,0 +1,259 @@
package scheduler

import (
	"encoding/json"
	"fmt"
	"sync"
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage/driver"
)

// expiryFunc is called when a scheduled entry's TTL expires
type expiryFunc func(reference.Reference) error

const (
	entryTypeBlob = iota
	entryTypeManifest
	indexSaveFrequency = 5 * time.Second
)

// schedulerEntry represents an entry in the scheduler
// fields are exported for serialization
type schedulerEntry struct {
	Key       string    `json:"Key"`
	Expiry    time.Time `json:"ExpiryData"`
	EntryType int       `json:"EntryType"`

	timer *time.Timer
}

// New returns a new instance of the scheduler
func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler {
	return &TTLExpirationScheduler{
		entries:         make(map[string]*schedulerEntry),
		driver:          driver,
		pathToStateFile: path,
		ctx:             ctx,
		stopped:         true,
		doneChan:        make(chan struct{}),
		saveTimer:       time.NewTicker(indexSaveFrequency),
	}
}

// TTLExpirationScheduler is a scheduler used to perform actions
// when TTLs expire
type TTLExpirationScheduler struct {
	sync.Mutex

	entries map[string]*schedulerEntry

	driver          driver.StorageDriver
	ctx             context.Context
	pathToStateFile string

	stopped bool

	onBlobExpire     expiryFunc
	onManifestExpire expiryFunc

	indexDirty bool
	saveTimer  *time.Ticker
	doneChan   chan struct{}
}

// OnBlobExpire is called when a scheduled blob's TTL expires
func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) {
	ttles.Lock()
	defer ttles.Unlock()

	ttles.onBlobExpire = f
}

// OnManifestExpire is called when a scheduled manifest's TTL expires
func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) {
	ttles.Lock()
	defer ttles.Unlock()

	ttles.onManifestExpire = f
}

// AddBlob schedules a blob cleanup after ttl expires
func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error {
	ttles.Lock()
	defer ttles.Unlock()

	if ttles.stopped {
		return fmt.Errorf("scheduler not started")
	}

	ttles.add(blobRef, ttl, entryTypeBlob)
	return nil
}

// AddManifest schedules a manifest cleanup after ttl expires
func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error {
	ttles.Lock()
	defer ttles.Unlock()

	if ttles.stopped {
		return fmt.Errorf("scheduler not started")
	}

	ttles.add(manifestRef, ttl, entryTypeManifest)
	return nil
}

// Start starts the scheduler
func (ttles *TTLExpirationScheduler) Start() error {
	ttles.Lock()
	defer ttles.Unlock()

	err := ttles.readState()
	if err != nil {
		return err
	}

	if !ttles.stopped {
		return fmt.Errorf("Scheduler already started")
	}

	context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
	ttles.stopped = false

	// Start timer for each deserialized entry
	for _, entry := range ttles.entries {
		entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now()))
	}

	// Start a ticker to periodically save the entries index

	go func() {
		for {
			select {
			case <-ttles.saveTimer.C:
				ttles.Lock()
				if !ttles.indexDirty {
					ttles.Unlock()
					continue
				}

				err := ttles.writeState()
				if err != nil {
					context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
				} else {
					ttles.indexDirty = false
				}
				ttles.Unlock()

			case <-ttles.doneChan:
				return
			}
		}
	}()

	return nil
}

func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) {
	entry := &schedulerEntry{
		Key:       r.String(),
		Expiry:    time.Now().Add(ttl),
		EntryType: eType,
	}
	context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now()))
	if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil {
		oldEntry.timer.Stop()
	}
	ttles.entries[entry.Key] = entry
	entry.timer = ttles.startTimer(entry, ttl)
	ttles.indexDirty = true
}

func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer {
	return time.AfterFunc(ttl, func() {
		ttles.Lock()
		defer ttles.Unlock()

		var f expiryFunc

		switch entry.EntryType {
		case entryTypeBlob:
			f = ttles.onBlobExpire
		case entryTypeManifest:
			f = ttles.onManifestExpire
		default:
			f = func(reference.Reference) error {
				return fmt.Errorf("scheduler entry type")
			}
		}

		ref, err := reference.Parse(entry.Key)
		if err == nil {
			if err := f(ref); err != nil {
				context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err)
			}
		} else {
			context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err)
		}

		delete(ttles.entries, entry.Key)
		ttles.indexDirty = true
	})
}

// Stop stops the scheduler.
func (ttles *TTLExpirationScheduler) Stop() {
	ttles.Lock()
	defer ttles.Unlock()

	if err := ttles.writeState(); err != nil {
		context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
	}

	for _, entry := range ttles.entries {
		entry.timer.Stop()
	}

	close(ttles.doneChan)
	ttles.saveTimer.Stop()
	ttles.stopped = true
}

func (ttles *TTLExpirationScheduler) writeState() error {
	jsonBytes, err := json.Marshal(ttles.entries)
	if err != nil {
		return err
	}

	err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes)
	if err != nil {
		return err
	}

	return nil
}

func (ttles *TTLExpirationScheduler) readState() error {
	if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil {
		switch err := err.(type) {
		case driver.PathNotFoundError:
			return nil
		default:
			return err
		}
	}

	bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile)
	if err != nil {
		return err
	}

	err = json.Unmarshal(bytes, &ttles.entries)
	if err != nil {
		return err
	}
	return nil
}
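For orientation, here is a minimal sketch of how the exported API above (New, OnBlobExpire, Start, AddBlob, Stop) could be wired together outside the registry. The state-file path, repository reference, and TTL are illustrative values, not taken from this commit, and error handling is reduced to panics for brevity.

package main

import (
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/proxy/scheduler"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	// Build a scheduler backed by the in-memory storage driver; any
	// driver.StorageDriver works, since the state file is written through it.
	s := scheduler.New(context.Background(), inmemory.New(), "/scheduler-state.json")

	// Register the callback that runs when a blob entry's TTL elapses.
	s.OnBlobExpire(func(ref reference.Reference) error {
		context.GetLogger(context.Background()).Infof("expired: %s", ref.String())
		return nil
	})

	if err := s.Start(); err != nil {
		panic(err)
	}
	defer s.Stop()

	// AddBlob expects a canonical (name@digest) reference.
	ref, err := reference.Parse("example/repo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	if err != nil {
		panic(err)
	}
	canonical, ok := ref.(reference.Canonical)
	if !ok {
		panic("reference is not canonical")
	}
	if err := s.AddBlob(canonical, time.Hour); err != nil {
		panic(err)
	}

	// If the process exits before the TTL elapses, the deferred Stop persists
	// the pending entry through the driver, and a later Start over the same
	// driver and path restores and reschedules it.
}

That flush-on-Stop, restore-on-Start behaviour is what the TestStopRestore case in the test file below exercises.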
gateway/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go (new file, generated/vendored, 211 lines)
@@ -0,0 +1,211 @@
package scheduler

import (
	"encoding/json"
	"sync"
	"testing"
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) {
	ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}

	ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}

	ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}

	return ref1, ref2, ref3
}

func TestSchedule(t *testing.T) {
	ref1, ref2, ref3 := testRefs(t)
	timeUnit := time.Millisecond
	remainingRepos := map[string]bool{
		ref1.String(): true,
		ref2.String(): true,
		ref3.String(): true,
	}

	var mu sync.Mutex
	s := New(context.Background(), inmemory.New(), "/ttl")
	deleteFunc := func(repoName reference.Reference) error {
		if len(remainingRepos) == 0 {
			t.Fatalf("Incorrect expiry count")
		}
		_, ok := remainingRepos[repoName.String()]
		if !ok {
			t.Fatalf("Trying to remove nonexistent repo: %s", repoName)
		}
		t.Log("removing", repoName)
		mu.Lock()
		delete(remainingRepos, repoName.String())
		mu.Unlock()

		return nil
	}
	s.onBlobExpire = deleteFunc
	err := s.Start()
	if err != nil {
		t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
	}

	s.add(ref1, 3*timeUnit, entryTypeBlob)
	s.add(ref2, 1*timeUnit, entryTypeBlob)

	func() {
		s.Lock()
		s.add(ref3, 1*timeUnit, entryTypeBlob)
		s.Unlock()

	}()

	// Ensure all repos are deleted
	<-time.After(50 * timeUnit)

	mu.Lock()
	defer mu.Unlock()
	if len(remainingRepos) != 0 {
		t.Fatalf("Repositories remaining: %#v", remainingRepos)
	}
}

func TestRestoreOld(t *testing.T) {
	ref1, ref2, _ := testRefs(t)
	remainingRepos := map[string]bool{
		ref1.String(): true,
		ref2.String(): true,
	}

	var wg sync.WaitGroup
	wg.Add(len(remainingRepos))
	var mu sync.Mutex
	deleteFunc := func(r reference.Reference) error {
		mu.Lock()
		defer mu.Unlock()
		if r.String() == ref1.String() && len(remainingRepos) == 2 {
			t.Errorf("ref1 should not be removed first")
		}
		_, ok := remainingRepos[r.String()]
		if !ok {
			t.Fatalf("Trying to remove nonexistent repo: %s", r)
		}
		delete(remainingRepos, r.String())
		wg.Done()
		return nil
	}

	timeUnit := time.Millisecond
	serialized, err := json.Marshal(&map[string]schedulerEntry{
		ref1.String(): {
			Expiry:    time.Now().Add(10 * timeUnit),
			Key:       ref1.String(),
			EntryType: 0,
		},
		ref2.String(): {
			Expiry:    time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first
			Key:       ref2.String(),
			EntryType: 0,
		},
	})
	if err != nil {
		t.Fatalf("Error serializing test data: %s", err.Error())
	}

	ctx := context.Background()
	pathToStatFile := "/ttl"
	fs := inmemory.New()
	err = fs.PutContent(ctx, pathToStatFile, serialized)
	if err != nil {
		t.Fatal("Unable to write serialized data to fs")
	}
	s := New(context.Background(), fs, "/ttl")
	s.OnBlobExpire(deleteFunc)
	err = s.Start()
	if err != nil {
		t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
	}
	defer s.Stop()

	wg.Wait()
	mu.Lock()
	defer mu.Unlock()
	if len(remainingRepos) != 0 {
		t.Fatalf("Repositories remaining: %#v", remainingRepos)
	}
}

func TestStopRestore(t *testing.T) {
	ref1, ref2, _ := testRefs(t)

	timeUnit := time.Millisecond
	remainingRepos := map[string]bool{
		ref1.String(): true,
		ref2.String(): true,
	}

	var mu sync.Mutex
	deleteFunc := func(r reference.Reference) error {
		mu.Lock()
		delete(remainingRepos, r.String())
		mu.Unlock()
		return nil
	}

	fs := inmemory.New()
	pathToStateFile := "/ttl"
	s := New(context.Background(), fs, pathToStateFile)
	s.onBlobExpire = deleteFunc

	err := s.Start()
	if err != nil {
		t.Fatalf(err.Error())
	}
	s.add(ref1, 300*timeUnit, entryTypeBlob)
	s.add(ref2, 100*timeUnit, entryTypeBlob)

	// Start and stop before all operations complete
	// state will be written to fs
	s.Stop()
	time.Sleep(10 * time.Millisecond)

	// v2 will restore state from fs
	s2 := New(context.Background(), fs, pathToStateFile)
	s2.onBlobExpire = deleteFunc
	err = s2.Start()
	if err != nil {
		t.Fatalf("Error starting v2: %s", err.Error())
	}

	<-time.After(500 * timeUnit)
	mu.Lock()
	defer mu.Unlock()
	if len(remainingRepos) != 0 {
		t.Fatalf("Repositories remaining: %#v", remainingRepos)
	}

}

func TestDoubleStart(t *testing.T) {
	s := New(context.Background(), inmemory.New(), "/ttl")
	err := s.Start()
	if err != nil {
		t.Fatalf("Unable to start scheduler")
	}
	err = s.Start()
	if err == nil {
		t.Fatalf("Scheduler started twice without error")
	}
}
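TestRestoreOld and TestStopRestore above both depend on the JSON state file exchanged by writeState and readState in scheduler.go. A rough sketch of its on-disk shape follows; the field names and the ExpiryData property come from the schedulerEntry struct tags shown earlier, while the reference string and timestamp are illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local copy of the exported fields of schedulerEntry from scheduler.go;
// the unexported timer field never reaches the state file.
type schedulerEntry struct {
	Key       string    `json:"Key"`
	Expiry    time.Time `json:"ExpiryData"`
	EntryType int       `json:"EntryType"`
}

func main() {
	key := "testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	state := map[string]schedulerEntry{
		key: {
			Key:       key,
			Expiry:    time.Now().Add(24 * time.Hour),
			EntryType: 0, // entryTypeBlob
		},
	}
	out, _ := json.MarshalIndent(state, "", "  ")
	// Note the "ExpiryData" property name: the JSON tag on Expiry differs from
	// the Go field name, so any hand-written fixture must use ExpiryData, not
	// Expiry (TestRestoreOld sidesteps this by marshalling through the struct).
	fmt.Println(string(out))
}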