Add Swarm limits

Signed-off-by: Alex Ellis <alexellis2@gmail.com>
Alex Ellis 2017-11-12 19:26:54 +00:00
parent 9614b0b173
commit b17838ce51
9 changed files with 110 additions and 109 deletions

View File

@@ -17,6 +17,7 @@ import (
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/docker/docker/registry"
units "github.com/docker/go-units"
"github.com/openfaas/faas/gateway/metrics"
"github.com/openfaas/faas/gateway/requests"
)
@@ -37,11 +38,6 @@ func MakeNewFunctionHandler(metricsOptions metrics.MetricOptions, c *client.Clie
return
}
fmt.Println(request)
// TODO: review why this was here... debugging?
// w.WriteHeader(http.StatusNotImplemented)
options := types.ServiceCreateOptions{}
if len(request.RegistryAuth) > 0 {
auth, err := BuildEncodedAuthConfig(request.RegistryAuth, request.Image)
@@ -53,6 +49,7 @@ func MakeNewFunctionHandler(metricsOptions metrics.MetricOptions, c *client.Clie
}
options.EncodedRegistryAuth = auth
}
spec := makeSpec(&request, maxRestarts, restartDelay)
response, err := c.ServiceCreate(context.Background(), spec, options)
@@ -68,6 +65,7 @@ func MakeNewFunctionHandler(metricsOptions metrics.MetricOptions, c *client.Clie
func makeSpec(request *requests.CreateFunctionRequest, maxRestarts uint64, restartDelay time.Duration) swarm.ServiceSpec {
constraints := []string{}
if request.Constraints != nil && len(request.Constraints) > 0 {
constraints = request.Constraints
} else {
@@ -84,7 +82,8 @@ func makeSpec(request *requests.CreateFunctionRequest, maxRestarts uint64, resta
labels[k] = v
}
}
fmt.Println(labels)
resources := buildResources(request)
nets := []swarm.NetworkAttachmentConfig{
{
@@ -107,7 +106,8 @@ func makeSpec(request *requests.CreateFunctionRequest, maxRestarts uint64, resta
Image: request.Image,
Labels: labels,
},
Networks: nets,
Networks: nets,
Resources: resources,
Placement: &swarm.Placement{
Constraints: constraints,
},
@@ -175,3 +175,39 @@ func userPasswordFromBasicAuth(basicAuthB64 string) (string, string, error) {
}
return cs[:s], cs[s+1:], nil
}
func ParseMemory(value string) (int64, error) {
return units.RAMInBytes(value)
}
func buildResources(request *requests.CreateFunctionRequest) *swarm.ResourceRequirements {
var resources *swarm.ResourceRequirements
if request.Requests != nil || request.Limits != nil {
resources = &swarm.ResourceRequirements{}
if request.Limits != nil {
memoryBytes, err := ParseMemory(request.Limits.Memory)
if err != nil {
log.Printf("Error parsing memory limit: %T", err)
}
resources.Limits = &swarm.Resources{
MemoryBytes: memoryBytes,
}
}
if request.Requests != nil {
memoryBytes, err := ParseMemory(request.Requests.Memory)
if err != nil {
log.Printf("Error parsing memory request: %T", err)
}
resources.Reservations = &swarm.Resources{
MemoryBytes: memoryBytes,
}
}
}
return resources
}
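
For orientation, here is a minimal sketch of how the new memory strings flow into a service spec. It sits outside the handlers package (the package main wrapper and the "128m" value are mine for illustration), so it only calls the exported ParseMemory and then builds the same swarm.ResourceRequirements shape that buildResources produces:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/openfaas/faas/gateway/handlers"
)

func main() {
	// ParseMemory delegates to go-units RAMInBytes, so suffixes are binary:
	// "128m" means 128 * 1024 * 1024 bytes.
	limit, err := handlers.ParseMemory("128m")
	if err != nil {
		panic(err)
	}

	// buildResources attaches the parsed value to the task template in this shape.
	resources := &swarm.ResourceRequirements{
		Limits: &swarm.Resources{MemoryBytes: limit},
	}
	fmt.Println(resources.Limits.MemoryBytes) // 134217728
}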

View File

@@ -104,6 +104,8 @@ func updateSpec(request *requests.CreateFunctionRequest, spec *swarm.ServiceSpec
},
}
spec.TaskTemplate.Resources = buildResources(request)
spec.TaskTemplate.Placement = &swarm.Placement{
Constraints: constraints,
}

View File

@@ -0,0 +1,23 @@
// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package requests
// PrometheusInnerAlertLabel describes the labels attached to an inner alert
type PrometheusInnerAlertLabel struct {
AlertName string `json:"alertname"`
FunctionName string `json:"function_name"`
}
// PrometheusInnerAlert is a single alert within a PrometheusAlert group
type PrometheusInnerAlert struct {
Status string `json:"status"`
Labels PrometheusInnerAlertLabel `json:"labels"`
}
// PrometheusAlert as produced by AlertManager
type PrometheusAlert struct {
Status string `json:"status"`
Receiver string `json:"receiver"`
Alerts []PrometheusInnerAlert `json:"alerts"`
}
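
A rough usage sketch for these types (the payload below is an invented example shaped like an AlertManager webhook body, not taken from this commit): the gateway can unmarshal such JSON straight into PrometheusAlert.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/openfaas/faas/gateway/requests"
)

func main() {
	// Invented webhook body matching the struct tags above.
	payload := []byte(`{
		"status": "firing",
		"receiver": "scale-up",
		"alerts": [
			{"status": "firing", "labels": {"alertname": "APIHighInvocationRate", "function_name": "echoit"}}
		]
	}`)

	var alert requests.PrometheusAlert
	if err := json.Unmarshal(payload, &alert); err != nil {
		panic(err)
	}
	fmt.Println(alert.Alerts[0].Labels.FunctionName) // echoit
}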

View File

@@ -5,6 +5,7 @@ package requests
// CreateFunctionRequest creates a function in the swarm.
type CreateFunctionRequest struct {
// Service corresponds to a Docker Service
Service string `json:"service"`
@@ -34,30 +35,18 @@ type CreateFunctionRequest struct {
// Labels are metadata for functions which may be used by the
// back-end for making scheduling or routing decisions
Labels *map[string]string `json:"labels"`
// Limits for the function
Limits *FunctionResources `json:"limits"`
// Requests are the resources requested by the function
Requests *FunctionResources `json:"requests"`
}
// DeleteFunctionRequest delete a deployed function
type DeleteFunctionRequest struct {
FunctionName string `json:"functionName"`
}
// PrometheusInnerAlertLabel PrometheusInnerAlertLabel
type PrometheusInnerAlertLabel struct {
AlertName string `json:"alertname"`
FunctionName string `json:"function_name"`
}
// PrometheusInnerAlert PrometheusInnerAlert
type PrometheusInnerAlert struct {
Status string `json:"status"`
Labels PrometheusInnerAlertLabel `json:"labels"`
}
// PrometheusAlert as produced by AlertManager
type PrometheusAlert struct {
Status string `json:"status"`
Receiver string `json:"receiver"`
Alerts []PrometheusInnerAlert `json:"alerts"`
// FunctionResources holds the memory and CPU requests/limits for a function
type FunctionResources struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
}
// Function exported for system/functions endpoint
@@ -79,3 +68,8 @@ type AsyncReport struct {
StatusCode int `json:"statusCode"`
TimeTaken float64 `json:"timeTaken"`
}
// DeleteFunctionRequest deletes a deployed function
type DeleteFunctionRequest struct {
FunctionName string `json:"functionName"`
}
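
As a quick illustration of the new fields (service and image names below are placeholders), a deployment request can now carry memory limits and requests. Note that buildResources in this commit only consumes the Memory string; the CPU field is defined but not yet applied:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/openfaas/faas/gateway/requests"
)

func main() {
	// Placeholder service/image; only Memory is acted on by the Swarm handlers here.
	req := requests.CreateFunctionRequest{
		Service:  "echoit",
		Image:    "functions/alpine:latest",
		Limits:   &requests.FunctionResources{Memory: "128m"},
		Requests: &requests.FunctionResources{Memory: "64m"},
	}

	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	// JSON body a client could POST to the gateway's function deployment endpoint.
	fmt.Println(string(body))
}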

View File

@@ -0,0 +1,20 @@
package tests
import (
"testing"
"github.com/openfaas/faas/gateway/handlers"
)
func Test_ParseMemory(t *testing.T) {
value := "512 m"
val, err := handlers.ParseMemory(value)
if err != nil {
t.Error(err)
}
if val != 1024*1024*512 {
t.Errorf("want: %d got: %d", 1024, val)
}
}
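
If more coverage is wanted, a hypothetical table-driven variant (the extra cases below are mine, relying on the binary multipliers used by go-units RAMInBytes) could sit alongside the test above:

package tests

import (
	"testing"

	"github.com/openfaas/faas/gateway/handlers"
)

func Test_ParseMemory_Suffixes(t *testing.T) {
	// Hypothetical extra cases: RAMInBytes treats k/m/g as 1024-based multipliers.
	cases := map[string]int64{
		"1k":  1024,
		"64m": 64 * 1024 * 1024,
		"1g":  1024 * 1024 * 1024,
	}
	for in, want := range cases {
		got, err := handlers.ParseMemory(in)
		if err != nil {
			t.Error(err)
		}
		if got != want {
			t.Errorf("%s: want %d got %d", in, want, got)
		}
	}
}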

View File

@@ -67,18 +67,18 @@ type TaskSpec struct {
ForceUpdate uint64
}
// Resources represents resources (CPU/Memory).
type Resources struct {
NanoCPUs int64 `json:",omitempty"`
MemoryBytes int64 `json:",omitempty"`
}
// ResourceRequirements represents resource requirements.
type ResourceRequirements struct {
Limits *Resources `json:",omitempty"`
Reservations *Resources `json:",omitempty"`
}
// Resources represents resources (CPU/Memory).
type Resources struct {
MemoryBytes int64 `json:",omitempty"`
// NanoCPUs int64 `json:",omitempty"`
}
// Placement represents orchestration parameters.
type Placement struct {
Constraints []string `json:",omitempty"`

View File

@@ -1,67 +0,0 @@
# Integration Testing on Swarm
IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster.
## Architecture
### Master service
- Works as a funker caller
- Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
### Worker service
- Works as a funker callee
- Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
### Client
- Controls master and workers via `docker stack`
- No need to have a local daemon
Typically, the master and workers run in a cloud environment, while the client runs on a laptop, e.g. Docker for Mac/Windows.
## Requirements
- Docker daemon 1.13 or later
- Private registry for distributed execution with multiple nodes
## Usage
### Step 1: Prepare images
$ make build-integration-cli-on-swarm
The following environment variables are known to work in this step:
- `BUILDFLAGS`
- `DOCKER_INCREMENTAL_BINARY`
### Step 2: Execute tests
$ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
The following environment variables are known to work in this step:
- `DOCKER_GRAPHDRIVER`
- `DOCKER_EXPERIMENTAL`
#### Flags
Basic flags:
- `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
- `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
- `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node, and hence do not need a private registry, you do not need to specify `-push-worker-image`.
Experimental flags for mitigating makespan nonuniformity:
- `-shuffle`: Shuffle the test filter strings
Flags for debugging IT on Swarm itself:
- `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
- `-filters-file FILE`: the file containing `-check.f` strings. By default, the file is automatically generated.
- `-dry-run`: skip the actual workload
- `-keep-executor`: do not auto-remove executor containers; useful for running privileged programs on Swarm

View File

@@ -1,2 +0,0 @@
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773

View File

@@ -1,5 +0,0 @@
github.com/openfaas/faas 4cc299d4c84e7ce10c6a5117e918c5a5b4aeb2ae
github.com/nats-io/go-nats-streaming bf8654e90f5296da96eab1e85808eb5c4b7b5541
github.com/nats-io/go-nats 34c8842105ac0b69c838a9998a239d482936c466
github.com/nats-io/nuid 3cf34f9fca4e88afa9da8eabd75e3326c9941b44
github.com/gogo/protobuf dda3e8acadcc9affc16faf33fbb229db78399245