Mirror of https://github.com/openfaas/faasd.git
Commit message: Initial
Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>

vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go (new file, generated, vendored, 160 lines)
@@ -0,0 +1,160 @@
package hcs

import (
    "fmt"
    "sync"
    "syscall"

    "github.com/Microsoft/hcsshim/internal/interop"
    "github.com/Microsoft/hcsshim/internal/logfields"
    "github.com/Microsoft/hcsshim/internal/vmcompute"
    "github.com/sirupsen/logrus"
)

var (
    nextCallback    uintptr
    callbackMap     = map[uintptr]*notifcationWatcherContext{}
    callbackMapLock = sync.RWMutex{}

    notificationWatcherCallback = syscall.NewCallback(notificationWatcher)

    // Notifications for HCS_SYSTEM handles
    hcsNotificationSystemExited                      hcsNotification = 0x00000001
    hcsNotificationSystemCreateCompleted             hcsNotification = 0x00000002
    hcsNotificationSystemStartCompleted              hcsNotification = 0x00000003
    hcsNotificationSystemPauseCompleted              hcsNotification = 0x00000004
    hcsNotificationSystemResumeCompleted             hcsNotification = 0x00000005
    hcsNotificationSystemCrashReport                 hcsNotification = 0x00000006
    hcsNotificationSystemSiloJobCreated              hcsNotification = 0x00000007
    hcsNotificationSystemSaveCompleted               hcsNotification = 0x00000008
    hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009
    hcsNotificationSystemShutdownFailed              hcsNotification = 0x0000000A
    hcsNotificationSystemGetPropertiesCompleted      hcsNotification = 0x0000000B
    hcsNotificationSystemModifyCompleted             hcsNotification = 0x0000000C
    hcsNotificationSystemCrashInitiated              hcsNotification = 0x0000000D
    hcsNotificationSystemGuestConnectionClosed       hcsNotification = 0x0000000E

    // Notifications for HCS_PROCESS handles
    hcsNotificationProcessExited hcsNotification = 0x00010000

    // Common notifications
    hcsNotificationInvalid           hcsNotification = 0x00000000
    hcsNotificationServiceDisconnect hcsNotification = 0x01000000
)

type hcsNotification uint32

func (hn hcsNotification) String() string {
    switch hn {
    case hcsNotificationSystemExited:
        return "SystemExited"
    case hcsNotificationSystemCreateCompleted:
        return "SystemCreateCompleted"
    case hcsNotificationSystemStartCompleted:
        return "SystemStartCompleted"
    case hcsNotificationSystemPauseCompleted:
        return "SystemPauseCompleted"
    case hcsNotificationSystemResumeCompleted:
        return "SystemResumeCompleted"
    case hcsNotificationSystemCrashReport:
        return "SystemCrashReport"
    case hcsNotificationSystemSiloJobCreated:
        return "SystemSiloJobCreated"
    case hcsNotificationSystemSaveCompleted:
        return "SystemSaveCompleted"
    case hcsNotificationSystemRdpEnhancedModeStateChanged:
        return "SystemRdpEnhancedModeStateChanged"
    case hcsNotificationSystemShutdownFailed:
        return "SystemShutdownFailed"
    case hcsNotificationSystemGetPropertiesCompleted:
        return "SystemGetPropertiesCompleted"
    case hcsNotificationSystemModifyCompleted:
        return "SystemModifyCompleted"
    case hcsNotificationSystemCrashInitiated:
        return "SystemCrashInitiated"
    case hcsNotificationSystemGuestConnectionClosed:
        return "SystemGuestConnectionClosed"
    case hcsNotificationProcessExited:
        return "ProcessExited"
    case hcsNotificationInvalid:
        return "Invalid"
    case hcsNotificationServiceDisconnect:
        return "ServiceDisconnect"
    default:
        return fmt.Sprintf("Unknown: %d", hn)
    }
}

type notificationChannel chan error

type notifcationWatcherContext struct {
    channels notificationChannels
    handle   vmcompute.HcsCallback

    systemID  string
    processID int
}

type notificationChannels map[hcsNotification]notificationChannel

func newSystemChannels() notificationChannels {
    channels := make(notificationChannels)
    for _, notif := range []hcsNotification{
        hcsNotificationServiceDisconnect,
        hcsNotificationSystemExited,
        hcsNotificationSystemCreateCompleted,
        hcsNotificationSystemStartCompleted,
        hcsNotificationSystemPauseCompleted,
        hcsNotificationSystemResumeCompleted,
    } {
        channels[notif] = make(notificationChannel, 1)
    }
    return channels
}

func newProcessChannels() notificationChannels {
    channels := make(notificationChannels)
    for _, notif := range []hcsNotification{
        hcsNotificationServiceDisconnect,
        hcsNotificationProcessExited,
    } {
        channels[notif] = make(notificationChannel, 1)
    }
    return channels
}

func closeChannels(channels notificationChannels) {
    for _, c := range channels {
        close(c)
    }
}

func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
    var result error
    if int32(notificationStatus) < 0 {
        result = interop.Win32FromHresult(notificationStatus)
    }

    callbackMapLock.RLock()
    context := callbackMap[callbackNumber]
    callbackMapLock.RUnlock()

    if context == nil {
        return 0
    }

    log := logrus.WithFields(logrus.Fields{
        "notification-type": notificationType.String(),
        "system-id":         context.systemID,
    })
    if context.processID != 0 {
        log.Data[logfields.ProcessID] = context.processID
    }
    log.Debug("HCS notification")

    if channel, ok := context.channels[notificationType]; ok {
        channel <- result
    }

    return 0
}
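
Aside: the file above hands a plain numeric callback token across the C boundary instead of a Go pointer, then resolves it back through an RWMutex-guarded map when a notification arrives. The standalone sketch below (not part of the vendored file; watcherContext, register and onNotification are invented names) illustrates that registration pattern on its own, without the Windows-only syscall.NewCallback plumbing.

package main

import (
    "fmt"
    "sync"
)

// watcherContext stands in for notifcationWatcherContext: per-registration
// state that the callback needs to find again later.
type watcherContext struct {
    systemID string
    events   chan string
}

var (
    nextCallback    uintptr
    callbackMap     = map[uintptr]*watcherContext{}
    callbackMapLock = sync.RWMutex{}
)

// register hands out a numeric token instead of a Go pointer, so the token
// can safely cross a C boundary, as callback.go does for HCS callbacks.
func register(ctx *watcherContext) uintptr {
    callbackMapLock.Lock()
    defer callbackMapLock.Unlock()
    n := nextCallback
    nextCallback++
    callbackMap[n] = ctx
    return n
}

// onNotification is what the C-side callback would invoke: it resolves the
// token back to the Go context under a read lock and delivers the event.
func onNotification(callbackNumber uintptr, event string) {
    callbackMapLock.RLock()
    ctx := callbackMap[callbackNumber]
    callbackMapLock.RUnlock()
    if ctx == nil {
        return // already unregistered; drop the notification
    }
    ctx.events <- event
}

func main() {
    ctx := &watcherContext{systemID: "demo", events: make(chan string, 1)}
    token := register(ctx)
    onNotification(token, "SystemExited")
    fmt.Println(<-ctx.events) // SystemExited
}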

vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go (new file, generated, vendored, 7 lines)
@@ -0,0 +1,7 @@
package hcs

import "C"

// This import is needed to make the library compile as CGO because HCSSHIM
// only works with CGO due to callbacks from HCS coming back from a C thread
// which is not supported without CGO. See https://github.com/golang/go/issues/10973

vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go (new file, generated, vendored, 336 lines)
@@ -0,0 +1,336 @@
package hcs

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "net"
    "syscall"

    "github.com/Microsoft/hcsshim/internal/log"
)

var (
    // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
    ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)

    // ErrElementNotFound is an error encountered when the object being referenced does not exist
    ErrElementNotFound = syscall.Errno(0x490)

    // ErrNotSupported is an error encountered when the requested operation is not supported
    ErrNotSupported = syscall.Errno(0x32)

    // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
    // decimal -2147024883 / hex 0x8007000d
    ErrInvalidData = syscall.Errno(0xd)

    // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
    ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")

    // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
    ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")

    // ErrInvalidNotificationType is an error encountered when an invalid notification type is used
    ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")

    // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
    ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")

    // ErrTimeout is an error encountered when waiting on a notification times out
    ErrTimeout = errors.New("hcsshim: timeout waiting for notification")

    // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
    // a different expected notification
    ErrUnexpectedContainerExit = errors.New("unexpected container exit")

    // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
    // is lost while waiting for a notification
    ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")

    // ErrUnexpectedValue is an error encountered when hcs returns an invalid value
    ErrUnexpectedValue = errors.New("unexpected value returned from hcs")

    // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
    ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)

    // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
    ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)

    // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
    ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)

    // ErrProcNotFound is an error encountered when the process cannot be found
    ErrProcNotFound = syscall.Errno(0x7f)

    // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
    // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
    ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)

    // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
    ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)

    // ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
    ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)

    // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly
    ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106)

    // ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
    ErrPlatformNotSupported = errors.New("unsupported platform request")
)

type ErrorEvent struct {
    Message    string `json:"Message,omitempty"`    // Fully formatted error message
    StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form
    Provider   string `json:"Provider,omitempty"`
    EventID    uint16 `json:"EventId,omitempty"`
    Flags      uint32 `json:"Flags,omitempty"`
    Source     string `json:"Source,omitempty"`
    //Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function)
}

type hcsResult struct {
    Error        int32
    ErrorMessage string
    ErrorEvents  []ErrorEvent `json:"ErrorEvents,omitempty"`
}

func (ev *ErrorEvent) String() string {
    evs := "[Event Detail: " + ev.Message
    if ev.StackTrace != "" {
        evs += " Stack Trace: " + ev.StackTrace
    }
    if ev.Provider != "" {
        evs += " Provider: " + ev.Provider
    }
    if ev.EventID != 0 {
        evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID)
    }
    if ev.Flags != 0 {
        evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags)
    }
    if ev.Source != "" {
        evs += " Source: " + ev.Source
    }
    evs += "]"
    return evs
}

func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent {
    if resultJSON != "" {
        result := &hcsResult{}
        if err := json.Unmarshal([]byte(resultJSON), result); err != nil {
            log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result")
            return nil
        }
        return result.ErrorEvents
    }
    return nil
}

type HcsError struct {
    Op     string
    Err    error
    Events []ErrorEvent
}

var _ net.Error = &HcsError{}

func (e *HcsError) Error() string {
    s := e.Op + ": " + e.Err.Error()
    for _, ev := range e.Events {
        s += "\n" + ev.String()
    }
    return s
}

func (e *HcsError) Temporary() bool {
    err, ok := e.Err.(net.Error)
    return ok && err.Temporary()
}

func (e *HcsError) Timeout() bool {
    err, ok := e.Err.(net.Error)
    return ok && err.Timeout()
}

// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
    SystemID string
    Pid      int
    Op       string
    Err      error
    Events   []ErrorEvent
}

var _ net.Error = &ProcessError{}

// SystemError is an error encountered in HCS during an operation on a Container object
type SystemError struct {
    ID     string
    Op     string
    Err    error
    Extra  string
    Events []ErrorEvent
}

var _ net.Error = &SystemError{}

func (e *SystemError) Error() string {
    s := e.Op + " " + e.ID + ": " + e.Err.Error()
    for _, ev := range e.Events {
        s += "\n" + ev.String()
    }
    if e.Extra != "" {
        s += "\n(extra info: " + e.Extra + ")"
    }
    return s
}

func (e *SystemError) Temporary() bool {
    err, ok := e.Err.(net.Error)
    return ok && err.Temporary()
}

func (e *SystemError) Timeout() bool {
    err, ok := e.Err.(net.Error)
    return ok && err.Timeout()
}

func makeSystemError(system *System, op string, extra string, err error, events []ErrorEvent) error {
    // Don't double wrap errors
    if _, ok := err.(*SystemError); ok {
        return err
    }
    return &SystemError{
        ID:     system.ID(),
        Op:     op,
        Extra:  extra,
        Err:    err,
        Events: events,
    }
}

func (e *ProcessError) Error() string {
    s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error())
    for _, ev := range e.Events {
        s += "\n" + ev.String()
    }
    return s
}

func (e *ProcessError) Temporary() bool {
    err, ok := e.Err.(net.Error)
    return ok && err.Temporary()
}

func (e *ProcessError) Timeout() bool {
    err, ok := e.Err.(net.Error)
    return ok && err.Timeout()
}

func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error {
    // Don't double wrap errors
    if _, ok := err.(*ProcessError); ok {
        return err
    }
    return &ProcessError{
        Pid:      process.Pid(),
        SystemID: process.SystemID(),
        Op:       op,
        Err:      err,
        Events:   events,
    }
}

// IsNotExist checks if an error is caused by the Container or Process not existing.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsNotExist(err error) bool {
    err = getInnerError(err)
    return err == ErrComputeSystemDoesNotExist ||
        err == ErrElementNotFound ||
        err == ErrProcNotFound
}

// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
    err = getInnerError(err)
    return err == ErrAlreadyClosed
}

// IsPending returns a boolean indicating whether the error is that
// the requested operation is being completed in the background.
func IsPending(err error) bool {
    err = getInnerError(err)
    return err == ErrVmcomputeOperationPending
}

// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
    if err, ok := err.(net.Error); ok && err.Timeout() {
        return true
    }
    err = getInnerError(err)
    return err == ErrTimeout
}

// IsAlreadyStopped returns a boolean indicating whether the error is caused by
// a Container or Process being already stopped.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsAlreadyStopped(err error) bool {
    err = getInnerError(err)
    return err == ErrVmcomputeAlreadyStopped ||
        err == ErrElementNotFound ||
        err == ErrProcNotFound
}

// IsNotSupported returns a boolean indicating whether the error is caused by
// unsupported platform requests.
// Note: Currently an unsupported platform request can mean that either
// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
// is thrown from the Platform
func IsNotSupported(err error) bool {
    err = getInnerError(err)
    // If Platform doesn't recognize or support the request sent, below errors are seen
    return err == ErrVmcomputeInvalidJSON ||
        err == ErrInvalidData ||
        err == ErrNotSupported ||
        err == ErrVmcomputeUnknownMessage
}

// IsOperationInvalidState returns true when err is caused by
// `ErrVmcomputeOperationInvalidState`.
func IsOperationInvalidState(err error) bool {
    err = getInnerError(err)
    return err == ErrVmcomputeOperationInvalidState
}

func getInnerError(err error) error {
    switch pe := err.(type) {
    case nil:
        return nil
    case *HcsError:
        err = pe.Err
    case *SystemError:
        err = pe.Err
    case *ProcessError:
        err = pe.Err
    }
    return err
}

func getOperationLogResult(err error) (string, error) {
    switch err {
    case nil:
        return "Success", nil
    default:
        return "Error", err
    }
}
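
Aside: errors.go wraps the raw HCS errno values in typed errors that carry the operation name and any error events, and the Is* helpers first unwrap with getInnerError and then compare against the sentinel values. The following is a minimal standalone sketch of that unwrap-then-compare shape; opError, errNotFound and isNotFound are invented names, and nothing beyond the standard library is assumed.

package main

import (
    "errors"
    "fmt"
    "syscall"
)

// errNotFound stands in for sentinels like ErrComputeSystemDoesNotExist.
var errNotFound = syscall.Errno(0x490)

// opError mirrors the shape of SystemError/ProcessError: an operation name
// plus the underlying cause.
type opError struct {
    Op  string
    Err error
}

func (e *opError) Error() string { return e.Op + ": " + e.Err.Error() }

// getInner unwraps one level, the way getInnerError does for the HCS error types.
func getInner(err error) error {
    if oe, ok := err.(*opError); ok {
        return oe.Err
    }
    return err
}

// isNotFound follows the IsNotExist pattern: unwrap first, then compare
// against the sentinel value.
func isNotFound(err error) bool {
    return getInner(err) == errNotFound
}

func main() {
    wrapped := &opError{Op: "hcsshim::OpenComputeSystem", Err: errNotFound}
    fmt.Println(isNotFound(wrapped))                 // true
    fmt.Println(isNotFound(errors.New("unrelated"))) // false
}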

vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go (new file, generated, vendored, 452 lines)
@@ -0,0 +1,452 @@
package hcs

import (
    "context"
    "encoding/json"
    "io"
    "sync"
    "syscall"
    "time"

    "github.com/Microsoft/hcsshim/internal/log"
    "github.com/Microsoft/hcsshim/internal/oc"
    "github.com/Microsoft/hcsshim/internal/vmcompute"
    "go.opencensus.io/trace"
)

// Process represents a process running in an HCS compute system.
type Process struct {
    handleLock     sync.RWMutex
    handle         vmcompute.HcsProcess
    processID      int
    system         *System
    stdin          io.WriteCloser
    stdout         io.ReadCloser
    stderr         io.ReadCloser
    callbackNumber uintptr

    closedWaitOnce sync.Once
    waitBlock      chan struct{}
    exitCode       int
    waitError      error
}

func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process {
    return &Process{
        handle:    process,
        processID: processID,
        system:    computeSystem,
        waitBlock: make(chan struct{}),
    }
}

type processModifyRequest struct {
    Operation   string
    ConsoleSize *consoleSize `json:",omitempty"`
    CloseHandle *closeHandle `json:",omitempty"`
}

type consoleSize struct {
    Height uint16
    Width  uint16
}

type closeHandle struct {
    Handle string
}

type processStatus struct {
    ProcessID      uint32
    Exited         bool
    ExitCode       uint32
    LastWaitResult int32
}

const (
    stdIn  string = "StdIn"
    stdOut string = "StdOut"
    stdErr string = "StdErr"
)

const (
    modifyConsoleSize string = "ConsoleSize"
    modifyCloseHandle string = "CloseHandle"
)

// Pid returns the process ID of the process within the container.
func (process *Process) Pid() int {
    return process.processID
}

// SystemID returns the ID of the process's compute system.
func (process *Process) SystemID() string {
    return process.system.ID()
}

func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
    switch err {
    case nil:
        return true, nil
    case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
        select {
        case <-process.waitBlock:
            // The process exit notification has already arrived.
        default:
            // The process should be gone, but we have not received the notification.
            // After a second, force unblock the process wait to work around a possible
            // deadlock in the HCS.
            go func() {
                time.Sleep(time.Second)
                process.closedWaitOnce.Do(func() {
                    log.G(ctx).WithError(err).Warn("force unblocking process waits")
                    process.exitCode = -1
                    process.waitError = err
                    close(process.waitBlock)
                })
            }()
        }
        return false, nil
    default:
        return false, err
    }
}

// Signal signals the process with `options`.
//
// For LCOW `guestrequest.SignalProcessOptionsLCOW`.
//
// For WCOW `guestrequest.SignalProcessOptionsWCOW`.
func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) {
    process.handleLock.RLock()
    defer process.handleLock.RUnlock()

    operation := "hcsshim::Process::Signal"

    if process.handle == 0 {
        return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
    }

    optionsb, err := json.Marshal(options)
    if err != nil {
        return false, err
    }

    resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb))
    events := processHcsResult(ctx, resultJSON)
    delivered, err := process.processSignalResult(ctx, err)
    if err != nil {
        err = makeProcessError(process, operation, err, events)
    }
    return delivered, err
}

// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *Process) Kill(ctx context.Context) (bool, error) {
    process.handleLock.RLock()
    defer process.handleLock.RUnlock()

    operation := "hcsshim::Process::Kill"

    if process.handle == 0 {
        return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
    }

    resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
    events := processHcsResult(ctx, resultJSON)
    delivered, err := process.processSignalResult(ctx, err)
    if err != nil {
        err = makeProcessError(process, operation, err, events)
    }
    return delivered, err
}

// waitBackground waits for the process exit notification. Once received sets
// `process.waitError` (if any) and unblocks all `Wait` calls.
//
// This MUST be called exactly once per `process.handle` but `Wait` is safe to
// call multiple times.
func (process *Process) waitBackground() {
    operation := "hcsshim::Process::waitBackground"
    ctx, span := trace.StartSpan(context.Background(), operation)
    defer span.End()
    span.AddAttributes(
        trace.StringAttribute("cid", process.SystemID()),
        trace.Int64Attribute("pid", int64(process.processID)))

    var (
        err      error
        exitCode = -1
    )

    err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil)
    if err != nil {
        err = makeProcessError(process, operation, err, nil)
        log.G(ctx).WithError(err).Error("failed wait")
    } else {
        process.handleLock.RLock()
        defer process.handleLock.RUnlock()

        // Make sure we didn't race with Close() here
        if process.handle != 0 {
            propertiesJSON, resultJSON, err := vmcompute.HcsGetProcessProperties(ctx, process.handle)
            events := processHcsResult(ctx, resultJSON)
            if err != nil {
                err = makeProcessError(process, operation, err, events)
            } else {
                properties := &processStatus{}
                err = json.Unmarshal([]byte(propertiesJSON), properties)
                if err != nil {
                    err = makeProcessError(process, operation, err, nil)
                } else {
                    if properties.LastWaitResult != 0 {
                        log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result")
                    } else {
                        exitCode = int(properties.ExitCode)
                    }
                }
            }
        }
    }
    log.G(ctx).WithField("exitCode", exitCode).Debug("process exited")

    process.closedWaitOnce.Do(func() {
        process.exitCode = exitCode
        process.waitError = err
        close(process.waitBlock)
    })
    oc.SetSpanStatus(span, err)
}

// Wait waits for the process to exit. If the process has already exited returns
// the previous error (if any).
func (process *Process) Wait() error {
    <-process.waitBlock
    return process.waitError
}

// ResizeConsole resizes the console of the process.
func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error {
    process.handleLock.RLock()
    defer process.handleLock.RUnlock()

    operation := "hcsshim::Process::ResizeConsole"

    if process.handle == 0 {
        return makeProcessError(process, operation, ErrAlreadyClosed, nil)
    }

    modifyRequest := processModifyRequest{
        Operation: modifyConsoleSize,
        ConsoleSize: &consoleSize{
            Height: height,
            Width:  width,
        },
    }

    modifyRequestb, err := json.Marshal(modifyRequest)
    if err != nil {
        return err
    }

    resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return makeProcessError(process, operation, err, events)
    }

    return nil
}

// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *Process) ExitCode() (int, error) {
    select {
    case <-process.waitBlock:
        if process.waitError != nil {
            return -1, process.waitError
        }
        return process.exitCode, nil
    default:
        return -1, makeProcessError(process, "hcsshim::Process::ExitCode", ErrInvalidProcessState, nil)
    }
}

// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; but this function can only
// be called once on each Process.
func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
    operation := "hcsshim::Process::StdioLegacy"
    ctx, span := trace.StartSpan(context.Background(), operation)
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(
        trace.StringAttribute("cid", process.SystemID()),
        trace.Int64Attribute("pid", int64(process.processID)))

    process.handleLock.RLock()
    defer process.handleLock.RUnlock()

    if process.handle == 0 {
        return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
    }

    processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle)
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return nil, nil, nil, makeProcessError(process, operation, err, events)
    }

    pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
    if err != nil {
        return nil, nil, nil, makeProcessError(process, operation, err, nil)
    }

    return pipes[0], pipes[1], pipes[2], nil
}

// Stdio returns the stdin, stdout, and stderr pipes, respectively.
// To close them, close the process handle.
func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) {
    return process.stdin, process.stdout, process.stderr
}

// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
func (process *Process) CloseStdin(ctx context.Context) error {
    process.handleLock.RLock()
    defer process.handleLock.RUnlock()

    operation := "hcsshim::Process::CloseStdin"

    if process.handle == 0 {
        return makeProcessError(process, operation, ErrAlreadyClosed, nil)
    }

    modifyRequest := processModifyRequest{
        Operation: modifyCloseHandle,
        CloseHandle: &closeHandle{
            Handle: stdIn,
        },
    }

    modifyRequestb, err := json.Marshal(modifyRequest)
    if err != nil {
        return err
    }

    resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return makeProcessError(process, operation, err, events)
    }

    if process.stdin != nil {
        process.stdin.Close()
    }
    return nil
}

// Close cleans up any state associated with the process but does not kill
// or wait on it.
func (process *Process) Close() (err error) {
    operation := "hcsshim::Process::Close"
    ctx, span := trace.StartSpan(context.Background(), operation)
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(
        trace.StringAttribute("cid", process.SystemID()),
        trace.Int64Attribute("pid", int64(process.processID)))

    process.handleLock.Lock()
    defer process.handleLock.Unlock()

    // Don't double free this
    if process.handle == 0 {
        return nil
    }

    if process.stdin != nil {
        process.stdin.Close()
    }
    if process.stdout != nil {
        process.stdout.Close()
    }
    if process.stderr != nil {
        process.stderr.Close()
    }

    if err = process.unregisterCallback(ctx); err != nil {
        return makeProcessError(process, operation, err, nil)
    }

    if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil {
        return makeProcessError(process, operation, err, nil)
    }

    process.handle = 0
    process.closedWaitOnce.Do(func() {
        process.exitCode = -1
        process.waitError = ErrAlreadyClosed
        close(process.waitBlock)
    })

    return nil
}

func (process *Process) registerCallback(ctx context.Context) error {
    callbackContext := &notifcationWatcherContext{
        channels:  newProcessChannels(),
        systemID:  process.SystemID(),
        processID: process.processID,
    }

    callbackMapLock.Lock()
    callbackNumber := nextCallback
    nextCallback++
    callbackMap[callbackNumber] = callbackContext
    callbackMapLock.Unlock()

    callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber)
    if err != nil {
        return err
    }
    callbackContext.handle = callbackHandle
    process.callbackNumber = callbackNumber

    return nil
}

func (process *Process) unregisterCallback(ctx context.Context) error {
    callbackNumber := process.callbackNumber

    callbackMapLock.RLock()
    callbackContext := callbackMap[callbackNumber]
    callbackMapLock.RUnlock()

    if callbackContext == nil {
        return nil
    }

    handle := callbackContext.handle

    if handle == 0 {
        return nil
    }

    // vmcompute.HcsUnregisterProcessCallback has its own synchronization to
    // wait for all callbacks to complete. We must NOT hold the callbackMapLock.
    err := vmcompute.HcsUnregisterProcessCallback(ctx, handle)
    if err != nil {
        return err
    }

    closeChannels(callbackContext.channels)

    callbackMapLock.Lock()
    delete(callbackMap, callbackNumber)
    callbackMapLock.Unlock()

    handle = 0

    return nil
}
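
Aside: a detail worth noting in process.go is how exit is broadcast. waitBackground, Close and the forced unblock in processSignalResult can all race to finish the wait, so the result is published by closing waitBlock inside a sync.Once. The sketch below isolates that pattern with invented names (waiter, complete); it is illustrative only, not the hcsshim API.

package main

import (
    "errors"
    "fmt"
    "sync"
)

// waiter reproduces the exit-broadcast shape used by Process: a channel that
// is closed exactly once, plus the results the close publishes.
type waiter struct {
    closedOnce sync.Once
    waitBlock  chan struct{}
    exitCode   int
    waitError  error
}

func newWaiter() *waiter {
    return &waiter{waitBlock: make(chan struct{}), exitCode: -1}
}

// complete may be called from several paths (exit notification, Close, a
// forced unblock); sync.Once guarantees only the first caller wins.
func (w *waiter) complete(code int, err error) {
    w.closedOnce.Do(func() {
        w.exitCode = code
        w.waitError = err
        close(w.waitBlock)
    })
}

// Wait blocks until complete has run, like Process.Wait.
func (w *waiter) Wait() error {
    <-w.waitBlock
    return w.waitError
}

// ExitCode is non-blocking, like Process.ExitCode: it fails if the process
// has not been marked exited yet.
func (w *waiter) ExitCode() (int, error) {
    select {
    case <-w.waitBlock:
        return w.exitCode, w.waitError
    default:
        return -1, errors.New("process still running")
    }
}

func main() {
    w := newWaiter()
    w.complete(0, nil)                   // exit notification path
    w.complete(-1, errors.New("closed")) // a later Close path is a no-op
    fmt.Println(w.Wait())                // <nil>
    code, err := w.ExitCode()
    fmt.Println(code, err)               // 0 <nil>
}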

vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go (new file, generated, vendored, 656 lines)
@@ -0,0 +1,656 @@
package hcs

import (
    "context"
    "encoding/json"
    "errors"
    "os"
    "strconv"
    "strings"
    "sync"
    "syscall"
    "time"

    "github.com/Microsoft/hcsshim/internal/cow"
    "github.com/Microsoft/hcsshim/internal/log"
    "github.com/Microsoft/hcsshim/internal/oc"
    "github.com/Microsoft/hcsshim/internal/schema1"
    "github.com/Microsoft/hcsshim/internal/timeout"
    "github.com/Microsoft/hcsshim/internal/vmcompute"
    "go.opencensus.io/trace"
)

// currentContainerStarts is used to limit the number of concurrent container
// starts.
var currentContainerStarts containerStarts

type containerStarts struct {
    maxParallel int
    inProgress  int
    sync.Mutex
}

func init() {
    mpsS := os.Getenv("HCSSHIM_MAX_PARALLEL_START")
    if len(mpsS) > 0 {
        mpsI, err := strconv.Atoi(mpsS)
        if err != nil || mpsI < 0 {
            return
        }
        currentContainerStarts.maxParallel = mpsI
    }
}

type System struct {
    handleLock     sync.RWMutex
    handle         vmcompute.HcsSystem
    id             string
    callbackNumber uintptr

    closedWaitOnce sync.Once
    waitBlock      chan struct{}
    waitError      error
    exitError      error

    os, typ string
}

func newSystem(id string) *System {
    return &System{
        id:        id,
        waitBlock: make(chan struct{}),
    }
}

// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
    operation := "hcsshim::CreateComputeSystem"

    // hcsCreateComputeSystemContext is an async operation. Start the outer span
    // here to measure the full create time.
    ctx, span := trace.StartSpan(ctx, operation)
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(trace.StringAttribute("cid", id))

    computeSystem := newSystem(id)

    hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
    if err != nil {
        return nil, err
    }

    hcsDocument := string(hcsDocumentB)

    var (
        identity    syscall.Handle
        resultJSON  string
        createError error
    )
    computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity)
    if createError == nil || IsPending(createError) {
        defer func() {
            if err != nil {
                computeSystem.Close()
            }
        }()
        if err = computeSystem.registerCallback(ctx); err != nil {
            // Terminate the compute system if it still exists. We're okay to
            // ignore a failure here.
            computeSystem.Terminate(ctx)
            return nil, makeSystemError(computeSystem, operation, "", err, nil)
        }
    }

    events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
    if err != nil {
        if err == ErrTimeout {
            // Terminate the compute system if it still exists. We're okay to
            // ignore a failure here.
            computeSystem.Terminate(ctx)
        }
        return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events)
    }
    go computeSystem.waitBackground()
    if err = computeSystem.getCachedProperties(ctx); err != nil {
        return nil, err
    }
    return computeSystem, nil
}

// OpenComputeSystem opens an existing compute system by ID.
func OpenComputeSystem(ctx context.Context, id string) (*System, error) {
    operation := "hcsshim::OpenComputeSystem"

    computeSystem := newSystem(id)
    handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id)
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, events)
    }
    computeSystem.handle = handle
    defer func() {
        if err != nil {
            computeSystem.Close()
        }
    }()
    if err = computeSystem.registerCallback(ctx); err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, nil)
    }
    go computeSystem.waitBackground()
    if err = computeSystem.getCachedProperties(ctx); err != nil {
        return nil, err
    }
    return computeSystem, nil
}

func (computeSystem *System) getCachedProperties(ctx context.Context) error {
    props, err := computeSystem.Properties(ctx)
    if err != nil {
        return err
    }
    computeSystem.typ = strings.ToLower(props.SystemType)
    computeSystem.os = strings.ToLower(props.RuntimeOSType)
    if computeSystem.os == "" && computeSystem.typ == "container" {
        // Pre-RS5 HCS did not return the OS, but it only supported containers
        // that ran Windows.
        computeSystem.os = "windows"
    }
    return nil
}

// OS returns the operating system of the compute system, "linux" or "windows".
func (computeSystem *System) OS() string {
    return computeSystem.os
}

// IsOCI returns whether processes in the compute system should be created via
// OCI.
func (computeSystem *System) IsOCI() bool {
    return computeSystem.os == "linux" && computeSystem.typ == "container"
}

// GetComputeSystems gets a list of the compute systems on the system that match the query
func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) {
    operation := "hcsshim::GetComputeSystems"

    queryb, err := json.Marshal(q)
    if err != nil {
        return nil, err
    }

    computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb))
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return nil, &HcsError{Op: operation, Err: err, Events: events}
    }

    if computeSystemsJSON == "" {
        return nil, ErrUnexpectedValue
    }
    computeSystems := []schema1.ContainerProperties{}
    if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil {
        return nil, err
    }

    return computeSystems, nil
}

// Start synchronously starts the computeSystem.
func (computeSystem *System) Start(ctx context.Context) (err error) {
    operation := "hcsshim::System::Start"

    // hcsStartComputeSystemContext is an async operation. Start the outer span
    // here to measure the full start time.
    ctx, span := trace.StartSpan(ctx, operation)
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    if computeSystem.handle == 0 {
        return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
    }

    // This is a very simple backoff-retry loop to limit the number
    // of parallel container starts if environment variable
    // HCSSHIM_MAX_PARALLEL_START is set to a positive integer.
    // It should generally only be used as a workaround to various
    // platform issues that exist between RS1 and RS4 as of Aug 2018
    if currentContainerStarts.maxParallel > 0 {
        for {
            currentContainerStarts.Lock()
            if currentContainerStarts.inProgress < currentContainerStarts.maxParallel {
                currentContainerStarts.inProgress++
                currentContainerStarts.Unlock()
                break
            }
            if currentContainerStarts.inProgress == currentContainerStarts.maxParallel {
                currentContainerStarts.Unlock()
                time.Sleep(100 * time.Millisecond)
            }
        }
        // Make sure we decrement the count when we are done.
        defer func() {
            currentContainerStarts.Lock()
            currentContainerStarts.inProgress--
            currentContainerStarts.Unlock()
        }()
    }

    resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "")
    events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
    if err != nil {
        return makeSystemError(computeSystem, operation, "", err, events)
    }

    return nil
}

// ID returns the compute system's identifier.
func (computeSystem *System) ID() string {
    return computeSystem.id
}

// Shutdown requests a compute system shutdown.
func (computeSystem *System) Shutdown(ctx context.Context) error {
    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    operation := "hcsshim::System::Shutdown"

    if computeSystem.handle == 0 {
        return nil
    }

    resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
    events := processHcsResult(ctx, resultJSON)
    switch err {
    case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
    default:
        return makeSystemError(computeSystem, operation, "", err, events)
    }
    return nil
}

// Terminate requests a compute system terminate.
func (computeSystem *System) Terminate(ctx context.Context) error {
    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    operation := "hcsshim::System::Terminate"

    if computeSystem.handle == 0 {
        return nil
    }

    resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
    events := processHcsResult(ctx, resultJSON)
    switch err {
    case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
    default:
        return makeSystemError(computeSystem, operation, "", err, events)
    }
    return nil
}

// waitBackground waits for the compute system exit notification. Once received
// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls.
//
// This MUST be called exactly once per `computeSystem.handle` but `Wait` is
// safe to call multiple times.
func (computeSystem *System) waitBackground() {
    operation := "hcsshim::System::waitBackground"
    ctx, span := trace.StartSpan(context.Background(), operation)
    defer span.End()
    span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

    err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
    switch err {
    case nil:
        log.G(ctx).Debug("system exited")
    case ErrVmcomputeUnexpectedExit:
        log.G(ctx).Debug("unexpected system exit")
        computeSystem.exitError = makeSystemError(computeSystem, operation, "", err, nil)
        err = nil
    default:
        err = makeSystemError(computeSystem, operation, "", err, nil)
    }
    computeSystem.closedWaitOnce.Do(func() {
        computeSystem.waitError = err
        close(computeSystem.waitBlock)
    })
    oc.SetSpanStatus(span, err)
}

// Wait synchronously waits for the compute system to shutdown or terminate. If
// the compute system has already exited returns the previous error (if any).
func (computeSystem *System) Wait() error {
    <-computeSystem.waitBlock
    return computeSystem.waitError
}

// ExitError returns an error describing the reason the compute system terminated.
func (computeSystem *System) ExitError() error {
    select {
    case <-computeSystem.waitBlock:
        if computeSystem.waitError != nil {
            return computeSystem.waitError
        }
        return computeSystem.exitError
    default:
        return errors.New("container not exited")
    }
}

func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) {
    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    operation := "hcsshim::System::Properties"

    queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types})
    if err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, nil)
    }

    propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes))
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, events)
    }

    if propertiesJSON == "" {
        return nil, ErrUnexpectedValue
    }
    properties := &schema1.ContainerProperties{}
    if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, nil)
    }

    return properties, nil
}

// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Pause(ctx context.Context) (err error) {
    operation := "hcsshim::System::Pause"

    // hcsPauseComputeSystemContext is an async operation. Start the outer span
    // here to measure the full pause time.
    ctx, span := trace.StartSpan(ctx, operation)
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    if computeSystem.handle == 0 {
        return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
    }

    resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "")
    events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
    if err != nil {
        return makeSystemError(computeSystem, operation, "", err, events)
    }

    return nil
}

// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Resume(ctx context.Context) (err error) {
    operation := "hcsshim::System::Resume"

    // hcsResumeComputeSystemContext is an async operation. Start the outer span
    // here to measure the full restore time.
    ctx, span := trace.StartSpan(ctx, operation)
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    if computeSystem.handle == 0 {
        return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
    }

    resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "")
    events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
    if err != nil {
        return makeSystemError(computeSystem, operation, "", err, events)
    }

    return nil
}

func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) {
    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    if computeSystem.handle == 0 {
        return nil, nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
    }

    configurationb, err := json.Marshal(c)
    if err != nil {
        return nil, nil, makeSystemError(computeSystem, operation, "", err, nil)
    }

    configuration := string(configurationb)
    processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration)
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return nil, nil, makeSystemError(computeSystem, operation, configuration, err, events)
    }

    log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid")
    return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil
}

// CreateProcessNoStdio launches a new process within the computeSystem. The
// Stdio handles are not cached on the process struct.
func (computeSystem *System) CreateProcessNoStdio(c interface{}) (_ cow.Process, err error) {
    operation := "hcsshim::System::CreateProcessNoStdio"
    ctx, span := trace.StartSpan(context.Background(), operation)
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

    process, processInfo, err := computeSystem.createProcess(ctx, operation, c)
    if err != nil {
        return nil, err
    }
    defer func() {
        if err != nil {
            process.Close()
        }
    }()

    // We don't do anything with these handles. Close them so they don't leak.
    syscall.Close(processInfo.StdInput)
    syscall.Close(processInfo.StdOutput)
    syscall.Close(processInfo.StdError)

    if err = process.registerCallback(ctx); err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, nil)
    }
    go process.waitBackground()

    return process, nil
}

// CreateProcess launches a new process within the computeSystem.
func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) {
    operation := "hcsshim::System::CreateProcess"
    process, processInfo, err := computeSystem.createProcess(ctx, operation, c)
    if err != nil {
        return nil, err
    }
    defer func() {
        if err != nil {
            process.Close()
        }
    }()

    pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
    if err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, nil)
    }
    process.stdin = pipes[0]
    process.stdout = pipes[1]
    process.stderr = pipes[2]

    if err = process.registerCallback(ctx); err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, nil)
    }
    go process.waitBackground()

    return process, nil
}

// OpenProcess gets an interface to an existing process within the computeSystem.
func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) {
    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    operation := "hcsshim::System::OpenProcess"

    if computeSystem.handle == 0 {
        return nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
    }

    processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid))
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, events)
    }

    process := newProcess(processHandle, pid, computeSystem)
    if err = process.registerCallback(ctx); err != nil {
        return nil, makeSystemError(computeSystem, operation, "", err, nil)
    }
    go process.waitBackground()

    return process, nil
}

// Close cleans up any state associated with the compute system but does not terminate or wait for it.
func (computeSystem *System) Close() (err error) {
    operation := "hcsshim::System::Close"
    ctx, span := trace.StartSpan(context.Background(), operation)
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

    computeSystem.handleLock.Lock()
    defer computeSystem.handleLock.Unlock()

    // Don't double free this
    if computeSystem.handle == 0 {
        return nil
    }

    if err = computeSystem.unregisterCallback(ctx); err != nil {
        return makeSystemError(computeSystem, operation, "", err, nil)
    }

    err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle)
    if err != nil {
        return makeSystemError(computeSystem, operation, "", err, nil)
    }

    computeSystem.handle = 0
    computeSystem.closedWaitOnce.Do(func() {
        computeSystem.waitError = ErrAlreadyClosed
        close(computeSystem.waitBlock)
    })

    return nil
}

func (computeSystem *System) registerCallback(ctx context.Context) error {
    callbackContext := &notifcationWatcherContext{
        channels: newSystemChannels(),
        systemID: computeSystem.id,
    }

    callbackMapLock.Lock()
    callbackNumber := nextCallback
    nextCallback++
    callbackMap[callbackNumber] = callbackContext
    callbackMapLock.Unlock()

    callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber)
    if err != nil {
        return err
    }
    callbackContext.handle = callbackHandle
    computeSystem.callbackNumber = callbackNumber

    return nil
}

func (computeSystem *System) unregisterCallback(ctx context.Context) error {
    callbackNumber := computeSystem.callbackNumber

    callbackMapLock.RLock()
    callbackContext := callbackMap[callbackNumber]
    callbackMapLock.RUnlock()

    if callbackContext == nil {
        return nil
    }

    handle := callbackContext.handle

    if handle == 0 {
        return nil
    }

    // hcsUnregisterComputeSystemCallback has its own synchronization
    // to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
    err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle)
    if err != nil {
        return err
    }

    closeChannels(callbackContext.channels)

    callbackMapLock.Lock()
    delete(callbackMap, callbackNumber)
    callbackMapLock.Unlock()

    handle = 0

    return nil
}

// Modify the System by sending a request to HCS
func (computeSystem *System) Modify(ctx context.Context, config interface{}) error {
    computeSystem.handleLock.RLock()
    defer computeSystem.handleLock.RUnlock()

    operation := "hcsshim::System::Modify"

    if computeSystem.handle == 0 {
        return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
    }

    requestBytes, err := json.Marshal(config)
    if err != nil {
        return err
    }

    requestJSON := string(requestBytes)
    resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON)
    events := processHcsResult(ctx, resultJSON)
    if err != nil {
        return makeSystemError(computeSystem, operation, requestJSON, err, events)
    }

    return nil
}
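
Aside: System.Start throttles concurrent container starts with a mutex-guarded counter taken from HCSSHIM_MAX_PARALLEL_START and a 100 ms sleep-and-retry loop. The standalone sketch below (startGate, acquire and release are invented names) shows the same gate in isolation; a real implementation might prefer a buffered-channel semaphore, but this mirrors the loop used above.

package main

import (
    "fmt"
    "sync"
    "time"
)

// startGate mirrors the containerStarts counter: a plain mutex-guarded count
// polled with a short sleep rather than a semaphore channel.
type startGate struct {
    sync.Mutex
    maxParallel int
    inProgress  int
}

// acquire blocks (by polling) until a slot is free, as System.Start does when
// HCSSHIM_MAX_PARALLEL_START is set to a positive integer.
func (g *startGate) acquire() {
    for {
        g.Lock()
        if g.inProgress < g.maxParallel {
            g.inProgress++
            g.Unlock()
            return
        }
        g.Unlock()
        time.Sleep(100 * time.Millisecond)
    }
}

// release decrements the count once the start attempt has finished.
func (g *startGate) release() {
    g.Lock()
    g.inProgress--
    g.Unlock()
}

func main() {
    gate := &startGate{maxParallel: 2}
    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            gate.acquire()
            defer gate.release()
            fmt.Println("starting container", n)
            time.Sleep(50 * time.Millisecond) // stand-in for HcsStartComputeSystem
        }(i)
    }
    wg.Wait()
}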

vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go (new file, generated, vendored, 33 lines)
@@ -0,0 +1,33 @@
package hcs

import (
    "io"
    "syscall"

    "github.com/Microsoft/go-winio"
)

// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
// if there is an error.
func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
    fs := make([]io.ReadWriteCloser, len(hs))
    for i, h := range hs {
        if h != syscall.Handle(0) {
            if err == nil {
                fs[i], err = winio.MakeOpenFile(h)
            }
            if err != nil {
                syscall.Close(h)
            }
        }
    }
    if err != nil {
        for _, f := range fs {
            if f != nil {
                f.Close()
            }
        }
        return nil, err
    }
    return fs, nil
}
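
Aside: makeOpenFiles converts a batch of handles and, if any conversion fails, closes both the remaining raw handles and every file already wrapped, so nothing leaks. The sketch below reproduces that all-or-nothing cleanup with plain io.ReadCloser values instead of Windows handles; wrapAll and its inputs are invented for the example.

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"
)

// wrapAll converts each input or, if any conversion fails, closes everything
// already produced and returns the error: the same batch-or-nothing cleanup
// that makeOpenFiles performs on syscall handles.
func wrapAll(inputs []string) (_ []io.ReadCloser, err error) {
    out := make([]io.ReadCloser, len(inputs))
    for i, in := range inputs {
        if err == nil {
            if in == "" {
                err = errors.New("empty input")
            } else {
                out[i] = io.NopCloser(strings.NewReader(in))
            }
        }
    }
    if err != nil {
        for _, f := range out {
            if f != nil {
                f.Close()
            }
        }
        return nil, err
    }
    return out, nil
}

func main() {
    files, err := wrapAll([]string{"a", "b"})
    fmt.Println(len(files), err) // 2 <nil>
    _, err = wrapAll([]string{"a", ""})
    fmt.Println(err) // empty input
}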

vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go (new file, generated, vendored, 69 lines)
@@ -0,0 +1,69 @@
package hcs

import (
    "context"
    "time"

    "github.com/Microsoft/hcsshim/internal/log"
)

func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) {
    events := processHcsResult(ctx, resultJSON)
    if IsPending(err) {
        return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout)
    }

    return events, err
}

func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
    callbackMapLock.RLock()
    if _, ok := callbackMap[callbackNumber]; !ok {
        callbackMapLock.RUnlock()
        log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap")
        return ErrHandleClose
    }
    channels := callbackMap[callbackNumber].channels
    callbackMapLock.RUnlock()

    expectedChannel := channels[expectedNotification]
    if expectedChannel == nil {
        log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification")
        return ErrInvalidNotificationType
    }

    var c <-chan time.Time
    if timeout != nil {
        timer := time.NewTimer(*timeout)
        c = timer.C
        defer timer.Stop()
    }

    select {
    case err, ok := <-expectedChannel:
        if !ok {
            return ErrHandleClose
        }
        return err
    case err, ok := <-channels[hcsNotificationSystemExited]:
        if !ok {
            return ErrHandleClose
        }
        // If the expected notification is hcsNotificationSystemExited, which of the two
        // select cases is chosen is random. Return the raw error if hcsNotificationSystemExited
        // is the expected notification.
        if channels[hcsNotificationSystemExited] == expectedChannel {
            return err
        }
        return ErrUnexpectedContainerExit
    case _, ok := <-channels[hcsNotificationServiceDisconnect]:
        if !ok {
            return ErrHandleClose
        }
        // hcsNotificationServiceDisconnect should never be an expected notification;
        // it does not need the same handling as hcsNotificationSystemExited.
        return ErrUnexpectedProcessAbort
    case <-c:
        return ErrTimeout
    }
    return nil
}
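
Aside: waitForNotification selects on the expected channel together with the system-exit and service-disconnect channels, and implements the optional timeout by leaving the timer channel nil (a nil channel never fires) when no timeout was requested. The standalone sketch below shows that select shape; waitFor, errTimeout and the channel names are invented for the example.

package main

import (
    "errors"
    "fmt"
    "time"
)

var errTimeout = errors.New("timeout waiting for notification")

// waitFor mirrors waitForNotification's select shape: a nil timer channel
// never fires, so the same select works with and without a timeout.
func waitFor(expected <-chan error, unexpectedExit <-chan error, timeout *time.Duration) error {
    var c <-chan time.Time
    if timeout != nil {
        timer := time.NewTimer(*timeout)
        c = timer.C
        defer timer.Stop()
    }

    select {
    case err, ok := <-expected:
        if !ok {
            return errors.New("handle closed")
        }
        return err
    case <-unexpectedExit:
        return errors.New("unexpected container exit")
    case <-c:
        return errTimeout
    }
}

func main() {
    expected := make(chan error, 1)
    exited := make(chan error, 1)

    expected <- nil // the awaited notification arrives
    fmt.Println(waitFor(expected, exited, nil)) // <nil>

    d := 50 * time.Millisecond
    fmt.Println(waitFor(make(chan error), exited, &d)) // timeout waiting for notification
}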