act/pkg/container/docker_run.go


package container

import (
	"archive/tar"
	"bufio"
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"

	"github.com/Masterminds/semver"
	"github.com/docker/cli/cli/connhelper"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/go-git/go-billy/v5/helper/polyfill"
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
	"github.com/joho/godotenv"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"golang.org/x/term"

	"github.com/nektos/act/pkg/common"
)
// NewContainerInput is the input for the NewContainer function
type NewContainerInput struct {
	Image       string
	Username    string
	Password    string
	Entrypoint  []string
	Cmd         []string
	WorkingDir  string
	Env         []string
	Binds       []string
	Mounts      map[string]string
	Name        string
	Stdout      io.Writer
	Stderr      io.Writer
	NetworkMode string
	Privileged  bool
	UsernsMode  string
	Platform    string
	Hostname    string
}
// FileEntry is a file to copy to a container
type FileEntry struct {
Name string
Mode int64
Body string
}
// Container is for managing docker run containers
type Container interface {
	Create(capAdd []string, capDrop []string) common.Executor
	Copy(destPath string, files ...*FileEntry) common.Executor
	CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor
	GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error)
	Pull(forcePull bool) common.Executor
	Start(attach bool) common.Executor
	Exec(command []string, env map[string]string, user, workdir string) common.Executor
	UpdateFromEnv(srcPath string, env *map[string]string) common.Executor
	UpdateFromImageEnv(env *map[string]string) common.Executor
	UpdateFromPath(env *map[string]string) common.Executor
	Remove() common.Executor
	Close() common.Executor
	ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer)
}
// NewContainer creates a reference to a container
func NewContainer(input *NewContainerInput) Container {
	cr := new(containerReference)
	cr.input = input
	return cr
}
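
// exampleContainerLifecycle is an illustrative sketch, not part of the original
// file: it shows how the Container interface above might be driven end to end
// (pull, create, start with attached output, then remove). The image name,
// container name and command are assumptions chosen for the example only.
func exampleContainerLifecycle(ctx context.Context) error {
	c := NewContainer(&NewContainerInput{
		Image:      "node:16-buster-slim", // assumed image, for illustration only
		Name:       "act-example",
		WorkingDir: "/workspace",
		Cmd:        []string{"echo", "hello from act"},
	})
	return common.NewPipelineExecutor(
		c.Pull(false),
		c.Create(nil, nil),
		c.Start(true),
	).Finally(c.Remove())(ctx)
}
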
// supportsContainerImagePlatform returns true if the underlying Docker server
// API version is 1.41 and beyond
func supportsContainerImagePlatform(ctx context.Context, cli *client.Client) bool {
logger := common.Logger(ctx)
ver, err := cli.ServerVersion(ctx)
if err != nil {
logger.Panicf("Failed to get Docker API Version: %s", err)
return false
}
sv, err := semver.NewVersion(ver.APIVersion)
if err != nil {
logger.Panicf("Failed to unmarshal Docker Version: %s", err)
return false
}
constraint, _ := semver.NewConstraint(">= 1.41")
return constraint.Check(sv)
}
func (cr *containerReference) Create(capAdd []string, capDrop []string) common.Executor {
return common.
NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
Then(
common.NewPipelineExecutor(
cr.connect(),
cr.find(),
cr.create(capAdd, capDrop),
).IfNot(common.Dryrun),
)
}
func (cr *containerReference) Start(attach bool) common.Executor {
return common.
NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
Then(
common.NewPipelineExecutor(
cr.connect(),
cr.find(),
cr.attach().IfBool(attach),
cr.start(),
cr.wait().IfBool(attach),
).IfNot(common.Dryrun),
)
}
func (cr *containerReference) Pull(forcePull bool) common.Executor {
return common.
NewInfoExecutor("%sdocker pull image=%s platform=%s username=%s forcePull=%t", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Username, forcePull).
Then(
NewDockerPullExecutor(NewDockerPullExecutorInput{
Image: cr.input.Image,
ForcePull: forcePull,
Platform: cr.input.Platform,
Username: cr.input.Username,
Password: cr.input.Password,
}),
)
}
func (cr *containerReference) Copy(destPath string, files ...*FileEntry) common.Executor {
return common.NewPipelineExecutor(
cr.connect(),
cr.find(),
cr.copyContent(destPath, files...),
).IfNot(common.Dryrun)
}
func (cr *containerReference) CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor {
	return common.NewPipelineExecutor(
		common.NewInfoExecutor("%sdocker cp src=%s dst=%s", logPrefix, srcPath, destPath),
		cr.Exec([]string{"mkdir", "-p", destPath}, nil, "", ""),
		cr.copyDir(destPath, srcPath, useGitIgnore),
	).IfNot(common.Dryrun)
}
func (cr *containerReference) GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error) {
if common.Dryrun(ctx) {
return nil, fmt.Errorf("DRYRUN is not supported in GetContainerArchive")
}
a, _, err := cr.cli.CopyFromContainer(ctx, cr.id, srcPath)
return a, err
}
// UpdateFromEnv reads a GITHUB_ENV-style file at srcPath inside the container
// and merges the variables it declares into env.
func (cr *containerReference) UpdateFromEnv(srcPath string, env *map[string]string) common.Executor {
	return cr.extractEnv(srcPath, env).IfNot(common.Dryrun)
}

// UpdateFromImageEnv merges the environment configured in the container image
// into env; existing values win, except PATH entries, which are appended.
func (cr *containerReference) UpdateFromImageEnv(env *map[string]string) common.Executor {
	return cr.extractFromImageEnv(env).IfNot(common.Dryrun)
}

// UpdateFromPath reads the file referenced by env["GITHUB_PATH"] from the
// container and prepends each of its lines to env["PATH"].
func (cr *containerReference) UpdateFromPath(env *map[string]string) common.Executor {
	return cr.extractPath(env).IfNot(common.Dryrun)
}
func (cr *containerReference) Exec(command []string, env map[string]string, user, workdir string) common.Executor {
return common.NewPipelineExecutor(
common.NewInfoExecutor("%sdocker exec cmd=[%s] user=%s workdir=%s", logPrefix, strings.Join(command, " "), user, workdir),
cr.connect(),
cr.find(),
cr.exec(command, env, user, workdir),
).IfNot(common.Dryrun)
}
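
// exampleExecInContainer is a hedged usage sketch (an assumption, not original
// code) of the Exec method above: run a shell command as the default user in
// the container's working directory, with one extra environment variable.
func exampleExecInContainer(ctx context.Context, c Container) error {
	env := map[string]string{"EXAMPLE_FLAG": "1"} // assumed variable, for illustration
	return c.Exec([]string{"sh", "-c", "echo $EXAMPLE_FLAG"}, env, "", "")(ctx)
}
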
func (cr *containerReference) Remove() common.Executor {
return common.NewPipelineExecutor(
cr.connect(),
cr.find(),
).Finally(
cr.remove(),
).IfNot(common.Dryrun)
}
// ReplaceLogWriter swaps the container's stdout/stderr writers and returns the
// previous pair so the caller can restore them later.
func (cr *containerReference) ReplaceLogWriter(stdout io.Writer, stderr io.Writer) (io.Writer, io.Writer) {
	out := cr.input.Stdout
	err := cr.input.Stderr

	cr.input.Stdout = stdout
	cr.input.Stderr = stderr

	return out, err
}
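
// exampleScopedLogOutput is an illustrative sketch (not part of the original
// file, and the helper name is an assumption): it temporarily redirects a
// container's output into an in-memory buffer while running an executor, then
// restores the previous writers, similar to how scoped log writers are used
// for composite actions.
func exampleScopedLogOutput(ctx context.Context, c Container, run common.Executor) (string, error) {
	var buf bytes.Buffer
	prevOut, prevErr := c.ReplaceLogWriter(&buf, &buf)
	defer c.ReplaceLogWriter(prevOut, prevErr)
	err := run(ctx)
	return buf.String(), err
}
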
type containerReference struct {
cli *client.Client
id string
input *NewContainerInput
}
func GetDockerClient(ctx context.Context) (cli *client.Client, err error) {
	// TODO: this should probably be a global option rather than being hidden here,
	// though it is unclear how that works out when there is another Executor.
	// It would be nice to have something that works natively on macOS, for example.
dockerHost := os.Getenv("DOCKER_HOST")
if strings.HasPrefix(dockerHost, "ssh://") {
var helper *connhelper.ConnectionHelper
helper, err = connhelper.GetConnectionHelper(dockerHost)
if err != nil {
return nil, err
}
cli, err = client.NewClientWithOpts(
client.WithHost(helper.Host),
client.WithDialContext(helper.Dialer),
)
} else {
cli, err = client.NewClientWithOpts(client.FromEnv)
}
if err != nil {
return nil, errors.WithStack(err)
}
cli.NegotiateAPIVersion(ctx)
return cli, err
}
func GetHostInfo(ctx context.Context) (info types.Info, err error) {
var cli *client.Client
cli, err = GetDockerClient(ctx)
if err != nil {
return info, err
}
defer cli.Close()
info, err = cli.Info(ctx)
if err != nil {
return info, err
}
return info, nil
}
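
// exampleFindContainerByName is a hedged sketch (an assumption, not original
// code) of the same client plumbing used by find() below: obtain a client via
// GetDockerClient, list all containers and match on the name, which Docker
// reports with a leading slash.
func exampleFindContainerByName(ctx context.Context, name string) (string, error) {
	cli, err := GetDockerClient(ctx)
	if err != nil {
		return "", err
	}
	defer cli.Close()

	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
	if err != nil {
		return "", err
	}
	for _, c := range containers {
		for _, n := range c.Names {
			if strings.TrimPrefix(n, "/") == name {
				return c.ID, nil
			}
		}
	}
	return "", nil // not found
}
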
func (cr *containerReference) connect() common.Executor {
return func(ctx context.Context) error {
if cr.cli != nil {
return nil
}
cli, err := GetDockerClient(ctx)
if err != nil {
return err
}
cr.cli = cli
return nil
}
}
func (cr *containerReference) Close() common.Executor {
return func(ctx context.Context) error {
if cr.cli != nil {
cr.cli.Close()
cr.cli = nil
}
return nil
}
}
func (cr *containerReference) find() common.Executor {
return func(ctx context.Context) error {
if cr.id != "" {
return nil
}
containers, err := cr.cli.ContainerList(ctx, types.ContainerListOptions{
All: true,
})
if err != nil {
return errors.WithStack(err)
}
for _, c := range containers {
for _, name := range c.Names {
if name[1:] == cr.input.Name {
cr.id = c.ID
return nil
}
}
}
cr.id = ""
return nil
}
}
func (cr *containerReference) remove() common.Executor {
return func(ctx context.Context) error {
if cr.id == "" {
return nil
}
logger := common.Logger(ctx)
err := cr.cli.ContainerRemove(ctx, cr.id, types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
if err != nil {
logger.Error(errors.WithStack(err))
}
logger.Debugf("Removed container: %v", cr.id)
cr.id = ""
return nil
}
}
func (cr *containerReference) create(capAdd []string, capDrop []string) common.Executor {
return func(ctx context.Context) error {
if cr.id != "" {
return nil
}
logger := common.Logger(ctx)
isTerminal := term.IsTerminal(int(os.Stdout.Fd()))
input := cr.input
config := &container.Config{
Image: input.Image,
WorkingDir: input.WorkingDir,
Env: input.Env,
Tty: isTerminal,
Hostname: input.Hostname,
}
if len(input.Cmd) != 0 {
config.Cmd = input.Cmd
}
if len(input.Entrypoint) != 0 {
config.Entrypoint = input.Entrypoint
}
mounts := make([]mount.Mount, 0)
for mountSource, mountTarget := range input.Mounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeVolume,
Source: mountSource,
Target: mountTarget,
})
}
var platSpecs *specs.Platform
if supportsContainerImagePlatform(ctx, cr.cli) && cr.input.Platform != "" {
desiredPlatform := strings.SplitN(cr.input.Platform, `/`, 2)
if len(desiredPlatform) != 2 {
logger.Panicf("Incorrect container platform option. %s is not a valid platform.", cr.input.Platform)
}
platSpecs = &specs.Platform{
Architecture: desiredPlatform[1],
OS: desiredPlatform[0],
}
}
resp, err := cr.cli.ContainerCreate(ctx, config, &container.HostConfig{
CapAdd: capAdd,
CapDrop: capDrop,
Binds: input.Binds,
Mounts: mounts,
NetworkMode: container.NetworkMode(input.NetworkMode),
Privileged: input.Privileged,
UsernsMode: container.UsernsMode(input.UsernsMode),
}, nil, platSpecs, input.Name)
if err != nil {
return errors.WithStack(err)
}
logger.Debugf("Created container name=%s id=%v from image %v (platform: %s)", input.Name, resp.ID, input.Image, input.Platform)
logger.Debugf("ENV ==> %v", input.Env)
cr.id = resp.ID
return nil
}
}
var singleLineEnvPattern, mulitiLineEnvPattern *regexp.Regexp
func (cr *containerReference) extractEnv(srcPath string, env *map[string]string) common.Executor {
	if singleLineEnvPattern == nil {
		// Single line pattern matches:
		// SOME_VAR=data=moredata
		// SOME_VAR=datamoredata
		singleLineEnvPattern = regexp.MustCompile(`^([^=]*)\=(.*)$`)
		// Multi line pattern matches the heredoc syntax used in GITHUB_ENV files:
		// SOME_VAR<<delimiter ... delimiter
		mulitiLineEnvPattern = regexp.MustCompile(`^([^<]+)<<(\w+)$`)
	}
localEnv := *env
return func(ctx context.Context) error {
envTar, _, err := cr.cli.CopyFromContainer(ctx, cr.id, srcPath)
if err != nil {
return nil
}
defer envTar.Close()
reader := tar.NewReader(envTar)
_, err = reader.Next()
if err != nil && err != io.EOF {
return errors.WithStack(err)
}
s := bufio.NewScanner(reader)
multiLineEnvKey := ""
multiLineEnvDelimiter := ""
multiLineEnvContent := ""
for s.Scan() {
line := s.Text()
if singleLineEnv := singleLineEnvPattern.FindStringSubmatch(line); singleLineEnv != nil {
localEnv[singleLineEnv[1]] = singleLineEnv[2]
}
if line == multiLineEnvDelimiter {
localEnv[multiLineEnvKey] = multiLineEnvContent
multiLineEnvKey, multiLineEnvDelimiter, multiLineEnvContent = "", "", ""
}
if multiLineEnvKey != "" && multiLineEnvDelimiter != "" {
if multiLineEnvContent != "" {
multiLineEnvContent += "\n"
}
multiLineEnvContent += line
}
if mulitiLineEnvStart := mulitiLineEnvPattern.FindStringSubmatch(line); mulitiLineEnvStart != nil {
multiLineEnvKey = mulitiLineEnvStart[1]
multiLineEnvDelimiter = mulitiLineEnvStart[2]
}
}
env = &localEnv
return nil
}
}
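
// exampleReadJobEnv is an illustrative sketch (assumed helper and file path,
// not original code): after a step has appended lines such as `FOO=bar` or the
// heredoc form `NOTES<<EOF ... EOF` to a GITHUB_ENV-style file inside the
// container, UpdateFromEnv merges those values into the given map.
func exampleReadJobEnv(ctx context.Context, c Container) (map[string]string, error) {
	env := map[string]string{}
	err := c.UpdateFromEnv("/tmp/github_env", &env)(ctx) // assumed path, for illustration
	return env, err
}
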
func (cr *containerReference) extractFromImageEnv(env *map[string]string) common.Executor {
envMap := *env
return func(ctx context.Context) error {
logger := common.Logger(ctx)
inspect, _, err := cr.cli.ImageInspectWithRaw(ctx, cr.input.Image)
if err != nil {
logger.Error(err)
}
imageEnv, err := godotenv.Unmarshal(strings.Join(inspect.Config.Env, "\n"))
if err != nil {
logger.Error(err)
}
for k, v := range imageEnv {
if k == "PATH" {
if envMap[k] == "" {
envMap[k] = v
} else {
envMap[k] += `:` + v
}
} else if envMap[k] == "" {
envMap[k] = v
}
}
env = &envMap
return nil
}
}
func (cr *containerReference) extractPath(env *map[string]string) common.Executor {
localEnv := *env
return func(ctx context.Context) error {
pathTar, _, err := cr.cli.CopyFromContainer(ctx, cr.id, localEnv["GITHUB_PATH"])
if err != nil {
return errors.WithStack(err)
}
defer pathTar.Close()
reader := tar.NewReader(pathTar)
_, err = reader.Next()
if err != nil && err != io.EOF {
return errors.WithStack(err)
}
s := bufio.NewScanner(reader)
for s.Scan() {
line := s.Text()
localEnv["PATH"] = fmt.Sprintf("%s:%s", line, localEnv["PATH"])
}
env = &localEnv
return nil
}
}
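
// examplePrependToolPath is an illustrative sketch (assumed helper and paths,
// not original code): a step writes one directory per line to the GITHUB_PATH
// file, and UpdateFromPath prepends each of those lines to env["PATH"].
func examplePrependToolPath(ctx context.Context, c Container) (string, error) {
	env := map[string]string{
		"PATH":        "/usr/local/bin:/usr/bin:/bin",
		"GITHUB_PATH": "/tmp/github_path", // assumed location of the path file
	}
	if err := c.UpdateFromPath(&env)(ctx); err != nil {
		return "", err
	}
	return env["PATH"], nil
}
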
func (cr *containerReference) exec(cmd []string, env map[string]string, user, workdir string) common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
// Fix slashes when running on Windows
if runtime.GOOS == "windows" {
var newCmd []string
for _, v := range cmd {
newCmd = append(newCmd, strings.ReplaceAll(v, `\`, `/`))
}
cmd = newCmd
}
logger.Debugf("Exec command '%s'", cmd)
isTerminal := term.IsTerminal(int(os.Stdout.Fd()))
envList := make([]string, 0)
for k, v := range env {
envList = append(envList, fmt.Sprintf("%s=%s", k, v))
}
var wd string
if workdir != "" {
if strings.HasPrefix(workdir, "/") {
wd = workdir
} else {
wd = fmt.Sprintf("%s/%s", cr.input.WorkingDir, workdir)
}
} else {
wd = cr.input.WorkingDir
}
logger.Debugf("Working directory '%s'", wd)
idResp, err := cr.cli.ContainerExecCreate(ctx, cr.id, types.ExecConfig{
User: user,
Cmd: cmd,
WorkingDir: wd,
Env: envList,
Tty: isTerminal,
AttachStderr: true,
AttachStdout: true,
})
if err != nil {
return errors.WithStack(err)
}
resp, err := cr.cli.ContainerExecAttach(ctx, idResp.ID, types.ExecStartCheck{
Tty: isTerminal,
})
if err != nil {
return errors.WithStack(err)
}
defer resp.Close()
var outWriter io.Writer
outWriter = cr.input.Stdout
if outWriter == nil {
outWriter = os.Stdout
}
errWriter := cr.input.Stderr
if errWriter == nil {
errWriter = os.Stderr
}
if !isTerminal || os.Getenv("NORAW") != "" {
_, err = stdcopy.StdCopy(outWriter, errWriter, resp.Reader)
} else {
_, err = io.Copy(outWriter, resp.Reader)
}
if err != nil {
logger.Error(err)
}
inspectResp, err := cr.cli.ContainerExecInspect(ctx, idResp.ID)
if err != nil {
return errors.WithStack(err)
}
if inspectResp.ExitCode == 0 {
return nil
}
return fmt.Errorf("exit with `FAILURE`: %v", inspectResp.ExitCode)
}
}
func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgnore bool) common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
tarFile, err := ioutil.TempFile("", "act")
if err != nil {
return err
}
log.Debugf("Writing tarball %s from %s", tarFile.Name(), srcPath)
defer func(tarFile *os.File) {
name := tarFile.Name()
err := tarFile.Close()
if err != nil {
logger.Error(err)
}
err = os.Remove(name)
if err != nil {
logger.Error(err)
}
}(tarFile)
tw := tar.NewWriter(tarFile)
srcPrefix := filepath.Dir(srcPath)
if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) {
srcPrefix += string(filepath.Separator)
}
log.Debugf("Stripping prefix:%s src:%s", srcPrefix, srcPath)
var ignorer gitignore.Matcher
if useGitIgnore {
ps, err := gitignore.ReadPatterns(polyfill.New(osfs.New(srcPath)), nil)
if err != nil {
log.Debugf("Error loading .gitignore: %v", err)
}
ignorer = gitignore.NewMatcher(ps)
}
fc := &fileCollector{
Fs: &defaultFs{},
Ignorer: ignorer,
SrcPath: srcPath,
SrcPrefix: srcPrefix,
Handler: &tarCollector{
TarWriter: tw,
},
}
err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
if err != nil {
return err
}
if err := tw.Close(); err != nil {
return err
}
logger.Debugf("Extracting content from '%s' to '%s'", tarFile.Name(), dstPath)
_, err = tarFile.Seek(0, 0)
if err != nil {
return errors.WithStack(err)
}
err = cr.cli.CopyToContainer(ctx, cr.id, dstPath, tarFile, types.CopyToContainerOptions{})
if err != nil {
return errors.WithStack(err)
}
return nil
}
}
func (cr *containerReference) copyContent(dstPath string, files ...*FileEntry) common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
for _, file := range files {
log.Debugf("Writing entry to tarball %s len:%d", file.Name, len(file.Body))
hdr := &tar.Header{
Name: file.Name,
Mode: file.Mode,
Size: int64(len(file.Body)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := tw.Write([]byte(file.Body)); err != nil {
return err
}
}
if err := tw.Close(); err != nil {
return err
}
logger.Debugf("Extracting content to '%s'", dstPath)
err := cr.cli.CopyToContainer(ctx, cr.id, dstPath, &buf, types.CopyToContainerOptions{})
if err != nil {
return errors.WithStack(err)
}
return nil
}
}
func (cr *containerReference) attach() common.Executor {
return func(ctx context.Context) error {
out, err := cr.cli.ContainerAttach(ctx, cr.id, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
})
if err != nil {
return errors.WithStack(err)
}
isTerminal := term.IsTerminal(int(os.Stdout.Fd()))
var outWriter io.Writer
outWriter = cr.input.Stdout
if outWriter == nil {
outWriter = os.Stdout
}
errWriter := cr.input.Stderr
if errWriter == nil {
errWriter = os.Stderr
}
go func() {
if !isTerminal || os.Getenv("NORAW") != "" {
_, err = stdcopy.StdCopy(outWriter, errWriter, out.Reader)
} else {
_, err = io.Copy(outWriter, out.Reader)
}
if err != nil {
common.Logger(ctx).Error(err)
}
}()
return nil
}
}
func (cr *containerReference) start() common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
logger.Debugf("Starting container: %v", cr.id)
if err := cr.cli.ContainerStart(ctx, cr.id, types.ContainerStartOptions{}); err != nil {
return errors.WithStack(err)
}
logger.Debugf("Started container: %v", cr.id)
return nil
}
}
func (cr *containerReference) wait() common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
statusCh, errCh := cr.cli.ContainerWait(ctx, cr.id, container.WaitConditionNotRunning)
var statusCode int64
select {
case err := <-errCh:
if err != nil {
return errors.WithStack(err)
}
case status := <-statusCh:
statusCode = status.StatusCode
}
logger.Debugf("Return status: %v", statusCode)
if statusCode == 0 {
return nil
}
return fmt.Errorf("exit with `FAILURE`: %v", statusCode)
}
}