act/pkg/runner/runner.go

package runner

import (
	"context"
	"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"runtime"
"strings"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/model"
log "github.com/sirupsen/logrus"
)
// Runner provides capabilities to run GitHub actions
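//
// A minimal usage sketch (illustrative only; `plan` and `ctx` are assumed to
// already exist, and the Config fields shown are just examples):
//
//	r, err := New(&Config{Workdir: ".", EventName: "push"})
//	if err != nil {
//		return err
//	}
//	err = r.NewPlanExecutor(plan)(ctx)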
type Runner interface {
	NewPlanExecutor(plan *model.Plan) common.Executor
}

// Config contains the config for a new runner
type Config struct {
	Actor                 string            // the user that triggered the event
	Workdir               string            // path to working directory
	BindWorkdir           bool              // bind the workdir to the job container
	EventName             string            // name of event to run
	EventPath             string            // path to JSON file to use for event.json in containers
	DefaultBranch         string            // name of the main branch for this repository
	ReuseContainers       bool              // reuse containers to maintain state
	ForcePull             bool              // force pulling of the image, even if already present
	LogOutput             bool              // log the output from docker run
	Env                   map[string]string // env for containers
	Secrets               map[string]string // list of secrets
	InsecureSecrets       bool              // show secrets in the log output instead of masking them
	Platforms             map[string]string // list of platforms
	Privileged            bool              // use privileged mode
	UsernsMode            string            // user namespace to use
	ContainerArchitecture string            // desired OS/architecture platform for running containers
	ContainerDaemonSocket string            // path to Docker daemon socket
	UseGitIgnore          bool              // controls if paths in .gitignore should not be copied into container, default true
	GitHubInstance        string            // GitHub instance to use, default "github.com"
	ContainerCapAdd       []string          // list of kernel capabilities to add to the containers
	ContainerCapDrop      []string          // list of kernel capabilities to remove from the containers
	AutoRemove            bool              // controls if the container is automatically removed upon workflow completion
	ArtifactServerPath    string            // the path where the artifact server stores uploads
	ArtifactServerPort    string            // the port the artifact server binds to
}

// Resolves the equivalent host path inside the container
// This is required for windows and WSL 2 to translate things like C:\Users\Myproject to /mnt/c/Users/Myproject
// For use in docker volumes and binds
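// Example: `C:\Users\me\MyActProject` becomes `/mnt/c/Users/me/MyActProject`, while a
// path that does not look like a Windows path is returned in absolute form.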
func (config *Config) containerPath(path string) string {
	if runtime.GOOS == "windows" && strings.Contains(path, "/") {
		log.Error("You cannot specify linux style local paths (/mnt/etc) on Windows as it does not understand them.")
		return ""
	}

	abspath, err := filepath.Abs(path)
	if err != nil {
		log.Error(err)
		return ""
	}

	// Test if the path is a windows path
	windowsPathRegex := regexp.MustCompile(`^([a-zA-Z]):\\(.+)$`)
	windowsPathComponents := windowsPathRegex.FindStringSubmatch(abspath)

	// Return as-is if no match
	if windowsPathComponents == nil {
		return abspath
	}

	// Convert to WSL2-compatible path if it is a windows path
	// NOTE: Cannot use filepath because it will use the wrong path separators assuming we want the path to be windows
	// based if running on Windows, and because we are feeding this to Docker, GoLang auto-path-translate doesn't work.
	driveLetter := strings.ToLower(windowsPathComponents[1])
	translatedPath := strings.ReplaceAll(windowsPathComponents[2], `\`, `/`)

	// Should make something like /mnt/c/Users/person/My Folder/MyActProject
	result := strings.Join([]string{"/mnt", driveLetter, translatedPath}, `/`)
	return result
}

// ContainerWorkdir resolves the configured working directory to its equivalent path inside the container.
// This is required for windows and WSL 2 to translate things like C:\Users\Myproject to /mnt/c/Users/Myproject
func (config *Config) ContainerWorkdir() string {
	return config.containerPath(config.Workdir)
}

type runnerImpl struct {
	config    *Config
	eventJSON string
}

// New creates a new Runner
func New(runnerConfig *Config) (Runner, error) {
	runner := &runnerImpl{
		config: runnerConfig,
	}

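	// Default to an empty event payload; the real payload is read from EventPath
	// below when one is configured.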
	runner.eventJSON = "{}"
	if runnerConfig.EventPath != "" {
		log.Debugf("Reading event.json from %s", runner.config.EventPath)
		eventJSONBytes, err := ioutil.ReadFile(runner.config.EventPath)
		if err != nil {
			return nil, err
		}
		runner.eventJSON = string(eventJSONBytes)
	}
	return runner, nil
}

func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
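	// maxJobNameLen tracks the longest interpolated job name so that job log
	// prefixes can be padded to a uniform width.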
	maxJobNameLen := 0

	pipeline := make([]common.Executor, 0)
	for s, stage := range plan.Stages {
		stageExecutor := make([]common.Executor, 0)
		for r, run := range stage.Runs {
			job := run.Job()
			matrixes := job.GetMatrixes()
			maxParallel := 4
			if job.Strategy != nil {
				maxParallel = job.Strategy.MaxParallel
			}
			if len(matrixes) < maxParallel {
				maxParallel = len(matrixes)
			}

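			// Collect the matrix permutations into batches: each time maxParallel run
			// contexts have been gathered, they are flushed into the pipeline as a
			// single parallel executor.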
			b := 0
			for i, matrix := range matrixes {
				rc := runner.newRunContext(run, matrix)
				rc.JobName = rc.Name
				if len(matrixes) > 1 {
					rc.Name = fmt.Sprintf("%s-%d", rc.Name, i+1)
				}
				if len(rc.String()) > maxJobNameLen {
					maxJobNameLen = len(rc.String())
				}

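				// Each matrix permutation becomes an executor that runs the job under a
				// width-padded, secret-masking job logger and, after the final job of the
				// final stage, removes the job container when AutoRemove is set.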
				stageExecutor = append(stageExecutor, func(ctx context.Context) error {
					jobName := fmt.Sprintf("%-*s", maxJobNameLen, rc.String())
					return rc.Executor().Finally(func(ctx context.Context) error {
						isLastRunningContainer := func(currentStage int, currentRun int) bool {
							return currentStage == len(plan.Stages)-1 && currentRun == len(stage.Runs)-1
						}

						if runner.config.AutoRemove && isLastRunningContainer(s, r) {
							log.Infof("Cleaning up container for job %s", rc.JobName)
							if err := rc.stopJobContainer()(ctx); err != nil {
								log.Errorf("Error while cleaning container: %v", err)
							}
						}
						return nil
					})(WithJobLogger(ctx, jobName, rc.Config.Secrets, rc.Config.InsecureSecrets))
				})
				b++
				if b == maxParallel {
					pipeline = append(pipeline, common.NewParallelExecutor(stageExecutor...))
					stageExecutor = make([]common.Executor, 0)
					b = 0
				}
			}
		}

		// Flush any executors left over from a batch smaller than maxParallel so the
		// remaining jobs of this stage are not dropped from the pipeline.
		if len(stageExecutor) > 0 {
			pipeline = append(pipeline, common.NewParallelExecutor(stageExecutor...))
		}
	}

	return common.NewPipelineExecutor(pipeline...)
}
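
// newRunContext creates a RunContext for a single matrix permutation of a run,
// wiring in the runner configuration, the event payload and an empty step-result
// map, and interpolating the run name through the context's expression evaluator.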
func (runner *runnerImpl) newRunContext(run *model.Run, matrix map[string]interface{}) *RunContext {
	rc := &RunContext{
		Config:      runner.config,
		Run:         run,
		EventJSON:   runner.eventJSON,
		StepResults: make(map[string]*stepResult),
		Matrix:      matrix,
	}
	rc.ExprEval = rc.NewExpressionEvaluator()
	rc.Name = rc.ExprEval.Interpolate(run.String())
	return rc
}