act/pkg/runner/runner.go

package runner

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"time"

	log "github.com/sirupsen/logrus"

	docker_container "github.com/docker/docker/api/types/container"

	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/container"
	"github.com/nektos/act/pkg/model"
)

// Runner provides capabilities to run GitHub actions
type Runner interface {
	NewPlanExecutor(plan *model.Plan) common.Executor
}

// Config contains the config for a new runner
type Config struct {
	Actor                              string                     // the user that triggered the event
	Workdir                            string                     // path to working directory
	ActionCacheDir                     string                     // path used for caching action contents
	BindWorkdir                        bool                       // bind the workdir to the job container
	EventName                          string                     // name of event to run
	EventPath                          string                     // path to JSON file to use for event.json in containers
	DefaultBranch                      string                     // name of the main branch for this repository
	ReuseContainers                    bool                       // reuse containers to maintain state
	ForcePull                          bool                       // force pulling of the image, even if already present
	ForceRebuild                       bool                       // force rebuilding local docker image actions
	LogOutput                          bool                       // log the output from docker run
	JSONLogger                         bool                       // use json instead of text logger
	Env                                map[string]string          // env for containers
	Inputs                             map[string]string          // manually passed action inputs
	Secrets                            map[string]string          // map of secrets
	Vars                               map[string]string          // map of variables
	Token                              string                     // GitHub token
	InsecureSecrets                    bool                       // print secrets to the terminal without masking
	Platforms                          map[string]string          // map of platform labels to docker images
	Privileged                         bool                       // use privileged mode
	UsernsMode                         string                     // user namespace to use
	ContainerArchitecture              string                     // desired OS/architecture platform for running containers
	ContainerDaemonSocket              string                     // path to Docker daemon socket
	ContainerOptions                   string                     // options for the job container
	UseGitIgnore                       bool                       // when true, paths listed in .gitignore are not copied into the container (default true)
	GitHubInstance                     string                     // GitHub instance to use, default "github.com"
	ContainerCapAdd                    []string                   // list of kernel capabilities to add to the containers
	ContainerCapDrop                   []string                   // list of kernel capabilities to remove from the containers
	AutoRemove                         bool                       // controls if the container is automatically removed upon workflow completion
	ArtifactServerPath                 string                     // the path where the artifact server stores uploads
	ArtifactServerAddr                 string                     // the address the artifact server binds to
	ArtifactServerPort                 string                     // the port the artifact server binds to
	NoSkipCheckout                     bool                       // do not skip actions/checkout
	RemoteName                         string                     // remote name in local git repo config
	ReplaceGheActionWithGithubCom      []string                   // actions on a GitHub Enterprise instance to fetch from github.com instead
	ReplaceGheActionTokenWithGithubCom string                     // token for private action repos on github.com when replacing GHE actions
	Matrix                             map[string]map[string]bool // matrix config to run

	PresetGitHubContext   *model.GithubContext         // the preset github context; overrides fields like DefaultBranch, Env, Secrets etc.
	EventJSON             string                       // the content of the JSON file to use for event.json in containers; overrides EventPath
	ContainerNamePrefix   string                       // the prefix of container names
	ContainerMaxLifetime  time.Duration                // the max lifetime of job containers
	ContainerNetworkMode  docker_container.NetworkMode // the network mode of job containers (the value of --network)
	DefaultActionInstance string                       // the default actions web site
	PlatformPicker        func(labels []string) string // platform picker; takes precedence over Platforms when non-nil
	JobLoggerLevel        *log.Level                   // the level of the job logger
	ValidVolumes          []string                     // only volumes (and bind mounts) in this slice can be mounted on the job container or service containers
}

// GetToken returns the token for the run. Gitea adaptation: a GITEA_TOKEN
// secret takes precedence over GITHUB_TOKEN.
func (c Config) GetToken() string {
	token := c.Secrets["GITHUB_TOKEN"]
	if c.Secrets["GITEA_TOKEN"] != "" {
		token = c.Secrets["GITEA_TOKEN"]
	}
	return token
}
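
// For example, with Secrets = {"GITHUB_TOKEN": "gh", "GITEA_TOKEN": "gt"},
// GetToken returns "gt"; with only GITHUB_TOKEN set, it returns "gh".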

type caller struct {
	runContext *RunContext
}

type runnerImpl struct {
	config    *Config
	eventJSON string
	caller    *caller // the job calling this runner (caller of a reusable workflow)
}

// New creates a new Runner
func New(runnerConfig *Config) (Runner, error) {
	runner := &runnerImpl{
		config: runnerConfig,
	}

	return runner.configure()
}
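
// A minimal usage sketch (illustrative only; the planner signatures vary
// between act versions, so treat these calls as assumptions rather than the
// canonical API):
//
//	planner, _ := model.NewWorkflowPlanner("./.github/workflows", true)
//	plan, _ := planner.PlanEvent("push")
//	r, _ := New(&Config{Workdir: ".", EventName: "push"})
//	err := r.NewPlanExecutor(plan)(context.Background())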

func (runner *runnerImpl) configure() (Runner, error) {
	runner.eventJSON = "{}"
	if runner.config.EventJSON != "" {
		runner.eventJSON = runner.config.EventJSON
	} else if runner.config.EventPath != "" {
		log.Debugf("Reading event.json from %s", runner.config.EventPath)
		eventJSONBytes, err := os.ReadFile(runner.config.EventPath)
		if err != nil {
			return nil, err
		}
		runner.eventJSON = string(eventJSONBytes)
	} else if len(runner.config.Inputs) != 0 {
		eventMap := map[string]map[string]string{
			"inputs": runner.config.Inputs,
		}
		eventJSON, err := json.Marshal(eventMap)
		if err != nil {
			return nil, err
		}
		runner.eventJSON = string(eventJSON)
	}
	return runner, nil
}
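
// For example, with Inputs = map[string]string{"version": "v1.2.3"} and
// neither EventJSON nor EventPath set, the synthesized event payload is:
//
//	{"inputs":{"version":"v1.2.3"}}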

// NewPlanExecutor creates an executor for the given plan. Stages run
// sequentially; the runs within a stage and the matrix variants of each job
// run in parallel.
func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
	maxJobNameLen := 0

	stagePipeline := make([]common.Executor, 0)
	for i := range plan.Stages {
		stage := plan.Stages[i]
		stagePipeline = append(stagePipeline, func(ctx context.Context) error {
			pipeline := make([]common.Executor, 0)
			for _, run := range stage.Runs {
				stageExecutor := make([]common.Executor, 0)
				job := run.Job()

				if job.Strategy != nil {
					strategyRc := runner.newRunContext(ctx, run, nil)
					if err := strategyRc.NewExpressionEvaluator(ctx).EvaluateYamlNode(ctx, &job.Strategy.RawMatrix); err != nil {
						log.Errorf("Error while evaluating matrix: %v", err)
					}
				}

				var matrixes []map[string]interface{}
				if m, err := job.GetMatrixes(); err != nil {
					log.Errorf("Error while getting job's matrix: %v", err)
				} else {
					matrixes = selectMatrixes(m, runner.config.Matrix)
				}
				log.Debugf("Final matrix after applying user inclusions '%v'", matrixes)

				// Parallelism cap: the strategy's max-parallel (default 4),
				// clamped to the number of matrix variants.
				maxParallel := 4
				if job.Strategy != nil {
					maxParallel = job.Strategy.MaxParallel
				}
				if len(matrixes) < maxParallel {
					maxParallel = len(matrixes)
				}

				for i, matrix := range matrixes {
					matrix := matrix
					rc := runner.newRunContext(ctx, run, matrix)
					rc.JobName = rc.Name
					if len(matrixes) > 1 {
						rc.Name = fmt.Sprintf("%s-%d", rc.Name, i+1)
					}
					if len(rc.String()) > maxJobNameLen {
						maxJobNameLen = len(rc.String())
					}
					stageExecutor = append(stageExecutor, func(ctx context.Context) error {
						jobName := fmt.Sprintf("%-*s", maxJobNameLen, rc.String())
						return rc.Executor()(common.WithJobErrorContainer(WithJobLogger(ctx, rc.Run.JobID, jobName, rc.Config, &rc.Masks, matrix)))
					})
				}
				pipeline = append(pipeline, common.NewParallelExecutor(maxParallel, stageExecutor...))
			}

			var ncpu int
			info, err := container.GetHostInfo(ctx)
			if err != nil {
				log.Errorf("failed to obtain container engine info: %s", err)
				ncpu = 1 // sane default?
			} else {
				ncpu = info.NCPU
			}
			return common.NewParallelExecutor(ncpu, pipeline...)(ctx)
		})
	}

	return common.NewPipelineExecutor(stagePipeline...).Then(handleFailure(plan))
}
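
// Rough execution shape (an illustrative sketch, not code in this file): for
// a plan with stages S1..Sn,
//
//	S1 -> S2 -> ... -> Sn -> handleFailure(plan)
//
// where each stage runs its jobs with up to NCPU of the container engine in
// parallel, and each job fans out over its matrix with up to maxParallel
// workers.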

// handleFailure returns an executor that reports an error if any job in the
// plan ended with a "failure" result.
func handleFailure(plan *model.Plan) common.Executor {
	return func(ctx context.Context) error {
		for _, stage := range plan.Stages {
			for _, run := range stage.Runs {
				if run.Job().Result == "failure" {
					return fmt.Errorf("Job '%s' failed", run.String())
				}
			}
		}
		return nil
	}
}

// selectMatrixes filters the expanded job matrix down to the combinations
// allowed by the user-supplied Matrix config.
func selectMatrixes(originalMatrixes []map[string]interface{}, targetMatrixValues map[string]map[string]bool) []map[string]interface{} {
	matrixes := make([]map[string]interface{}, 0)
	for _, original := range originalMatrixes {
		flag := true
		for key, val := range original {
			if allowedVals, ok := targetMatrixValues[key]; ok {
				valToString := fmt.Sprintf("%v", val)
				if _, ok := allowedVals[valToString]; !ok {
					flag = false
				}
			}
		}
		if flag {
			matrixes = append(matrixes, original)
		}
	}
	return matrixes
}
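
// For example, given an expanded matrix of
//
//	[{"os": "ubuntu-22.04", "go": "1.20"}, {"os": "windows-2022", "go": "1.20"}]
//
// and a user selection of Matrix = {"os": {"ubuntu-22.04": true}}, only the
// ubuntu-22.04 combination is kept; keys not mentioned in Matrix are
// unrestricted.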

func (runner *runnerImpl) newRunContext(ctx context.Context, run *model.Run, matrix map[string]interface{}) *RunContext {
	rc := &RunContext{
		Config:      runner.config,
		Run:         run,
		EventJSON:   runner.eventJSON,
		StepResults: make(map[string]*model.StepResult),
		Matrix:      matrix,
		caller:      runner.caller,
	}
	rc.ExprEval = rc.NewExpressionEvaluator(ctx)
	rc.Name = rc.ExprEval.Interpolate(ctx, run.String())
	return rc
}