// act/pkg/runner/run_context.go

package runner
import (
"archive/tar"
"bufio"
"context"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/opencontainers/selinux/go-selinux"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
"github.com/nektos/act/pkg/exprparser"
"github.com/nektos/act/pkg/model"
)
// RunContext contains info about the current job
type RunContext struct {
Name string
Config *Config
Matrix map[string]interface{}
Run *model.Run
EventJSON string
Env map[string]string
GlobalEnv map[string]string // used to correctly propagate env changes from GITHUB_ENV and set-env, since the Env field can be dirty
ExtraPath []string
CurrentStep string
StepResults map[string]*model.StepResult
IntraActionState map[string]map[string]string
ExprEval ExpressionEvaluator
JobContainer container.ExecutionsEnvironment
ServiceContainers []container.ExecutionsEnvironment
OutputMappings map[MappableOutput]MappableOutput
JobName string
ActionPath string
Parent *RunContext
Masks []string
cleanUpJobContainer common.Executor
caller *caller // job calling this RunContext (reusable workflows)
}
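// AddMask registers a value that will be masked in the log output.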
func (rc *RunContext) AddMask(mask string) {
rc.Masks = append(rc.Masks, mask)
}
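// MappableOutput identifies a step output by step ID and output name.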
type MappableOutput struct {
StepID string
OutputName string
}
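// String returns a human-readable name for the run context; for reusable
// workflows it is prefixed with the caller job ID to stay unique.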
func (rc *RunContext) String() string {
name := fmt.Sprintf("%s/%s", rc.Run.Workflow.Name, rc.Name)
if rc.caller != nil {
// prefix the reusable workflow with the caller job
// this is required to create unique container names
name = fmt.Sprintf("%s/%s", rc.caller.runContext.Run.JobID, name)
}
return name
}
// GetEnv returns the env for the context
func (rc *RunContext) GetEnv() map[string]string {
if rc.Env == nil {
rc.Env = map[string]string{}
if rc.Run != nil && rc.Run.Workflow != nil && rc.Config != nil {
job := rc.Run.Job()
if job != nil {
rc.Env = mergeMaps(rc.Run.Workflow.Env, job.Environment(), rc.Config.Env)
}
}
}
rc.Env["ACT"] = "true"
if !rc.Config.NoSkipCheckout {
rc.Env["ACT_SKIP_CHECKOUT"] = "true"
}
return rc.Env
}
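// jobContainerName derives the job container name from the workflow and job names.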
func (rc *RunContext) jobContainerName() string {
return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+rc.Run.Workflow.Name, "JOB-"+rc.Name)
}
// networkName returns the name of the network that `act` will create automatically for the job;
// the network is only created if `rc.Config.ContainerNetworkMode` is an empty string.
func (rc *RunContext) networkName() string {
return fmt.Sprintf("%s-network", rc.jobContainerName())
}
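// getDockerDaemonSocketMountPath translates a docker daemon socket URI into
// the host path to mount into the container; npipe and unrecognized schemes
// fall back to the default unix socket path.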
func getDockerDaemonSocketMountPath(daemonPath string) string {
if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
scheme := daemonPath[:protoIndex]
if strings.EqualFold(scheme, "npipe") {
// Linux container mount on Windows; use the default socket path of the VM / WSL2
return "/var/run/docker.sock"
} else if strings.EqualFold(scheme, "unix") {
return daemonPath[protoIndex+3:]
} else if strings.IndexFunc(scheme, func(r rune) bool {
return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z')
}) == -1 {
// unknown protocol, use the default
return "/var/run/docker.sock"
}
}
return daemonPath
}
// GetBindsAndMounts returns the binds and mounts for the container, resolving paths as appropriate
func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
name := rc.jobContainerName()
if rc.Config.ContainerDaemonSocket == "" {
rc.Config.ContainerDaemonSocket = "/var/run/docker.sock"
}
binds := []string{}
if rc.Config.ContainerDaemonSocket != "-" {
daemonPath := getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket)
binds = append(binds, fmt.Sprintf("%s:%s", daemonPath, "/var/run/docker.sock"))
}
ext := container.LinuxContainerEnvironmentExtensions{}
mounts := map[string]string{
"act-toolcache": "/toolcache",
name + "-env": ext.GetActPath(),
}
if job := rc.Run.Job(); job != nil {
if container := job.Container(); container != nil {
for _, v := range container.Volumes {
if !strings.Contains(v, ":") || filepath.IsAbs(v) {
// Bind anonymous volume or host file.
binds = append(binds, v)
} else {
// Mount existing volume.
paths := strings.SplitN(v, ":", 2)
mounts[paths[0]] = paths[1]
}
}
}
}
if rc.Config.BindWorkdir {
bindModifiers := ""
if runtime.GOOS == "darwin" {
bindModifiers = ":delegated"
}
if selinux.GetEnabled() {
bindModifiers = ":z"
}
binds = append(binds, fmt.Sprintf("%s:%s%s", rc.Config.Workdir, ext.ToContainerPath(rc.Config.Workdir), bindModifiers))
} else {
mounts[name] = ext.ToContainerPath(rc.Config.Workdir)
}
// For Gitea
// add some default binds and mounts to ValidVolumes
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, "act-toolcache")
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, name)
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, name+"-env")
// TODO: add a new configuration to control whether the docker daemon can be mounted
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket))
return binds, mounts
}
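// startHostEnvironment sets up a host-based job environment (no container):
// it creates per-run directories under the action cache dir, seeds RUNNER_*
// and host environment variables, and writes the workflow event payload.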
func (rc *RunContext) startHostEnvironment() common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
rawLogger := logger.WithField("raw_output", true)
logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
if rc.Config.LogOutput {
rawLogger.Infof("%s", s)
} else {
rawLogger.Debugf("%s", s)
}
return true
})
cacheDir := rc.ActionCacheDir()
randBytes := make([]byte, 8)
_, _ = rand.Read(randBytes)
miscpath := filepath.Join(cacheDir, hex.EncodeToString(randBytes))
actPath := filepath.Join(miscpath, "act")
if err := os.MkdirAll(actPath, 0o777); err != nil {
return err
}
path := filepath.Join(miscpath, "hostexecutor")
if err := os.MkdirAll(path, 0o777); err != nil {
return err
}
runnerTmp := filepath.Join(miscpath, "tmp")
if err := os.MkdirAll(runnerTmp, 0o777); err != nil {
return err
}
toolCache := filepath.Join(cacheDir, "tool_cache")
rc.JobContainer = &container.HostEnvironment{
Path: path,
TmpDir: runnerTmp,
ToolCache: toolCache,
Workdir: rc.Config.Workdir,
ActPath: actPath,
CleanUp: func() {
os.RemoveAll(miscpath)
},
StdOut: logWriter,
}
rc.cleanUpJobContainer = rc.JobContainer.Remove()
for k, v := range rc.JobContainer.GetRunnerContext(ctx) {
if v, ok := v.(string); ok {
rc.Env[fmt.Sprintf("RUNNER_%s", strings.ToUpper(k))] = v
}
}
for _, env := range os.Environ() {
if k, v, ok := strings.Cut(env, "="); ok {
// don't override
if _, ok := rc.Env[k]; !ok {
rc.Env[k] = v
}
}
}
return common.NewPipelineExecutor(
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
Name: "workflow/event.json",
Mode: 0o644,
Body: rc.EventJSON,
}, &container.FileEntry{
Name: "workflow/envs.txt",
Mode: 0o666,
Body: "",
}),
)(ctx)
}
}
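// startJobContainer resolves the platform image and credentials, prepares the
// job network and service containers, then creates and starts the job
// container and copies the workflow event payload into it.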
func (rc *RunContext) startJobContainer() common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
image := rc.platformImage(ctx)
rawLogger := logger.WithField("raw_output", true)
logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
if rc.Config.LogOutput {
rawLogger.Infof("%s", s)
} else {
rawLogger.Debugf("%s", s)
}
return true
})
username, password, err := rc.handleCredentials(ctx)
if err != nil {
return fmt.Errorf("failed to handle credentials: %s", err)
}
logger.Infof("\U0001f680 Start image=%s", image)
name := rc.jobContainerName()
// For Gitea, to support --volumes-from <container_name_or_id> in options,
// we need to expose the container name via an environment variable.
rc.Env["JOB_CONTAINER_NAME"] = name
envList := make([]string, 0)
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TOOL_CACHE", "/opt/hostedtoolcache"))
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_OS", "Linux"))
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx)))
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
envList = append(envList, fmt.Sprintf("%s=%s", "LANG", "C.UTF-8")) // Use same locale as GitHub Actions
ext := container.LinuxContainerEnvironmentExtensions{}
binds, mounts := rc.GetBindsAndMounts()
// specify the network the container will connect to during the `docker create` stage
// (equivalent to: docker create --network <networkName> <image>)
networkName := string(rc.Config.ContainerNetworkMode)
if networkName == "" {
// if networkName is an empty string, create a new network for the containers
// and remove it once the job has finished.
networkName = rc.networkName()
}
// add service containers
for serviceId, spec := range rc.Run.Job().Services {
// interpolate env
interpolatedEnvs := make(map[string]string, len(spec.Env))
for k, v := range spec.Env {
interpolatedEnvs[k] = rc.ExprEval.Interpolate(ctx, v)
}
envs := make([]string, 0, len(interpolatedEnvs))
for k, v := range interpolatedEnvs {
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
}
// interpolate cmd
interpolatedCmd := make([]string, 0, len(spec.Cmd))
for _, v := range spec.Cmd {
interpolatedCmd = append(interpolatedCmd, rc.ExprEval.Interpolate(ctx, v))
}
username, password, err := rc.handleServiceCredentials(ctx, spec.Credentials)
if err != nil {
return fmt.Errorf("failed to handle service %s credentials: %w", serviceId, err)
}
serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(spec.Volumes)
serviceContainerName := createSimpleContainerName(rc.jobContainerName(), serviceId)
c := container.NewContainer(&container.NewContainerInput{
Name: serviceContainerName,
WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
Image: spec.Image,
Username: username,
Password: password,
Cmd: interpolatedCmd,
Env: envs,
Mounts: serviceMounts,
Binds: serviceBinds,
Stdout: logWriter,
Stderr: logWriter,
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
AutoRemove: rc.Config.AutoRemove,
Options: spec.Options,
NetworkMode: networkName,
NetworkAliases: []string{serviceId},
ValidVolumes: rc.Config.ValidVolumes,
})
rc.ServiceContainers = append(rc.ServiceContainers, c)
}
rc.cleanUpJobContainer = func(ctx context.Context) error {
if rc.JobContainer != nil && !rc.Config.ReuseContainers {
return rc.JobContainer.Remove().
Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).
Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false))(ctx)
}
return nil
}
rc.JobContainer = container.NewContainer(&container.NewContainerInput{
Cmd: nil,
Entrypoint: []string{"/bin/sleep", fmt.Sprint(rc.Config.ContainerMaxLifetime.Round(time.Second).Seconds())},
WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
Image: image,
Username: username,
Password: password,
Name: name,
Env: envList,
Mounts: mounts,
NetworkMode: networkName,
NetworkAliases: []string{rc.Name},
Binds: binds,
Stdout: logWriter,
Stderr: logWriter,
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
Options: rc.options(ctx),
AutoRemove: rc.Config.AutoRemove,
ValidVolumes: rc.Config.ValidVolumes,
})
if rc.JobContainer == nil {
return errors.New("Failed to create job container")
}
return common.NewPipelineExecutor(
rc.pullServicesImages(rc.Config.ForcePull),
rc.JobContainer.Pull(rc.Config.ForcePull),
rc.createNetwork(networkName).IfBool(rc.Config.ContainerNetworkMode == ""), // if ContainerNetworkMode is an empty string, a new network is created for the containers.
rc.startServiceContainers(networkName),
rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
2020-02-23 17:01:25 -06:00
rc.JobContainer.Start(false),
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
Name: "workflow/event.json",
Mode: 0o644,
Body: rc.EventJSON,
}, &container.FileEntry{
Name: "workflow/envs.txt",
Mode: 0o666,
Body: "",
}),
)(ctx)
}
}
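// createNetwork returns an executor that creates the named docker network.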
func (rc *RunContext) createNetwork(name string) common.Executor {
return func(ctx context.Context) error {
return container.NewDockerNetworkCreateExecutor(name)(ctx)
}
}
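// removeNetwork returns an executor that removes the named docker network.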
func (rc *RunContext) removeNetwork(name string) common.Executor {
return func(ctx context.Context) error {
return container.NewDockerNetworkRemoveExecutor(name)(ctx)
}
}
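// execJobContainer executes a command inside the job container.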
func (rc *RunContext) execJobContainer(cmd []string, env map[string]string, user, workdir string) common.Executor {
return func(ctx context.Context) error {
return rc.JobContainer.Exec(cmd, env, user, workdir)(ctx)
}
}
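// ApplyExtraPath prepends the collected extra PATH entries to the PATH
// variable in env, falling back to the image or default PATH when env does
// not define one yet.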
func (rc *RunContext) ApplyExtraPath(ctx context.Context, env *map[string]string) {
if len(rc.ExtraPath) > 0 {
path := rc.JobContainer.GetPathVariableName()
if rc.JobContainer.IsEnvironmentCaseInsensitive() {
// On Windows systems, Path and PATH could both be in the map
for k := range *env {
if strings.EqualFold(path, k) {
path = k
break
}
}
}
if (*env)[path] == "" {
cenv := map[string]string{}
var cpath string
if err := rc.JobContainer.UpdateFromImageEnv(&cenv)(ctx); err == nil {
if p, ok := cenv[path]; ok {
cpath = p
}
}
if len(cpath) == 0 {
cpath = rc.JobContainer.DefaultPathVariable()
}
(*env)[path] = cpath
}
(*env)[path] = rc.JobContainer.JoinPathVariable(append(rc.ExtraPath, (*env)[path])...)
}
}
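// UpdateExtraPath reads the GITHUB_PATH file from the job container and adds
// every non-empty line to the extra PATH entries.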
func (rc *RunContext) UpdateExtraPath(ctx context.Context, githubEnvPath string) error {
if common.Dryrun(ctx) {
return nil
}
pathTar, err := rc.JobContainer.GetContainerArchive(ctx, githubEnvPath)
if err != nil {
return err
}
defer pathTar.Close()
reader := tar.NewReader(pathTar)
_, err = reader.Next()
if err != nil && err != io.EOF {
return err
}
s := bufio.NewScanner(reader)
for s.Scan() {
line := s.Text()
if len(line) > 0 {
rc.addPath(ctx, line)
}
}
return nil
}
// stopJobContainer runs the registered cleanup for the job container and its
// volumes (if they exist), unless rc.Config.ReuseContainers is set
func (rc *RunContext) stopJobContainer() common.Executor {
return func(ctx context.Context) error {
if rc.cleanUpJobContainer != nil && !rc.Config.ReuseContainers {
return rc.cleanUpJobContainer(ctx)
}
return nil
}
}
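// pullServicesImages pulls the images of all service containers in parallel.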
func (rc *RunContext) pullServicesImages(forcePull bool) common.Executor {
return func(ctx context.Context) error {
execs := []common.Executor{}
for _, c := range rc.ServiceContainers {
execs = append(execs, c.Pull(forcePull))
}
return common.NewParallelExecutor(len(execs), execs...)(ctx)
}
}
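// startServiceContainers pulls, creates and starts all service containers in parallel.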
func (rc *RunContext) startServiceContainers(networkName string) common.Executor {
return func(ctx context.Context) error {
execs := []common.Executor{}
for _, c := range rc.ServiceContainers {
execs = append(execs, common.NewPipelineExecutor(
c.Pull(false),
c.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
c.Start(false),
))
}
return common.NewParallelExecutor(len(execs), execs...)(ctx)
}
}
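// stopServiceContainers removes all service containers in parallel.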
func (rc *RunContext) stopServiceContainers() common.Executor {
return func(ctx context.Context) error {
execs := []common.Executor{}
for _, c := range rc.ServiceContainers {
execs = append(execs, c.Remove())
}
return common.NewParallelExecutor(len(execs), execs...)(ctx)
}
}
// ActionCacheDir returns the directory used to cache actions, honoring
// rc.Config.ActionCacheDir when set and falling back to the XDG cache directory.
func (rc *RunContext) ActionCacheDir() string {
if rc.Config.ActionCacheDir != "" {
return rc.Config.ActionCacheDir
}
var xdgCache string
var ok bool
if xdgCache, ok = os.LookupEnv("XDG_CACHE_HOME"); !ok || xdgCache == "" {
if home, err := os.UserHomeDir(); err == nil {
xdgCache = filepath.Join(home, ".cache")
} else if xdgCache, err = filepath.Abs("."); err != nil {
// It's almost impossible to get here, so the temp dir is a good fallback
xdgCache = os.TempDir()
}
}
return filepath.Join(xdgCache, "act")
}
// Interpolate outputs after a job is done
func (rc *RunContext) interpolateOutputs() common.Executor {
return func(ctx context.Context) error {
ee := rc.NewExpressionEvaluator(ctx)
for k, v := range rc.Run.Job().Outputs {
interpolated := ee.Interpolate(ctx, v)
if v != interpolated {
rc.Run.Job().Outputs[k] = interpolated
}
}
return nil
}
}
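// startContainer starts either the host environment or the job container,
// depending on the configured platform.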
func (rc *RunContext) startContainer() common.Executor {
return func(ctx context.Context) error {
if rc.IsHostEnv(ctx) {
return rc.startHostEnvironment()(ctx)
}
return rc.startJobContainer()(ctx)
}
}
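// IsHostEnv reports whether the job should run directly on the host, i.e. the
// platform is "-self-hosted" and no container image is configured.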
func (rc *RunContext) IsHostEnv(ctx context.Context) bool {
platform := rc.runsOnImage(ctx)
image := rc.containerImage(ctx)
return image == "" && strings.EqualFold(platform, "-self-hosted")
}
func (rc *RunContext) stopContainer() common.Executor {
return rc.stopJobContainer()
}
func (rc *RunContext) closeContainer() common.Executor {
return func(ctx context.Context) error {
if rc.JobContainer != nil {
return rc.JobContainer.Close()(ctx)
}
return nil
}
}
func (rc *RunContext) matrix() map[string]interface{} {
return rc.Matrix
}
func (rc *RunContext) result(result string) {
rc.Run.Job().Result = result
}
func (rc *RunContext) steps() []*model.Step {
return rc.Run.Job().Steps
}
// Executor returns a pipeline executor for all the steps in the job
func (rc *RunContext) Executor() common.Executor {
var executor common.Executor
switch rc.Run.Job().Type() {
case model.JobTypeDefault:
executor = newJobExecutor(rc, &stepFactoryImpl{}, rc)
case model.JobTypeReusableWorkflowLocal:
executor = newLocalReusableWorkflowExecutor(rc)
case model.JobTypeReusableWorkflowRemote:
executor = newRemoteReusableWorkflowExecutor(rc)
}
return func(ctx context.Context) error {
res, err := rc.isEnabled(ctx)
if err != nil {
return err
}
if res {
return executor(ctx)
}
return nil
}
}
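// containerImage returns the interpolated container image of the job, or an
// empty string if the job defines no container.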
func (rc *RunContext) containerImage(ctx context.Context) string {
job := rc.Run.Job()
c := job.Container()
if c != nil {
return rc.ExprEval.Interpolate(ctx, c.Image)
}
return ""
}
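// runsOnImage resolves the job's runs-on labels to a platform image, giving
// the PlatformPicker precedence over the configured platform map.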
func (rc *RunContext) runsOnImage(ctx context.Context) string {
job := rc.Run.Job()
if job.RunsOn() == nil {
common.Logger(ctx).Errorf("'runs-on' key not defined in %s", rc.String())
}
runsOn := job.RunsOn()
for i, v := range runsOn {
runsOn[i] = rc.ExprEval.Interpolate(ctx, v)
}
if pick := rc.Config.PlatformPicker; pick != nil {
if image := pick(runsOn); image != "" {
return image
}
}
for _, runnerLabel := range runsOn {
image := rc.Config.Platforms[strings.ToLower(runnerLabel)]
if image != "" {
return image
}
}
return ""
}
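// platformImage returns the job's container image, falling back to the
// runs-on platform image.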
func (rc *RunContext) platformImage(ctx context.Context) string {
if containerImage := rc.containerImage(ctx); containerImage != "" {
return containerImage
}
return rc.runsOnImage(ctx)
}
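// options returns the job's container options, falling back to the globally
// configured container options.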
func (rc *RunContext) options(ctx context.Context) string {
job := rc.Run.Job()
c := job.Container()
if c == nil {
return rc.Config.ContainerOptions
}
return c.Options
}
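// isEnabled evaluates the job's if-expression and, for default jobs, checks
// that a platform image is available.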
func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {
job := rc.Run.Job()
l := common.Logger(ctx)
runJob, err := EvalBool(ctx, rc.ExprEval, job.If.Value, exprparser.DefaultStatusCheckSuccess)
if err != nil {
return false, fmt.Errorf(" \u274C Error in if-expression: \"if: %s\" (%s)", job.If.Value, err)
}
if !runJob {
l.WithField("jobResult", "skipped").Debugf("Skipping job '%s' due to '%s'", job.Name, job.If.Value)
return false, nil
}
if job.Type() != model.JobTypeDefault {
return true, nil
}
img := rc.platformImage(ctx)
if img == "" {
if job.RunsOn() == nil {
l.Errorf("'runs-on' key not defined in %s", rc.String())
}
for _, runnerLabel := range job.RunsOn() {
platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
l.Infof("\U0001F6A7 Skipping unsupported platform -- Try running with `-P %+v=...`", platformName)
}
return false, nil
}
return true, nil
}
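// mergeMaps merges the given maps from left to right; later values win.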
func mergeMaps(maps ...map[string]string) map[string]string {
rtnMap := make(map[string]string)
for _, m := range maps {
for k, v := range m {
rtnMap[k] = v
}
}
return rtnMap
}
// Deprecated: use createSimpleContainerName instead.
func createContainerName(parts ...string) string {
name := strings.Join(parts, "-")
pattern := regexp.MustCompile("[^a-zA-Z0-9]")
name = pattern.ReplaceAllString(name, "-")
name = strings.ReplaceAll(name, "--", "-")
hash := sha256.Sum256([]byte(name))
// SHA256 is 64 hex characters. So trim name to 63 characters to make room for the hash and separator
trimmedName := strings.Trim(trimToLen(name, 63), "-")
return fmt.Sprintf("%s-%x", trimmedName, hash)
}
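// createSimpleContainerName sanitizes each part to [a-zA-Z0-9-], drops empty
// parts and joins the rest with underscores.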
func createSimpleContainerName(parts ...string) string {
pattern := regexp.MustCompile("[^a-zA-Z0-9-]")
name := make([]string, 0, len(parts))
for _, v := range parts {
v = pattern.ReplaceAllString(v, "-")
v = strings.Trim(v, "-")
for strings.Contains(v, "--") {
v = strings.ReplaceAll(v, "--", "-")
}
if v != "" {
name = append(name, v)
}
}
return strings.Join(name, "_")
}
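// trimToLen truncates s to at most l bytes.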
func trimToLen(s string, l int) string {
if l < 0 {
l = 0
}
if len(s) > l {
return s[:l]
}
return s
}
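// getJobContext derives the overall job status from the recorded step results.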
func (rc *RunContext) getJobContext() *model.JobContext {
jobStatus := "success"
for _, stepStatus := range rc.StepResults {
if stepStatus.Conclusion == model.StepStatusFailure {
jobStatus = "failure"
break
}
}
return &model.JobContext{
Status: jobStatus,
}
}
func (rc *RunContext) getStepsContext() map[string]*model.StepResult {
return rc.StepResults
}
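// getGithubContext assembles the github context from the run configuration,
// the event payload and the local repository metadata.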
func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext {
logger := common.Logger(ctx)
ghc := &model.GithubContext{
Event: make(map[string]interface{}),
Workflow: rc.Run.Workflow.Name,
RunID: rc.Config.Env["GITHUB_RUN_ID"],
RunNumber: rc.Config.Env["GITHUB_RUN_NUMBER"],
Actor: rc.Config.Actor,
EventName: rc.Config.EventName,
Action: rc.CurrentStep,
Token: rc.Config.Token,
Job: rc.Run.JobID,
ActionPath: rc.ActionPath,
RepositoryOwner: rc.Config.Env["GITHUB_REPOSITORY_OWNER"],
RetentionDays: rc.Config.Env["GITHUB_RETENTION_DAYS"],
RunnerPerflog: rc.Config.Env["RUNNER_PERFLOG"],
RunnerTrackingID: rc.Config.Env["RUNNER_TRACKING_ID"],
Repository: rc.Config.Env["GITHUB_REPOSITORY"],
Ref: rc.Config.Env["GITHUB_REF"],
Sha: rc.Config.Env["SHA_REF"],
RefName: rc.Config.Env["GITHUB_REF_NAME"],
RefType: rc.Config.Env["GITHUB_REF_TYPE"],
BaseRef: rc.Config.Env["GITHUB_BASE_REF"],
HeadRef: rc.Config.Env["GITHUB_HEAD_REF"],
Workspace: rc.Config.Env["GITHUB_WORKSPACE"],
}
if rc.JobContainer != nil {
ghc.EventPath = rc.JobContainer.GetActPath() + "/workflow/event.json"
ghc.Workspace = rc.JobContainer.ToContainerPath(rc.Config.Workdir)
}
if ghc.RunID == "" {
ghc.RunID = "1"
}
if ghc.RunNumber == "" {
ghc.RunNumber = "1"
}
if ghc.RetentionDays == "" {
ghc.RetentionDays = "0"
}
if ghc.RunnerPerflog == "" {
ghc.RunnerPerflog = "/dev/null"
}
// Backwards compatibility for configs that require
// a default rather than being run as a cmd
if ghc.Actor == "" {
ghc.Actor = "nektos/act"
}
{ // Adapt to Gitea
if preset := rc.Config.PresetGitHubContext; preset != nil {
ghc.Event = preset.Event
ghc.RunID = preset.RunID
ghc.RunNumber = preset.RunNumber
ghc.Actor = preset.Actor
ghc.Repository = preset.Repository
ghc.EventName = preset.EventName
ghc.Sha = preset.Sha
ghc.Ref = preset.Ref
ghc.RefName = preset.RefName
ghc.RefType = preset.RefType
ghc.HeadRef = preset.HeadRef
ghc.BaseRef = preset.BaseRef
ghc.Token = preset.Token
ghc.RepositoryOwner = preset.RepositoryOwner
ghc.RetentionDays = preset.RetentionDays
instance := rc.Config.GitHubInstance
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
ghc.ServerURL = instance
ghc.APIURL = instance + "/api/v1" // the version of Gitea is v1
ghc.GraphQLURL = "" // Gitea doesn't support graphql
return ghc
}
}
if rc.EventJSON != "" {
err := json.Unmarshal([]byte(rc.EventJSON), &ghc.Event)
if err != nil {
logger.Errorf("Unable to Unmarshal event '%s': %v", rc.EventJSON, err)
}
}
ghc.SetBaseAndHeadRef()
repoPath := rc.Config.Workdir
ghc.SetRepositoryAndOwner(ctx, rc.Config.GitHubInstance, rc.Config.RemoteName, repoPath)
if ghc.Ref == "" {
ghc.SetRef(ctx, rc.Config.DefaultBranch, repoPath)
}
if ghc.Sha == "" {
ghc.SetSha(ctx, repoPath)
}
ghc.SetRefTypeAndName()
// defaults
ghc.ServerURL = "https://github.com"
ghc.APIURL = "https://api.github.com"
ghc.GraphQLURL = "https://api.github.com/graphql"
// per GHES
if rc.Config.GitHubInstance != "github.com" {
ghc.ServerURL = fmt.Sprintf("https://%s", rc.Config.GitHubInstance)
ghc.APIURL = fmt.Sprintf("https://%s/api/v3", rc.Config.GitHubInstance)
ghc.GraphQLURL = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance)
}
{ // Adapt to Gitea
instance := rc.Config.GitHubInstance
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
ghc.ServerURL = instance
ghc.APIURL = instance + "/api/v1" // the version of Gitea is v1
ghc.GraphQLURL = "" // Gitea doesn't support graphql
}
// allow to be overridden by user
if rc.Config.Env["GITHUB_SERVER_URL"] != "" {
ghc.ServerURL = rc.Config.Env["GITHUB_SERVER_URL"]
}
if rc.Config.Env["GITHUB_API_URL"] != "" {
ghc.APIURL = rc.Config.Env["GITHUB_API_URL"]
}
if rc.Config.Env["GITHUB_GRAPHQL_URL"] != "" {
ghc.GraphQLURL = rc.Config.Env["GITHUB_GRAPHQL_URL"]
}
return ghc
}
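// isLocalCheckout reports whether the step checks out the current repository
// and ref, in which case act can skip it and use the local working copy.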
func isLocalCheckout(ghc *model.GithubContext, step *model.Step) bool {
if step.Type() == model.StepTypeInvalid {
// This will be errored out by the executor later; we need this check here to avoid a nil panic
return false
}
if step.Type() != model.StepTypeUsesActionRemote {
return false
}
remoteAction := newRemoteAction(step.Uses)
if remoteAction == nil {
// IsCheckout() will nil-panic if we don't bail out early
return false
}
if !remoteAction.IsCheckout() {
return false
}
if repository, ok := step.With["repository"]; ok && repository != ghc.Repository {
return false
}
if repository, ok := step.With["ref"]; ok && repository != ghc.Ref {
return false
}
return true
}
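// nestedMapLookup walks m along the given keys and returns the value found,
// or nil if any key is missing.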
func nestedMapLookup(m map[string]interface{}, ks ...string) (rval interface{}) {
var ok bool
if len(ks) == 0 { // degenerate input
return nil
}
if rval, ok = m[ks[0]]; !ok {
return nil
} else if len(ks) == 1 { // we've reached the final key
return rval
} else if m, ok = rval.(map[string]interface{}); !ok {
return nil
} else { // 1+ more keys
return nestedMapLookup(m, ks[1:]...)
}
}
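// withGithubEnv populates env with CI, GITHUB_* and runner variables derived
// from the given github context.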
func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubContext, env map[string]string) map[string]string {
env["CI"] = "true"
env["GITHUB_WORKFLOW"] = github.Workflow
env["GITHUB_RUN_ID"] = github.RunID
env["GITHUB_RUN_NUMBER"] = github.RunNumber
env["GITHUB_ACTION"] = github.Action
env["GITHUB_ACTION_PATH"] = github.ActionPath
env["GITHUB_ACTION_REPOSITORY"] = github.ActionRepository
env["GITHUB_ACTION_REF"] = github.ActionRef
env["GITHUB_ACTIONS"] = "true"
env["GITHUB_ACTOR"] = github.Actor
env["GITHUB_REPOSITORY"] = github.Repository
env["GITHUB_EVENT_NAME"] = github.EventName
env["GITHUB_EVENT_PATH"] = github.EventPath
env["GITHUB_WORKSPACE"] = github.Workspace
env["GITHUB_SHA"] = github.Sha
env["GITHUB_REF"] = github.Ref
env["GITHUB_REF_NAME"] = github.RefName
env["GITHUB_REF_TYPE"] = github.RefType
env["GITHUB_TOKEN"] = github.Token
env["GITHUB_JOB"] = github.Job
env["GITHUB_REPOSITORY_OWNER"] = github.RepositoryOwner
env["GITHUB_RETENTION_DAYS"] = github.RetentionDays
env["RUNNER_PERFLOG"] = github.RunnerPerflog
env["RUNNER_TRACKING_ID"] = github.RunnerTrackingID
env["GITHUB_BASE_REF"] = github.BaseRef
env["GITHUB_HEAD_REF"] = github.HeadRef
env["GITHUB_SERVER_URL"] = github.ServerURL
env["GITHUB_API_URL"] = github.APIURL
env["GITHUB_GRAPHQL_URL"] = github.GraphQLURL
{ // Adapt to Gitea
instance := rc.Config.GitHubInstance
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
env["GITHUB_SERVER_URL"] = instance
env["GITHUB_API_URL"] = instance + "/api/v1" // the version of Gitea is v1
env["GITHUB_GRAPHQL_URL"] = "" // Gitea doesn't support graphql
}
if rc.Config.ArtifactServerPath != "" {
setActionRuntimeVars(rc, env)
}
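// Derive ImageOS from the job's runs-on labels; hosted runners expose this variable and some setup actions rely on it.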
job := rc.Run.Job()
if job.RunsOn() != nil {
for _, runnerLabel := range job.RunsOn() {
platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
if platformName != "" {
if platformName == "ubuntu-latest" {
// hardcode the current ubuntu-latest image since we have no way to resolve it on the fly
env["ImageOS"] = "ubuntu20"
} else {
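// Normalize the label, e.g. "ubuntu-22.04" -> "ubuntu22": drop the first '-' and anything after the first '.'.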
platformName = strings.SplitN(strings.Replace(platformName, `-`, ``, 1), `.`, 2)[0]
env["ImageOS"] = platformName
}
}
}
}
return env
}
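// setActionRuntimeVars sets ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN for
// artifact uploads, falling back to the configured artifact server address and
// a placeholder token when the host environment does not provide them.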
func setActionRuntimeVars(rc *RunContext, env map[string]string) {
actionsRuntimeURL := os.Getenv("ACTIONS_RUNTIME_URL")
if actionsRuntimeURL == "" {
actionsRuntimeURL = fmt.Sprintf("http://%s:%s/", rc.Config.ArtifactServerAddr, rc.Config.ArtifactServerPort)
}
env["ACTIONS_RUNTIME_URL"] = actionsRuntimeURL
actionsRuntimeToken := os.Getenv("ACTIONS_RUNTIME_TOKEN")
if actionsRuntimeToken == "" {
actionsRuntimeToken = "token"
}
env["ACTIONS_RUNTIME_TOKEN"] = actionsRuntimeToken
}
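// handleCredentials resolves the registry credentials for the job container
// from its 'credentials:' block, falling back to the DOCKER_USERNAME and
// DOCKER_PASSWORD secrets when no block is present.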
func (rc *RunContext) handleCredentials(ctx context.Context) (username, password string, err error) {
// TODO: remove below 2 lines when we can release act with breaking changes
username = rc.Config.Secrets["DOCKER_USERNAME"]
password = rc.Config.Secrets["DOCKER_PASSWORD"]
container := rc.Run.Job().Container()
if container == nil || container.Credentials == nil {
return
}
if len(container.Credentials) != 2 {
err = fmt.Errorf("invalid property count for key 'credentials:'")
return
}
ee := rc.NewExpressionEvaluator(ctx)
if username = ee.Interpolate(ctx, container.Credentials["username"]); username == "" {
err = fmt.Errorf("failed to interpolate container.credentials.username")
return
}
if password = ee.Interpolate(ctx, container.Credentials["password"]); password == "" {
err = fmt.Errorf("failed to interpolate container.credentials.password")
return
}
if container.Credentials["username"] == "" || container.Credentials["password"] == "" {
err = fmt.Errorf("container.credentials cannot be empty")
return
}
return username, password, err
}
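// handleServiceCredentials interpolates and validates the 'credentials:' block
// of a service container, returning the resolved username and password.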
func (rc *RunContext) handleServiceCredentials(ctx context.Context, creds map[string]string) (username, password string, err error) {
if creds == nil {
return
}
if len(creds) != 2 {
err = fmt.Errorf("invalid property count for key 'credentials:'")
return
}
ee := rc.NewExpressionEvaluator(ctx)
if username = ee.Interpolate(ctx, creds["username"]); username == "" {
err = fmt.Errorf("failed to interpolate credentials.username")
return
}
if password = ee.Interpolate(ctx, creds["password"]); password == "" {
err = fmt.Errorf("failed to interpolate credentials.password")
return
}
return
}
// GetServiceBindsAndMounts returns the binds and mounts for the service container, resolving paths as appropriate
func (rc *RunContext) GetServiceBindsAndMounts(svcVolumes []string) ([]string, map[string]string) {
if rc.Config.ContainerDaemonSocket == "" {
rc.Config.ContainerDaemonSocket = "/var/run/docker.sock"
}
binds := []string{}
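// A daemon socket value of "-" disables mounting the docker socket into the service container.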
if rc.Config.ContainerDaemonSocket != "-" {
daemonPath := getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket)
binds = append(binds, fmt.Sprintf("%s:%s", daemonPath, "/var/run/docker.sock"))
}
mounts := map[string]string{}
for _, v := range svcVolumes {
if !strings.Contains(v, ":") || filepath.IsAbs(v) {
// Bind anonymous volume or host file.
binds = append(binds, v)
} else {
// Mount existing volume.
paths := strings.SplitN(v, ":", 2)
mounts[paths[0]] = paths[1]
}
}
return binds, mounts
}