2020-02-04 18:38:41 -06:00
|
|
|
package runner
|
|
|
|
|
|
|
|
import (
|
2020-02-11 11:10:35 -06:00
|
|
|
"context"
|
2023-01-13 13:28:17 -06:00
|
|
|
"encoding/json"
|
2020-02-17 12:25:28 -06:00
|
|
|
"fmt"
|
2022-10-29 12:15:38 -05:00
|
|
|
"os"
|
2023-06-05 22:00:54 -05:00
|
|
|
"runtime"
|
2023-05-04 04:54:09 -05:00
|
|
|
"time"
|
2020-02-04 18:38:41 -06:00
|
|
|
|
2023-08-01 22:52:14 -05:00
|
|
|
docker_container "github.com/docker/docker/api/types/container"
|
2022-03-22 14:26:10 -05:00
|
|
|
log "github.com/sirupsen/logrus"
|
2020-02-04 18:38:41 -06:00
|
|
|
|
|
|
|
"github.com/nektos/act/pkg/common"
|
|
|
|
"github.com/nektos/act/pkg/model"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Runner provides capabilities to run GitHub actions
type Runner interface {
	// NewPlanExecutor returns an executor that runs every stage of the
	// given plan; the executor reports an error if any job fails.
	NewPlanExecutor(plan *model.Plan) common.Executor
}
|
|
|
|
|
|
|
|
// Config contains the config for a new runner
type Config struct {
	Actor string // the user that triggered the event
	Workdir string // path to working directory
	ActionCacheDir string // path used for caching action contents
	ActionOfflineMode bool // when offline, use caching action contents
	BindWorkdir bool // bind the workdir to the job container
	EventName string // name of event to run
	EventPath string // path to JSON file to use for event.json in containers
	DefaultBranch string // name of the main branch for this repository
	ReuseContainers bool // reuse containers to maintain state
	ForcePull bool // force pulling of the image, even if already present
	ForceRebuild bool // force rebuilding local docker image action
	LogOutput bool // log the output from docker run
	JSONLogger bool // use json or text logger
	LogPrefixJobID bool // switches from the full job name to the job id
	Env map[string]string // env for containers
	Inputs map[string]string // manually passed action inputs
	Secrets map[string]string // list of secrets
	Vars map[string]string // list of vars
	Token string // GitHub token
	InsecureSecrets bool // when true, do not hide secret values in terminal output
	Platforms map[string]string // list of platforms
	Privileged bool // use privileged mode
	UsernsMode string // user namespace to use
	ContainerArchitecture string // Desired OS/architecture platform for running containers
	ContainerDaemonSocket string // Path to Docker daemon socket
	ContainerOptions string // Options for the job container
	UseGitIgnore bool // controls if paths in .gitignore should not be copied into container, default true
	GitHubInstance string // GitHub instance to use, default "github.com"
	ContainerCapAdd []string // list of kernel capabilities to add to the containers
	ContainerCapDrop []string // list of kernel capabilities to remove from the containers
	AutoRemove bool // controls if the container is automatically removed upon workflow completion
	ArtifactServerPath string // the path where the artifact server stores uploads
	ArtifactServerAddr string // the address the artifact server binds to
	ArtifactServerPort string // the port the artifact server binds to
	NoSkipCheckout bool // do not skip actions/checkout
	RemoteName string // remote name in local git repo config
	ReplaceGheActionWithGithubCom []string // actions to fetch from github.com instead of the GitHub Enterprise instance
	ReplaceGheActionTokenWithGithubCom string // Token of private action repo on GitHub.
	Matrix map[string]map[string]bool // Matrix config to run
	ContainerNetworkMode docker_container.NetworkMode // the network mode of job containers (the value of --network)
	ActionCache ActionCache // Use a custom ActionCache Implementation
	PresetGitHubContext *model.GithubContext // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
	EventJSON string // the content of JSON file to use for event.json in containers, overrides EventPath
	ContainerNamePrefix string // the prefix of container name
	ContainerMaxLifetime time.Duration // the max lifetime of job containers
	DefaultActionInstance string // the default actions web site
	PlatformPicker func(labels []string) string // platform picker, it will take precedence over Platforms if isn't nil
	JobLoggerLevel *log.Level // the level of job logger
	ValidVolumes []string // only volumes (and bind mounts) in this slice can be mounted on the job container or service containers
	InsecureSkipTLS bool // whether to skip verifying TLS certificate of the Gitea instance
	ContainerNetworkEnableIPv6 bool // create the network with IPv6 support enabled
}
|
|
|
|
|
2023-05-04 04:45:53 -05:00
|
|
|
// GetToken: Adapt to Gitea
|
2023-04-06 01:16:20 -05:00
|
|
|
func (c Config) GetToken() string {
|
|
|
|
token := c.Secrets["GITHUB_TOKEN"]
|
|
|
|
if c.Secrets["GITEA_TOKEN"] != "" {
|
|
|
|
token = c.Secrets["GITEA_TOKEN"]
|
|
|
|
}
|
|
|
|
return token
|
|
|
|
}
|
|
|
|
|
2022-12-15 10:45:22 -06:00
|
|
|
// caller holds a reference to the run context of the job that invoked
// this runner (i.e. the caller of a reusable workflow).
type caller struct {
	runContext *RunContext
}
|
|
|
|
|
2020-02-04 18:38:41 -06:00
|
|
|
// runnerImpl is the default Runner implementation.
type runnerImpl struct {
	config *Config
	// eventJSON is the payload served as event.json inside containers;
	// populated by configure().
	eventJSON string
	caller *caller // the job calling this runner (caller of a reusable workflow)
}
|
|
|
|
|
2020-02-07 00:17:58 -06:00
|
|
|
// New Creates a new Runner
|
|
|
|
func New(runnerConfig *Config) (Runner, error) {
|
2020-02-04 18:38:41 -06:00
|
|
|
runner := &runnerImpl{
|
|
|
|
config: runnerConfig,
|
|
|
|
}
|
|
|
|
|
2022-12-15 10:45:22 -06:00
|
|
|
return runner.configure()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (runner *runnerImpl) configure() (Runner, error) {
|
2020-02-04 18:38:41 -06:00
|
|
|
runner.eventJSON = "{}"
|
2023-03-15 22:45:29 -05:00
|
|
|
if runner.config.EventJSON != "" {
|
|
|
|
runner.eventJSON = runner.config.EventJSON
|
|
|
|
} else if runner.config.EventPath != "" {
|
2020-02-04 18:38:41 -06:00
|
|
|
log.Debugf("Reading event.json from %s", runner.config.EventPath)
|
2022-10-29 12:15:38 -05:00
|
|
|
eventJSONBytes, err := os.ReadFile(runner.config.EventPath)
|
2020-02-04 18:38:41 -06:00
|
|
|
if err != nil {
|
2020-02-07 00:17:58 -06:00
|
|
|
return nil, err
|
2020-02-04 18:38:41 -06:00
|
|
|
}
|
|
|
|
runner.eventJSON = string(eventJSONBytes)
|
2023-01-13 13:28:17 -06:00
|
|
|
} else if len(runner.config.Inputs) != 0 {
|
|
|
|
eventMap := map[string]map[string]string{
|
|
|
|
"inputs": runner.config.Inputs,
|
|
|
|
}
|
|
|
|
eventJSON, err := json.Marshal(eventMap)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
runner.eventJSON = string(eventJSON)
|
2020-02-04 18:38:41 -06:00
|
|
|
}
|
2020-02-07 00:17:58 -06:00
|
|
|
return runner, nil
|
2020-02-04 18:38:41 -06:00
|
|
|
}
|
|
|
|
|
2022-03-30 12:20:45 -05:00
|
|
|
// NewPlanExecutor returns an executor that runs the plan's stages in
// order. Within a stage, runs execute in parallel (bounded by CPU
// count), and the matrix expansions of a single run are bounded by the
// job's max-parallel setting. A final step reports any failed job.
func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
	// Longest rendered job name seen so far; used to pad log prefixes to
	// a uniform width. Shared by closure across all stage executors.
	maxJobNameLen := 0

	stagePipeline := make([]common.Executor, 0)
	log.Debugf("Plan Stages: %v", plan.Stages)

	for i := range plan.Stages {
		stage := plan.Stages[i]
		stagePipeline = append(stagePipeline, func(ctx context.Context) error {
			pipeline := make([]common.Executor, 0)
			for _, run := range stage.Runs {
				log.Debugf("Stages Runs: %v", stage.Runs)
				stageExecutor := make([]common.Executor, 0)
				job := run.Job()
				log.Debugf("Job.Name: %v", job.Name)
				log.Debugf("Job.RawNeeds: %v", job.RawNeeds)
				log.Debugf("Job.RawRunsOn: %v", job.RawRunsOn)
				log.Debugf("Job.Env: %v", job.Env)
				log.Debugf("Job.If: %v", job.If)
				for step := range job.Steps {
					if nil != job.Steps[step] {
						log.Debugf("Job.Steps: %v", job.Steps[step].String())
					}
				}
				log.Debugf("Job.TimeoutMinutes: %v", job.TimeoutMinutes)
				log.Debugf("Job.Services: %v", job.Services)
				log.Debugf("Job.Strategy: %v", job.Strategy)
				log.Debugf("Job.RawContainer: %v", job.RawContainer)
				log.Debugf("Job.Defaults.Run.Shell: %v", job.Defaults.Run.Shell)
				log.Debugf("Job.Defaults.Run.WorkingDirectory: %v", job.Defaults.Run.WorkingDirectory)
				log.Debugf("Job.Outputs: %v", job.Outputs)
				log.Debugf("Job.Uses: %v", job.Uses)
				log.Debugf("Job.With: %v", job.With)
				// log.Debugf("Job.RawSecrets: %v", job.RawSecrets)
				log.Debugf("Job.Result: %v", job.Result)

				if job.Strategy != nil {
					log.Debugf("Job.Strategy.FailFast: %v", job.Strategy.FailFast)
					log.Debugf("Job.Strategy.MaxParallel: %v", job.Strategy.MaxParallel)
					log.Debugf("Job.Strategy.FailFastString: %v", job.Strategy.FailFastString)
					log.Debugf("Job.Strategy.MaxParallelString: %v", job.Strategy.MaxParallelString)
					log.Debugf("Job.Strategy.RawMatrix: %v", job.Strategy.RawMatrix)

					// Interpolate expressions inside the raw matrix YAML
					// before expanding it into concrete matrix entries.
					strategyRc := runner.newRunContext(ctx, run, nil)
					if err := strategyRc.NewExpressionEvaluator(ctx).EvaluateYamlNode(ctx, &job.Strategy.RawMatrix); err != nil {
						log.Errorf("Error while evaluating matrix: %v", err)
					}
				}

				// Expand the matrix, then keep only entries matching the
				// user-requested matrix values (runner.config.Matrix).
				var matrixes []map[string]interface{}
				if m, err := job.GetMatrixes(); err != nil {
					log.Errorf("Error while get job's matrix: %v", err)
				} else {
					log.Debugf("Job Matrices: %v", m)
					log.Debugf("Runner Matrices: %v", runner.config.Matrix)
					matrixes = selectMatrixes(m, runner.config.Matrix)
				}
				log.Debugf("Final matrix after applying user inclusions '%v'", matrixes)

				// Default parallelism for matrix jobs, overridden by the
				// job's strategy, then clamped to the number of entries.
				maxParallel := 4
				if job.Strategy != nil {
					maxParallel = job.Strategy.MaxParallel
				}

				if len(matrixes) < maxParallel {
					maxParallel = len(matrixes)
				}

				for i, matrix := range matrixes {
					matrix := matrix // capture per-iteration value for the closure below
					rc := runner.newRunContext(ctx, run, matrix)
					rc.JobName = rc.Name
					if len(matrixes) > 1 {
						// Disambiguate expanded matrix entries: name-1, name-2, ...
						rc.Name = fmt.Sprintf("%s-%d", rc.Name, i+1)
					}
					if len(rc.String()) > maxJobNameLen {
						maxJobNameLen = len(rc.String())
					}
					stageExecutor = append(stageExecutor, func(ctx context.Context) error {
						// Pad the job name so parallel job logs line up.
						jobName := fmt.Sprintf("%-*s", maxJobNameLen, rc.String())
						executor, err := rc.Executor()
						if err != nil {
							return err
						}

						return executor(common.WithJobErrorContainer(WithJobLogger(ctx, rc.Run.JobID, jobName, rc.Config, &rc.Masks, matrix)))
					})
				}
				pipeline = append(pipeline, common.NewParallelExecutor(maxParallel, stageExecutor...))
			}
			// Bound cross-run parallelism within the stage by CPU count.
			ncpu := runtime.NumCPU()
			if 1 > ncpu {
				ncpu = 1
			}
			log.Debugf("Detected CPUs: %d", ncpu)
			return common.NewParallelExecutor(ncpu, pipeline...)(ctx)
		})
	}

	return common.NewPipelineExecutor(stagePipeline...).Then(handleFailure(plan))
}
|
|
|
|
|
|
|
|
func handleFailure(plan *model.Plan) common.Executor {
|
|
|
|
return func(ctx context.Context) error {
|
|
|
|
for _, stage := range plan.Stages {
|
|
|
|
for _, run := range stage.Runs {
|
|
|
|
if run.Job().Result == "failure" {
|
|
|
|
return fmt.Errorf("Job '%s' failed", run.String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2020-02-04 18:38:41 -06:00
|
|
|
}
|
|
|
|
|
2023-03-19 12:25:55 -05:00
|
|
|
// selectMatrixes filters the expanded matrix entries against the
// user-requested matrix values. An entry is kept only if, for every key
// that appears in targetMatrixValues, the entry's value (formatted with
// %v) is among the allowed values. Keys without a restriction always pass.
func selectMatrixes(originalMatrixes []map[string]interface{}, targetMatrixValues map[string]map[string]bool) []map[string]interface{} {
	selected := make([]map[string]interface{}, 0)
	for _, candidate := range originalMatrixes {
		include := true
		for key, val := range candidate {
			allowed, restricted := targetMatrixValues[key]
			if !restricted {
				continue
			}
			// Values are compared by their %v string form so ints,
			// bools, etc. match string-keyed user selections.
			if !allowed[fmt.Sprintf("%v", val)] {
				include = false
				break
			}
		}
		if include {
			selected = append(selected, candidate)
		}
	}
	return selected
}
|
|
|
|
|
2022-06-17 10:55:21 -05:00
|
|
|
func (runner *runnerImpl) newRunContext(ctx context.Context, run *model.Run, matrix map[string]interface{}) *RunContext {
|
2020-02-23 17:01:25 -06:00
|
|
|
rc := &RunContext{
|
|
|
|
Config: runner.config,
|
|
|
|
Run: run,
|
|
|
|
EventJSON: runner.eventJSON,
|
2021-12-22 13:52:09 -06:00
|
|
|
StepResults: make(map[string]*model.StepResult),
|
2020-02-23 17:01:25 -06:00
|
|
|
Matrix: matrix,
|
2022-12-15 10:45:22 -06:00
|
|
|
caller: runner.caller,
|
2020-02-17 12:11:16 -06:00
|
|
|
}
|
2022-06-17 10:55:21 -05:00
|
|
|
rc.ExprEval = rc.NewExpressionEvaluator(ctx)
|
|
|
|
rc.Name = rc.ExprEval.Interpolate(ctx, run.String())
|
2022-12-15 10:45:22 -06:00
|
|
|
|
2020-02-27 01:29:43 -06:00
|
|
|
return rc
|
2020-02-04 18:38:41 -06:00
|
|
|
}
|