act/pkg/runner/job_executor.go
sillyguodong 9283cfc9b1 Fix container network issue (#56)
Follow: https://gitea.com/gitea/act_runner/pulls/184
Close https://gitea.com/gitea/act_runner/issues/177

#### Changes:
- `act` creates new networks only if `NeedCreateNetwork` is true, and removes these networks at the end. `NeedCreateNetwork` is passed in by `act_runner` and is true only if `container.network` in the `act_runner` configuration file is empty.
- In the `docker create` phase, specify the network to which containers will connect, because if no network is specified the container connects to the `bridge` network that Docker creates automatically (see the sketch after this list).
  - If the network is a user-defined network (i.e. the value of `container.network` is empty or `<custom-network>`; the network created by `act` is also user-defined), also specify an alias via `--network-alias`. The alias of a service is its `<service-id>`, so the service container can be reached at `<service-id>:<port>` from the steps of the job.
- No longer run `docker network connect` after `docker start`.
  - On the one hand, `docker network connect` applies only to user-defined networks; `docker network connect host <container-name>` returns an error.
  - On the other hand, specifying the network in the `docker create` stage achieves the same effect.
- No longer try to remove containers and networks before the `docker start` stage, because the names of these containers and networks will not be repeated.
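
For illustration, here is a minimal sketch (not the actual `act` code) of attaching a service container to a user-defined network with an alias at create time, using the Docker Go SDK; the image, network name, and service ID below are placeholder values:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func createServiceContainer(ctx context.Context, cli *client.Client) error {
	// placeholder values; in act these come from the job and service definitions
	const (
		image       = "postgres:15"
		networkName = "act-job-network" // network created by act, or the configured `container.network`
		serviceID   = "db"              // steps can then reach the service at db:<port>
	)

	_, err := cli.ContainerCreate(ctx,
		&container.Config{Image: image},
		&container.HostConfig{
			// equivalent of `docker create --network <networkName>`:
			// attach at create time instead of `docker network connect` after start
			NetworkMode: container.NetworkMode(networkName),
		},
		&network.NetworkingConfig{
			EndpointsConfig: map[string]*network.EndpointSettings{
				// equivalent of `--network-alias <serviceID>` on a user-defined network
				networkName: {Aliases: []string{serviceID}},
			},
		},
		nil, // default platform
		serviceID,
	)
	return err
}
```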

Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/56
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
2023-05-16 14:03:55 +08:00


package runner

import (
	"context"
	"fmt"
	"time"

	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/model"
)
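
// jobInfo exposes the per-job data the executor needs: matrix values, the
// step list, container lifecycle executors, output interpolation and the
// result setter.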
type jobInfo interface {
	matrix() map[string]interface{}
	steps() []*model.Step
	startContainer() common.Executor
	stopContainer() common.Executor
	closeContainer() common.Executor
	interpolateOutputs() common.Executor
	result(result string)
}
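
// newJobExecutor builds the executor pipeline for a single job: pre steps,
// main steps, post steps (run in reverse order), followed by container and
// network cleanup.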
func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executor {
	steps := make([]common.Executor, 0)
	preSteps := make([]common.Executor, 0)
	var postExecutor common.Executor

	steps = append(steps, func(ctx context.Context) error {
		logger := common.Logger(ctx)
		if len(info.matrix()) > 0 {
			logger.Infof("\U0001F9EA Matrix: %v", info.matrix())
		}
		return nil
	})

	infoSteps := info.steps()

	if len(infoSteps) == 0 {
		return common.NewDebugExecutor("No steps found")
	}

	preSteps = append(preSteps, func(ctx context.Context) error {
		// has to be skipped for some tests
		if rc.Run == nil {
			return nil
		}
		rc.ExprEval = rc.NewExpressionEvaluator(ctx)
		// evaluate environment variables since they can contain
		// GitHub's special environment variables.
		for k, v := range rc.GetEnv() {
			rc.Env[k] = rc.ExprEval.Interpolate(ctx, v)
		}
		return nil
	})

	for i, stepModel := range infoSteps {
		stepModel := stepModel
		if stepModel == nil {
			return func(ctx context.Context) error {
				return fmt.Errorf("invalid Step %v: missing run or uses key", i)
			}
		}
		if stepModel.ID == "" {
			stepModel.ID = fmt.Sprintf("%d", i)
		}
		stepModel.Number = i

		step, err := sf.newStep(stepModel, rc)
		if err != nil {
			return common.NewErrorExecutor(err)
		}

		preExec := step.pre()
		preSteps = append(preSteps, useStepLogger(rc, stepModel, stepStagePre, func(ctx context.Context) error {
			logger := common.Logger(ctx)
			preErr := preExec(ctx)
			if preErr != nil {
				logger.Errorf("%v", preErr)
				common.SetJobError(ctx, preErr)
			} else if ctx.Err() != nil {
				logger.Errorf("%v", ctx.Err())
				common.SetJobError(ctx, ctx.Err())
			}
			return preErr
		}))

		stepExec := step.main()
		steps = append(steps, useStepLogger(rc, stepModel, stepStageMain, func(ctx context.Context) error {
			logger := common.Logger(ctx)
			err := stepExec(ctx)
			if err != nil {
				logger.Errorf("%v", err)
				common.SetJobError(ctx, err)
			} else if ctx.Err() != nil {
				logger.Errorf("%v", ctx.Err())
				common.SetJobError(ctx, ctx.Err())
			}
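			// the error has already been recorded via SetJobError above; return nil
			// so the pipeline keeps running and later steps can decide via their
			// `if` conditions (e.g. always()) whether to execute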
			return nil
		}))

		postExec := useStepLogger(rc, stepModel, stepStagePost, step.post())
		if postExecutor != nil {
			// run the post executors in reverse order
			postExecutor = postExec.Finally(postExecutor)
		} else {
			postExecutor = postExec
		}
	}
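
	// once all post steps have run, clean up the service containers, the job
	// container and, when ContainerNetworkMode is empty, the job's network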
	postExecutor = postExecutor.Finally(func(ctx context.Context) error {
		jobError := common.JobError(ctx)
		var err error
		if rc.Config.AutoRemove || jobError == nil {
			// always allow 1 min for stopping and removing the runner, even if we were cancelled
			ctx, cancel := context.WithTimeout(common.WithLogger(context.Background(), common.Logger(ctx)), time.Minute)
			defer cancel()

			logger := common.Logger(ctx)
			logger.Infof("Cleaning up services for job %s", rc.JobName)
			if err := rc.stopServiceContainers()(ctx); err != nil {
				logger.Errorf("Error while cleaning services: %v", err)
			}

			logger.Infof("Cleaning up container for job %s", rc.JobName)
			if err = info.stopContainer()(ctx); err != nil {
				logger.Errorf("Error while stopping job container: %v", err)
			}

			if rc.Config.ContainerNetworkMode == "" {
				// if `ContainerNetworkMode` is an empty string, the network the containers
				// are connected to was created by `act_runner`, so we should remove it at the end
				logger.Infof("Cleaning up network for job %s, network name: %s", rc.JobName, rc.networkName())
				if err := rc.removeNetwork(rc.networkName())(ctx); err != nil {
					logger.Errorf("Error while cleaning network: %v", err)
				}
			}
		}
		setJobResult(ctx, info, rc, jobError == nil)
		setJobOutputs(ctx, rc)
		return err
	})
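
	// assemble the final pipeline: start the job container, run pre and main
	// steps, then always run the post/cleanup executor, interpolate outputs
	// and close the container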
	pipeline := make([]common.Executor, 0)
	pipeline = append(pipeline, preSteps...)
	pipeline = append(pipeline, steps...)

	return common.NewPipelineExecutor(info.startContainer(), common.NewPipelineExecutor(pipeline...).
		Finally(func(ctx context.Context) error {
			var cancel context.CancelFunc
			if ctx.Err() == context.Canceled {
				// in case of an aborted run, we still should execute the
				// post steps to allow cleanup.
				ctx, cancel = context.WithTimeout(common.WithLogger(context.Background(), common.Logger(ctx)), 5*time.Minute)
				defer cancel()
			}
			return postExecutor(ctx)
		}).
		Finally(info.interpolateOutputs()).
		Finally(info.closeContainer()))
}
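
// setJobResult records the job result ("success" or "failure"), keeping an
// existing failure state across matrix runs, and propagates the result to the
// calling run context when the job was invoked as a reusable workflow.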
func setJobResult(ctx context.Context, info jobInfo, rc *RunContext, success bool) {
	logger := common.Logger(ctx)

	jobResult := "success"
	// we have only one result for a whole matrix build, so we need
	// to keep an existing result state if we run a matrix
	if len(info.matrix()) > 0 && rc.Run.Job().Result != "" {
		jobResult = rc.Run.Job().Result
	}

	if !success {
		jobResult = "failure"
	}

	info.result(jobResult)
	if rc.caller != nil {
		// set reusable workflow job result
		rc.caller.runContext.result(jobResult)
	}

	jobResultMessage := "succeeded"
	if jobResult != "success" {
		jobResultMessage = "failed"
	}

	logger.WithField("jobResult", jobResult).Infof("\U0001F3C1 Job %s", jobResultMessage)
}
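
// setJobOutputs maps the outputs declared by a reusable workflow back onto
// the calling job so the caller can consume them.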
func setJobOutputs(ctx context.Context, rc *RunContext) {
	if rc.caller != nil {
		// map outputs for reusable workflows
		callerOutputs := make(map[string]string)

		ee := rc.NewExpressionEvaluator(ctx)
		for k, v := range rc.Run.Workflow.WorkflowCallConfig().Outputs {
			callerOutputs[k] = ee.Interpolate(ctx, ee.Interpolate(ctx, v.Value))
		}

		rc.caller.runContext.Run.Job().Outputs = callerOutputs
	}
}
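
// useStepLogger wraps a step executor so its output goes through a
// step-scoped logger, temporarily replacing the job container's log writers
// for the duration of the step.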
func useStepLogger(rc *RunContext, stepModel *model.Step, stage stepStage, executor common.Executor) common.Executor {
	return func(ctx context.Context) error {
		ctx = withStepLogger(ctx, stepModel.Number, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())

		rawLogger := common.Logger(ctx).WithField("raw_output", true)
		logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
			if rc.Config.LogOutput {
				rawLogger.Infof("%s", s)
			} else {
				rawLogger.Debugf("%s", s)
			}
			return true
		})

		oldout, olderr := rc.JobContainer.ReplaceLogWriter(logWriter, logWriter)
		defer rc.JobContainer.ReplaceLogWriter(oldout, olderr)

		return executor(ctx)
	}
}