commit 835b36cb63
parent ac8258db4b
Author: Casey Lee <cplee@nektos.com>
Date:   2020-02-10 16:53:14 -08:00
Signed-off-by: Casey Lee <cplee@nektos.com>
GPG key ID: 1899120ECD0A1784 (no known key found for this signature in database)

8 changed files with 50 additions and 20 deletions

View file

@@ -1,3 +1,3 @@
 #!/bin/sh
 set -e
-go test -cover ./runner
+go test -cover ./pkg/runner

View file

@@ -1,7 +1,7 @@
 linters-settings:
   gocyclo:
     # minimal code complexity to report, 30 by default (but we recommend 10-20)
-    min-complexity: 10
+    min-complexity: 12
   gocritic:
     disabled-checks:
       - ifElseChain
@@ -18,4 +18,4 @@ linters:
     - nakedret
     - prealloc
     - scopelint
     - gocritic

View file

@@ -61,13 +61,11 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
         var eventName string
         if len(args) > 0 {
             eventName = args[0]
-        } else {
+        } else if events := planner.GetEvents(); len(events) == 1 {
             // set default event type if we only have a single workflow in the file.
             // this way user dont have to specify the event.
-            if events := planner.GetEvents(); len(events) == 1 {
-                log.Debugf("Using detected workflow event: %s", events[0])
-                eventName = events[0]
-            }
+            log.Debugf("Using detected workflow event: %s", events[0])
+            eventName = events[0]
         }
 
         // build the plan for this run
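
The rewrite above folds the nested check into an `else if` with an init statement, a Go idiom where the declared variable (`events`) is scoped to that branch only. A self-contained sketch of the same pattern, with stand-in names rather than act's planner API:

package main

import "fmt"

// detectedEvents stands in for planner.GetEvents(); the name is illustrative only.
func detectedEvents() []string { return []string{"push"} }

func main() {
	args := []string{}
	eventName := ""
	if len(args) > 0 {
		eventName = args[0]
	} else if events := detectedEvents(); len(events) == 1 {
		// events is declared in the else-if header and is only visible in this branch
		eventName = events[0]
	}
	fmt.Println(eventName) // "push"
}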

View file

@@ -87,7 +87,13 @@ func NewParallelExecutor(executors ...Executor) Executor {
         errChan := make(chan error)
 
         for _, executor := range executors {
-            go executor.ChannelError(errChan)(ctx)
+            e := executor
+            go func() {
+                err := e.ChannelError(errChan)(ctx)
+                if err != nil {
+                    log.Fatal(err)
+                }
+            }()
         }
 
         for i := 0; i < len(executors); i++ {
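
Wrapping the call in a closure is what makes the `e := executor` copy necessary: before Go 1.22, a closure launched inside a loop captures the single range variable, so without the copy every goroutine could end up calling the last executor. A minimal, self-contained illustration of the pitfall and the fix (names are made up, not act's API):

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, item := range items {
		it := item // copy the range variable so each goroutine gets its own value (needed before Go 1.22)
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(it) // without the copy, all three goroutines could print "c"
		}()
	}
	wg.Wait()
}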

View file

@@ -225,7 +225,7 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) Executor {
             logger.Errorf("Unable to clone %v %s: %v", input.URL, refName, err)
             return err
         }
-        os.Chmod(input.Dir, 0755)
+        _ = os.Chmod(input.Dir, 0755)
     }
 
     w, err := r.Worktree()
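
Assigning the result to the blank identifier marks the dropped os.Chmod error as intentional, which is what errcheck-style linters look for. A small sketch of the two options, using a throwaway directory rather than act's input.Dir:

package main

import (
	"log"
	"os"
)

func main() {
	// A throwaway directory for illustration; act applies the chmod to the clone directory.
	dir, err := os.MkdirTemp("", "example-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Option 1: explicitly discard the error; the blank assignment tells errcheck the drop is intentional.
	_ = os.Chmod(dir, 0755)

	// Option 2: surface it instead.
	if err := os.Chmod(dir, 0755); err != nil {
		log.Printf("unable to chmod %s: %v", dir, err)
	}
}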

View file

@@ -151,11 +151,12 @@ func (p *Plan) mergeStages(stages []*Stage) {
     for i := 0; i < len(newStages); i++ {
         newStages[i] = new(Stage)
         if i >= len(p.Stages) {
-            newStages[i].Runs = append(stages[i].Runs)
+            newStages[i].Runs = append(newStages[i].Runs, stages[i].Runs...)
         } else if i >= len(stages) {
-            newStages[i].Runs = append(p.Stages[i].Runs)
+            newStages[i].Runs = append(newStages[i].Runs, p.Stages[i].Runs...)
         } else {
-            newStages[i].Runs = append(p.Stages[i].Runs, stages[i].Runs...)
+            newStages[i].Runs = append(newStages[i].Runs, p.Stages[i].Runs...)
+            newStages[i].Runs = append(newStages[i].Runs, stages[i].Runs...)
         }
     }
     p.Stages = newStages
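
The removed single-argument `append(x)` calls are a known pitfall that Go linters flag: with nothing to add, `append` just returns its argument, so the old branches aliased an existing Runs slice (sharing its backing array) instead of building the merged stage's own slice. The new code always appends into the freshly allocated stage. A short illustration of alias versus copy, with made-up data:

package main

import "fmt"

func main() {
	src := []string{"job1", "job2"}

	aliased := append(src)               // single-argument append returns src itself (same backing array)
	copied := append([]string{}, src...) // appending into a fresh slice yields an independent copy

	aliased[0] = "changed"
	fmt.Println(src[0])    // "changed": the alias wrote through to src
	fmt.Println(copied[0]) // "job1": the copy is unaffected
}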

View file

@@ -3,6 +3,7 @@ package model
 import (
     "fmt"
     "io"
+    "log"
     "regexp"
     "strings"
 
@@ -23,15 +24,24 @@ func (w *Workflow) On() []string {
     switch w.RawOn.Kind {
     case yaml.ScalarNode:
         var val string
-        w.RawOn.Decode(&val)
+        err := w.RawOn.Decode(&val)
+        if err != nil {
+            log.Fatal(err)
+        }
         return []string{val}
     case yaml.SequenceNode:
         var val []string
-        w.RawOn.Decode(&val)
+        err := w.RawOn.Decode(&val)
+        if err != nil {
+            log.Fatal(err)
+        }
         return val
     case yaml.MappingNode:
         var val map[string]interface{}
-        w.RawOn.Decode(&val)
+        err := w.RawOn.Decode(&val)
+        if err != nil {
+            log.Fatal(err)
+        }
         var keys []string
         for k := range val {
             keys = append(keys, k)
@@ -60,11 +70,17 @@ func (j *Job) Needs() []string {
     switch j.RawNeeds.Kind {
     case yaml.ScalarNode:
         var val string
-        j.RawNeeds.Decode(&val)
+        err := j.RawNeeds.Decode(&val)
+        if err != nil {
+            log.Fatal(err)
+        }
         return []string{val}
     case yaml.SequenceNode:
         var val []string
-        j.RawNeeds.Decode(&val)
+        err := j.RawNeeds.Decode(&val)
+        if err != nil {
+            log.Fatal(err)
+        }
         return val
     }
     return nil
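
Both On() and Needs() normalize a YAML node that may be a scalar, a sequence, or (for `on:`) a mapping, and the change makes a failed decode fatal instead of silently ignored. A rough, self-contained sketch of that Kind switch using gopkg.in/yaml.v3 and simplified types (not act's actual Workflow/Job structs):

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

// onEvents mirrors the Kind switch above: scalar -> one event,
// sequence -> the listed events, mapping -> the map keys.
func onEvents(n yaml.Node) []string {
	switch n.Kind {
	case yaml.ScalarNode:
		var val string
		if err := n.Decode(&val); err != nil {
			log.Fatal(err)
		}
		return []string{val}
	case yaml.SequenceNode:
		var val []string
		if err := n.Decode(&val); err != nil {
			log.Fatal(err)
		}
		return val
	case yaml.MappingNode:
		var val map[string]interface{}
		if err := n.Decode(&val); err != nil {
			log.Fatal(err)
		}
		keys := make([]string, 0, len(val))
		for k := range val {
			keys = append(keys, k)
		}
		return keys
	}
	return nil
}

func main() {
	docs := []string{
		"on: push",                           // scalar
		"on: [push, pull_request]",           // sequence
		"on:\n  push:\n    branches: [main]", // mapping
	}
	for _, doc := range docs {
		var w struct {
			RawOn yaml.Node `yaml:"on"`
		}
		if err := yaml.Unmarshal([]byte(doc), &w); err != nil {
			log.Fatal(err)
		}
		fmt.Println(onEvents(w.RawOn))
	}
}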

View file

@@ -45,7 +45,10 @@ func (rc *RunContext) Close(ctx context.Context) error {
 
 // Executor returns a pipeline executor for all the steps in the job
 func (rc *RunContext) Executor() common.Executor {
-    rc.setupTempDir()
+    err := rc.setupTempDir()
+    if err != nil {
+        return common.NewErrorExecutor(err)
+    }
     steps := make([]common.Executor, 0)
 
     for i, step := range rc.Run.Job().Steps {
@@ -74,7 +77,13 @@ func (rc *RunContext) setupTempDir() error {
         tempBase = "/tmp"
     }
     rc.Tempdir, err = ioutil.TempDir(tempBase, "act-")
-    os.Chmod(rc.Tempdir, 0755)
+    if err != nil {
+        return err
+    }
+    err = os.Chmod(rc.Tempdir, 0755)
+    if err != nil {
+        return err
+    }
     log.Debugf("Setup tempdir %s", rc.Tempdir)
     return err
 }
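
Returning common.NewErrorExecutor(err) keeps Executor()'s signature while still surfacing the temp-dir failure: the caller gets an executor that performs no work and simply reports the error when run. A minimal sketch of that pattern, assuming Executor is a func(context.Context) error as act's common package declares it:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Executor is the deferred-unit-of-work shape: a func that runs when invoked.
type Executor func(ctx context.Context) error

// NewErrorExecutor returns an executor that does no work and simply
// reports err when it is eventually run.
func NewErrorExecutor(err error) Executor {
	return func(ctx context.Context) error {
		return err
	}
}

func main() {
	exec := NewErrorExecutor(errors.New("failed to set up temp dir"))
	fmt.Println(exec(context.Background())) // "failed to set up temp dir"
}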