Initial commit with support for GitHub actions
parent d136b830f2
commit f683af5954
33 changed files with 2941 additions and 1 deletion
4
.editorconfig
Normal file
|
@ -0,0 +1,4 @@
|
|||
# Override for Makefile
|
||||
[{Makefile,makefile,GNUmakefile}]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
10
.github/actions/check/Dockerfile
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
FROM golang:1.11.4-stretch
|
||||
|
||||
RUN go get -u honnef.co/go/tools/cmd/staticcheck
|
||||
RUN go get -u golang.org/x/lint/golint
|
||||
RUN go get -u github.com/fzipp/gocyclo
|
||||
|
||||
COPY "entrypoint.sh" "/entrypoint.sh"
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
10
.github/actions/check/entrypoint.sh
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
#!/bin/sh
|
||||
|
||||
#GOPATH=/go
|
||||
#PATH=${GOPATH}/bin:/usr/local/go/bin:${PATH}
|
||||
|
||||
go vet ./...
|
||||
golint -set_exit_status ./...
|
||||
staticcheck ./...
|
||||
gocyclo -over 10 .
|
||||
go test -cover ./...
|
21
.github/main.workflow
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
workflow "check-and-release" {
|
||||
on = "push"
|
||||
resolves = ["release"]
|
||||
}
|
||||
|
||||
action "check" {
|
||||
uses = "./.github/actions/check"
|
||||
}
|
||||
|
||||
action "branch-filter" {
|
||||
needs = ["check"]
|
||||
uses = "actions/bin/filter@master"
|
||||
args = "tag v*"
|
||||
}
|
||||
|
||||
action "release" {
|
||||
needs = ["branch-filter"]
|
||||
uses = "docker://goreleaser/goreleaser:v0.97"
|
||||
args = "release"
|
||||
secrets = ["GITHUB_TOKEN"]
|
||||
}
|
3
.gitignore
vendored
|
@ -10,3 +10,6 @@
|
|||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
dist/
|
||||
.todo
|
37
.goreleaser.yml
Normal file
|
@ -0,0 +1,37 @@
|
|||
before:
|
||||
hooks:
|
||||
- go mod download
|
||||
builds:
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- darwin
|
||||
- linux
|
||||
- windows
|
||||
goarch:
|
||||
- amd64
|
||||
- 386
|
||||
checksum:
|
||||
name_template: 'checksums.txt'
|
||||
snapshot:
|
||||
name_template: "{{ .Env.SNAPSHOT_VERSION }}"
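# SNAPSHOT_VERSION is exported by the Makefile `build` target before it invokes goreleaser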
|
||||
archive:
|
||||
name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}'
|
||||
replacements:
|
||||
darwin: Darwin
|
||||
linux: Linux
|
||||
windows: Windows
|
||||
386: i386
|
||||
amd64: x86_64
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
brew:
|
||||
github:
|
||||
owner: nektos
|
||||
name: homebrew-tap
|
||||
folder: Formula
|
||||
homepage: https://github.com/nektos/act
|
||||
description: Run GitHub Actions locally
|
||||
test: |
|
||||
system "#{bin}/act --version"
|
67
CONTRIBUTING.md
Normal file
|
@ -0,0 +1,67 @@
|
|||
# Contributing to Act
|
||||
|
||||
Help wanted! We'd love your contributions to Act. Please review the following guidelines before contributing. Also, feel free to propose changes to these guidelines by updating this file and submitting a pull request.
|
||||
|
||||
* [I have a question...](#questions)
|
||||
* [I found a bug...](#bugs)
|
||||
* [I have a feature request...](#features)
|
||||
* [I have a contribution to share...](#process)
|
||||
|
||||
## <a name="questions"></a> Have a Question?
|
||||
|
||||
Please don't open a GitHub issue for questions about how to use `act`; the goal is to use issues for managing bugs and feature requests. Issues related to general support will be closed and redirected to our Gitter room.
|
||||
|
||||
For all support-related questions, please ask in our Gitter room: [nektos/act](https://gitter.im/nektos/act).
|
||||
|
||||
## <a name="bugs"></a> Found a Bug?
|
||||
|
||||
If you've identified a bug in `act`, please [submit an issue](#issue) to our GitHub repo: [nektos/act](https://github.com/nektos/act/issues/new). Please also feel free to submit a [Pull Request](#pr) with a fix for the bug!
|
||||
|
||||
## <a name="features"></a> Have a Feature Request?
|
||||
|
||||
All feature requests should start with [submitting an issue](#issue) documenting the user story and acceptance criteria. Again, feel free to submit a [Pull Request](#pr) with a proposed implementation of the feature.
|
||||
|
||||
## <a name="process"></a> Ready to Contribute!
|
||||
|
||||
### <a name="issue"></a> Create an issue
|
||||
|
||||
Before submitting a new issue, please search the existing issues to make sure a similar issue doesn't already exist.
|
||||
|
||||
Assuming no similar issue exists, please include the following information when submitting the issue so we can quickly reproduce it:
|
||||
|
||||
* Version of `act`
|
||||
* Platform (Linux, OS X, Windows)
|
||||
* The complete `main.workflow` file used
|
||||
* The complete command that was executed
|
||||
* Any output from the command
|
||||
* Details of the expected results and how they differed from the actual results
|
||||
|
||||
We may have additional questions and will communicate through the GitHub issue, so please respond to our questions to help reproduce and resolve the issue as quickly as possible.
|
||||
|
||||
New issues can be created in our [GitHub repo](https://github.com/nektos/act/issues/new).
|
||||
|
||||
### <a name="pr"></a>Pull Requests
|
||||
|
||||
Pull requests should target the `master` branch. Please also reference the issue from the description of the pull request using [special keyword syntax](https://help.github.com/articles/closing-issues-via-commit-messages/) to auto close the issue when the PR is merged. For example, include the phrase `fixes #14` in the PR description to have issue #14 auto close.
|
||||
|
||||
### <a name="style"></a> Styleguide
|
||||
|
||||
When submitting code, please make every effort to follow existing conventions and style in order to keep the code as readable as possible. Here are a few points to keep in mind:
|
||||
|
||||
* Please run `go fmt ./...` before committing to ensure code aligns with go standards.
|
||||
* All dependencies must be defined in the `go.mod` file.
|
||||
* For details on the approved style, check out [Effective Go](https://golang.org/doc/effective_go.html).
|
||||
|
||||
Also, consider the original design principles:
|
||||
|
||||
* **Polyglot** - There will be no prescribed language or framework for developing the microservices. The only requirement will be that the service will be run inside a container and exposed via an HTTP endpoint.
|
||||
* **Cloud Provider** - At this point, the tool will assume AWS for the cloud provider and will not be written in a cloud agnostic manner. However, this does not preclude refactoring to add support for other providers at a later time.
|
||||
* **Declarative** - All resource administration will be handled in a declarative vs. imperative manner. A file will be used to declare the desired state of the resources, and the tool will simply assert that the actual state matches the desired state. The tool will accomplish this by generating CloudFormation templates.
|
||||
* **Stateless** - The tool will not maintain its own state. Rather, it will rely on the CloudFormation stacks to determine the state of the platform.
|
||||
* **Secure** - All security will be managed by AWS IAM credentials. No additional authentication or authorization mechanisms will be introduced.
|
||||
|
||||
### License
|
||||
|
||||
By contributing your code, you agree to license your contribution under the terms of the [MIT License](LICENSE.md).
|
||||
|
||||
All files are released with the MIT license.
|
56
Makefile
Normal file
|
@ -0,0 +1,56 @@
|
|||
LATEST_VERSION := $(shell git tag -l --sort=creatordate | grep "^v[0-9]*.[0-9]*.[0-9]*$$" | tail -1 | cut -c 2-)
|
||||
ifeq "$(shell git tag -l v$(LATEST_VERSION) --points-at HEAD)" "v$(LATEST_VERSION)"
|
||||
### latest tag points to current commit, this is a release build
|
||||
VERSION ?= $(LATEST_VERSION)
|
||||
else
|
||||
### latest tag points to prior commit, this is a snapshot build
|
||||
MAJOR_VERSION := $(word 1, $(subst ., ,$(LATEST_VERSION)))
|
||||
MINOR_VERSION := $(word 2, $(subst ., ,$(LATEST_VERSION)))
|
||||
PATCH_VERSION := $(word 3, $(subst ., ,$(LATEST_VERSION)))
|
||||
VERSION ?= $(MAJOR_VERSION).$(MINOR_VERSION).$(shell echo $$(( $(PATCH_VERSION) + 1)) )-develop
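### e.g. LATEST_VERSION=0.1.2 with an untagged HEAD yields VERSION=0.1.3-develop (illustrative values)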
|
||||
endif
|
||||
IS_SNAPSHOT = $(if $(findstring -, $(VERSION)),true,false)
|
||||
TAG_VERSION = v$(VERSION)
|
||||
|
||||
default: check
|
||||
|
||||
deps:
|
||||
@GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck
|
||||
@GO111MODULE=off go get golang.org/x/lint/golint
|
||||
@GO111MODULE=off go get github.com/fzipp/gocyclo
|
||||
|
||||
check:
|
||||
go vet ./...
|
||||
golint -set_exit_status ./...
|
||||
staticcheck ./...
|
||||
gocyclo -over 10 .
|
||||
go test -cover ./...
|
||||
|
||||
build: deps check
|
||||
@GO111MODULE=off go get github.com/goreleaser/goreleaser
|
||||
$(eval export SNAPSHOT_VERSION=$(VERSION))
|
||||
@goreleaser --snapshot --rm-dist
|
||||
|
||||
install: build
|
||||
@cp dist/$(shell go env GOOS)_$(shell go env GOARCH)/act /usr/local/bin/act
|
||||
@chmod 755 /usr/local/bin/act
|
||||
@act --version
|
||||
|
||||
installer:
|
||||
@GO111MODULE=off go get github.com/goreleaser/godownloader
|
||||
godownloader -r nektos/act -o install.sh
|
||||
|
||||
|
||||
promote:
|
||||
@echo "VERSION:$(VERSION) IS_SNAPSHOT:$(IS_SNAPSHOT) LATEST_VERSION:$(LATEST_VERSION)"
|
||||
ifeq (false,$(IS_SNAPSHOT))
|
||||
@echo "Unable to promote a non-snapshot"
|
||||
@exit 1
|
||||
endif
|
||||
ifneq ($(shell git status -s),)
|
||||
@echo "Unable to promote a dirty workspace"
|
||||
@exit 1
|
||||
endif
|
||||
$(eval NEW_VERSION := $(word 1,$(subst -, , $(TAG_VERSION))))
|
||||
git tag -a -m "releasing $(NEW_VERSION)" $(NEW_VERSION)
|
||||
git push origin $(NEW_VERSION)
|
58
README.md
|
@ -1 +1,57 @@
|
|||
# act
|
||||
[![Join the chat at https://gitter.im/nektos/act](https://badges.gitter.im/nektos/act.svg)](https://gitter.im/nektos/act?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/nektos/act)](https://goreportcard.com/report/github.com/nektos/act)
|
||||
|
||||
# Overview
|
||||
Run your [GitHub Actions](https://developer.github.com/actions/) locally! Why would you want to do this? Two reasons:
|
||||
|
||||
* **Fast Feedback** - Rather than having to commit/push every time you want to test out the changes you are making to your `main.workflow` file (or for any changes to embedded GitHub actions), you can use `act` to run the actions locally. The [environment variables](https://developer.github.com/actions/creating-github-actions/accessing-the-runtime-environment/#environment-variables) and [filesystem](https://developer.github.com/actions/creating-github-actions/accessing-the-runtime-environment/#filesystem) are all configured to match what GitHub provides.
|
||||
* **Local Task Runner** - I love [make](https://en.wikipedia.org/wiki/Make_(software)). However, I also hate repeating myself. With `act`, you can use the GitHub Actions defined in your `main.workflow` file to replace your `Makefile`!
|
||||
|
||||
# How Does It Work?
|
||||
When you run `act`, it reads in your GitHub Actions from `.github/main.workflow` and determines the set of actions that need to be run. It uses the Docker API to either pull or build the necessary images, as defined in your `main.workflow` file, and then determines the execution path based on the dependencies that were defined. Once it has the execution path, it uses the Docker API to run containers for each action based on the images prepared earlier. The [environment variables](https://developer.github.com/actions/creating-github-actions/accessing-the-runtime-environment/#environment-variables) and [filesystem](https://developer.github.com/actions/creating-github-actions/accessing-the-runtime-environment/#filesystem) are all configured to match what GitHub provides.
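For example, a `main.workflow` along these lines (hypothetical action names, not the file in this repo) would make `act` run `lint` and `test` in parallel and start `deploy` only after both succeed:

```
workflow "build-and-deploy" {
  on = "push"
  resolves = ["deploy"]
}

action "lint" {
  uses = "./.github/actions/lint"
}

action "test" {
  uses = "./.github/actions/test"
}

action "deploy" {
  needs = ["lint", "test"]
  uses = "docker://alpine:3.8"
  args = "echo deploying"
}
```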
|
||||
|
||||
Let's see it in action with a [sample repo](https://github.com/cplee/github-actions-demo)!
|
||||
|
||||
![Demo](https://github.com/nektos/act/wiki/quickstart/act-quickstart.gif)
|
||||
|
||||
# Installation
|
||||
To install with [Homebrew](https://brew.sh/), run:
|
||||
|
||||
```brew install nektos/tap/act```
|
||||
|
||||
Alternatively, you can use the following:
|
||||
|
||||
```curl https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo sh```
|
||||
|
||||
# Commands
|
||||
|
||||
```
|
||||
# List the actions
|
||||
act -l
|
||||
|
||||
# Run the default (`push`) event:
|
||||
act
|
||||
|
||||
# Run a specific event:
|
||||
act pull_request
|
||||
|
||||
# Run a specific action:
|
||||
act -a test
|
||||
|
||||
# Run in dry-run mode:
|
||||
act -n
|
||||
```
|
||||
|
||||
# Support
|
||||
|
||||
Need help? Ask on [Gitter](https://gitter.im/nektos/act)!
|
||||
|
||||
# Contributing
|
||||
|
||||
Want to contribute to act? Awesome! Check out the [contributing guidelines](CONTRIBUTING.md) to get involved.
|
||||
|
||||
## Building from source
|
||||
|
||||
* Install Go tools 1.11+ - (https://golang.org/doc/install)
|
||||
* Clone this repo `git clone git@github.com:nektos/act.git`
|
||||
* Run unit tests with `make check`
|
||||
* Build and install: `make install`
|
109
actions/log.go
Normal file
|
@ -0,0 +1,109 @@
|
|||
package actions
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
type actionLogFormatter struct {
|
||||
}
|
||||
|
||||
var formatter *actionLogFormatter
|
||||
|
||||
func init() {
|
||||
formatter = new(actionLogFormatter)
|
||||
}
|
||||
|
||||
const (
|
||||
//nocolor = 0
|
||||
red = 31
|
||||
green = 32
|
||||
yellow = 33
|
||||
blue = 36
|
||||
gray = 37
|
||||
)
|
||||
|
||||
func newActionLogger(actionName string, dryrun bool) *logrus.Entry {
|
||||
logger := logrus.New()
|
||||
logger.SetFormatter(formatter)
|
||||
logger.SetLevel(logrus.GetLevel())
|
||||
rtn := logger.WithFields(logrus.Fields{"action_name": actionName, "dryrun": dryrun})
|
||||
return rtn
|
||||
}
|
||||
|
||||
func (f *actionLogFormatter) Format(entry *logrus.Entry) ([]byte, error) {
|
||||
b := &bytes.Buffer{}
|
||||
|
||||
if f.isColored(entry) {
|
||||
f.printColored(b, entry)
|
||||
} else {
|
||||
f.print(b, entry)
|
||||
}
|
||||
|
||||
b.WriteByte('\n')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func (f *actionLogFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry) {
|
||||
var levelColor int
|
||||
switch entry.Level {
|
||||
case logrus.DebugLevel, logrus.TraceLevel:
|
||||
levelColor = gray
|
||||
case logrus.WarnLevel:
|
||||
levelColor = yellow
|
||||
case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel:
|
||||
levelColor = red
|
||||
default:
|
||||
levelColor = blue
|
||||
}
|
||||
|
||||
entry.Message = strings.TrimSuffix(entry.Message, "\n")
|
||||
actionName := entry.Data["action_name"]
|
||||
|
||||
if entry.Data["dryrun"] == true {
|
||||
fmt.Fprintf(b, "\x1b[%dm*DRYRUN* \x1b[%dm[%s] \x1b[0m%s", green, levelColor, actionName, entry.Message)
|
||||
} else {
|
||||
fmt.Fprintf(b, "\x1b[%dm[%s] \x1b[0m%s", levelColor, actionName, entry.Message)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *actionLogFormatter) print(b *bytes.Buffer, entry *logrus.Entry) {
|
||||
entry.Message = strings.TrimSuffix(entry.Message, "\n")
|
||||
actionName := entry.Data["action_name"]
|
||||
|
||||
if entry.Data["dryrun"] == true {
|
||||
fmt.Fprintf(b, "*DRYRUN* [%s] %s", actionName, entry.Message)
|
||||
} else {
|
||||
fmt.Fprintf(b, "[%s] %s", actionName, entry.Message)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *actionLogFormatter) isColored(entry *logrus.Entry) bool {
|
||||
|
||||
isColored := checkIfTerminal(entry.Logger.Out)
|
||||
|
||||
if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
|
||||
isColored = true
|
||||
} else if ok && force == "0" {
|
||||
isColored = false
|
||||
} else if os.Getenv("CLICOLOR") == "0" {
|
||||
isColored = false
|
||||
}
|
||||
|
||||
return isColored
|
||||
}
|
||||
|
||||
func checkIfTerminal(w io.Writer) bool {
|
||||
switch v := w.(type) {
|
||||
case *os.File:
|
||||
return terminal.IsTerminal(int(v.Fd()))
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
76
actions/parser.go
Normal file
|
@ -0,0 +1,76 @@
|
|||
package actions
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/hashicorp/hcl"
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ParseWorkflows will read in the set of actions from the workflow file
|
||||
func ParseWorkflows(workingDir string, workflowPath string) (Workflows, error) {
|
||||
workingDir, err := filepath.Abs(workingDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("Setting working dir to %s", workingDir)
|
||||
|
||||
if !filepath.IsAbs(workflowPath) {
|
||||
workflowPath = filepath.Join(workingDir, workflowPath)
|
||||
}
|
||||
log.Debugf("Loading workflow config from %s", workflowPath)
|
||||
workflowReader, err := os.Open(workflowPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(workflowReader)
|
||||
|
||||
workflows := new(workflowsFile)
|
||||
workflows.WorkingDir = workingDir
|
||||
workflows.WorkflowPath = workflowPath
|
||||
|
||||
astFile, err := hcl.ParseBytes(buf.Bytes())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rootNode := ast.Walk(astFile.Node, cleanWorkflowsAST)
|
||||
err = hcl.DecodeObject(workflows, rootNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workflows.TempDir, err = ioutil.TempDir("/tmp", "act-")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: add validation logic
|
||||
// - check for circular dependencies
|
||||
// - check for valid local path refs
|
||||
// - check for valid dependencies
|
||||
|
||||
return workflows, nil
|
||||
}
|
||||
|
||||
func cleanWorkflowsAST(node ast.Node) (ast.Node, bool) {
|
||||
if objectItem, ok := node.(*ast.ObjectItem); ok {
|
||||
key := objectItem.Keys[0].Token.Value()
|
||||
|
||||
// handle condition where value is a string but should be a list
|
||||
switch key {
|
||||
case "resolves", "needs", "args":
|
||||
if literalType, ok := objectItem.Val.(*ast.LiteralType); ok {
|
||||
listType := new(ast.ListType)
|
||||
listType.Add(literalType)
|
||||
objectItem.Val = listType
|
||||
}
|
||||
}
|
||||
}
|
||||
return node, true
|
||||
}
|
412
actions/runner.go
Normal file
|
@ -0,0 +1,412 @@
|
|||
package actions
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
|
||||
"github.com/howeyc/gopass"
|
||||
"github.com/nektos/act/common"
|
||||
"github.com/nektos/act/container"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var secretCache map[string]string
|
||||
|
||||
func (w *workflowsFile) ListEvents() []string {
|
||||
log.Debugf("Listing all events")
|
||||
events := make([]string, 0)
|
||||
for _, w := range w.Workflow {
|
||||
events = append(events, w.On)
|
||||
}
|
||||
|
||||
// sort the list of events alphabetically
|
||||
sort.Slice(events, func(i, j int) bool {
|
||||
return events[i] < events[j]
|
||||
})
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
func (w *workflowsFile) GraphEvent(eventName string) ([][]string, error) {
|
||||
log.Debugf("Listing actions for event '%s'", eventName)
|
||||
workflow, _, err := w.getWorkflow(eventName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return w.newExecutionGraph(workflow.Resolves...), nil
|
||||
}
|
||||
|
||||
func (w *workflowsFile) RunAction(ctx context.Context, dryrun bool, actionName string) error {
|
||||
log.Debugf("Running action '%s'", actionName)
|
||||
return w.newActionExecutor(ctx, dryrun, "", actionName)()
|
||||
}
|
||||
|
||||
func (w *workflowsFile) RunEvent(ctx context.Context, dryrun bool, eventName string) error {
|
||||
log.Debugf("Running event '%s'", eventName)
|
||||
workflow, _, err := w.getWorkflow(eventName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Running actions %s -> %s", eventName, workflow.Resolves)
|
||||
return w.newActionExecutor(ctx, dryrun, eventName, workflow.Resolves...)()
|
||||
}
|
||||
|
||||
func (w *workflowsFile) getWorkflow(eventName string) (*workflowDef, string, error) {
|
||||
for wName, w := range w.Workflow {
|
||||
if w.On == eventName {
|
||||
return &w, wName, nil
|
||||
}
|
||||
}
|
||||
return nil, "", fmt.Errorf("unsupported event: %v", eventName)
|
||||
}
|
||||
|
||||
func (w *workflowsFile) getAction(actionName string) (*actionDef, error) {
|
||||
if a, ok := w.Action[actionName]; ok {
|
||||
return &a, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported action: %v", actionName)
|
||||
}
|
||||
|
||||
func (w *workflowsFile) Close() {
|
||||
os.RemoveAll(w.TempDir)
|
||||
}
|
||||
|
||||
// newExecutionGraph returns a pipeline of stages to run in series; each stage is a list of actions that can run in parallel
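// For example (hypothetical action names): if "deploy" needs "lint" and "test",
// and both of those need "build", the resulting graph is
//   [["build"], ["lint", "test"], ["deploy"]]
// i.e. stages execute in order and the actions within a stage run in parallel.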
|
||||
func (w *workflowsFile) newExecutionGraph(actionNames ...string) [][]string {
|
||||
// first, build a list of all the necessary actions to run, and their dependencies
|
||||
actionDependencies := make(map[string][]string)
|
||||
for len(actionNames) > 0 {
|
||||
newActionNames := make([]string, 0)
|
||||
for _, aName := range actionNames {
|
||||
// make sure we haven't visited this action yet
|
||||
if _, ok := actionDependencies[aName]; !ok {
|
||||
actionDependencies[aName] = w.Action[aName].Needs
|
||||
newActionNames = append(newActionNames, w.Action[aName].Needs...)
|
||||
}
|
||||
}
|
||||
actionNames = newActionNames
|
||||
}
|
||||
|
||||
// next, build an execution graph
|
||||
graph := make([][]string, 0)
|
||||
for len(actionDependencies) > 0 {
|
||||
stage := make([]string, 0)
|
||||
for aName, aDeps := range actionDependencies {
|
||||
// make sure all deps are in the graph already
|
||||
if listInLists(aDeps, graph...) {
|
||||
stage = append(stage, aName)
|
||||
delete(actionDependencies, aName)
|
||||
}
|
||||
}
|
||||
if len(stage) == 0 {
|
||||
log.Fatalf("Unable to build dependency graph!")
|
||||
}
|
||||
graph = append(graph, stage)
|
||||
}
|
||||
|
||||
return graph
|
||||
}
|
||||
|
||||
// return true iff all strings in srcList exist in at least one of the searchLists
|
||||
func listInLists(srcList []string, searchLists ...[]string) bool {
|
||||
for _, src := range srcList {
|
||||
found := false
|
||||
for _, searchList := range searchLists {
|
||||
for _, search := range searchList {
|
||||
if src == search {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (w *workflowsFile) newActionExecutor(ctx context.Context, dryrun bool, eventName string, actionNames ...string) common.Executor {
|
||||
graph := w.newExecutionGraph(actionNames...)
|
||||
|
||||
pipeline := make([]common.Executor, 0)
|
||||
for _, actions := range graph {
|
||||
stage := make([]common.Executor, 0)
|
||||
for _, actionName := range actions {
|
||||
action, err := w.getAction(actionName)
|
||||
if err != nil {
|
||||
return common.NewErrorExecutor(err)
|
||||
}
|
||||
actionExecutor := action.asExecutor(ctx, dryrun, w.WorkingDir, w.TempDir, actionName, w.setupEnvironment(eventName, actionName, dryrun))
|
||||
stage = append(stage, actionExecutor)
|
||||
}
|
||||
pipeline = append(pipeline, common.NewParallelExecutor(stage...))
|
||||
}
|
||||
|
||||
return common.NewPipelineExecutor(pipeline...)
|
||||
}
|
||||
|
||||
func (action *actionDef) asExecutor(ctx context.Context, dryrun bool, workingDir string, tempDir string, actionName string, env []string) common.Executor {
|
||||
logger := newActionLogger(actionName, dryrun)
|
||||
log.Debugf("Using '%s' for action '%s'", action.Uses, actionName)
|
||||
|
||||
in := container.DockerExecutorInput{
|
||||
Ctx: ctx,
|
||||
Logger: logger,
|
||||
Dryrun: dryrun,
|
||||
}
|
||||
|
||||
var image string
|
||||
executors := make([]common.Executor, 0)
|
||||
if imageRef, ok := parseImageReference(action.Uses); ok {
|
||||
executors = append(executors, container.NewDockerPullExecutor(container.NewDockerPullExecutorInput{
|
||||
DockerExecutorInput: in,
|
||||
Image: imageRef,
|
||||
}))
|
||||
image = imageRef
|
||||
} else if contextDir, imageTag, ok := parseImageLocal(workingDir, action.Uses); ok {
|
||||
executors = append(executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
|
||||
DockerExecutorInput: in,
|
||||
ContextDir: contextDir,
|
||||
ImageTag: imageTag,
|
||||
}))
|
||||
image = imageTag
|
||||
} else if cloneURL, ref, path, ok := parseImageGithub(action.Uses); ok {
|
||||
cloneDir := filepath.Join(os.TempDir(), "act", action.Uses)
|
||||
executors = append(executors, common.NewGitCloneExecutor(common.NewGitCloneExecutorInput{
|
||||
URL: cloneURL,
|
||||
Ref: ref,
|
||||
Dir: cloneDir,
|
||||
Logger: logger,
|
||||
Dryrun: dryrun,
|
||||
}))
|
||||
|
||||
contextDir := filepath.Join(cloneDir, path)
|
||||
imageTag := fmt.Sprintf("%s:%s", filepath.Base(cloneURL.Path), ref)
|
||||
|
||||
executors = append(executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
|
||||
DockerExecutorInput: in,
|
||||
ContextDir: contextDir,
|
||||
ImageTag: imageTag,
|
||||
}))
|
||||
image = imageTag
|
||||
} else {
|
||||
return common.NewErrorExecutor(fmt.Errorf("unable to determine executor type for image '%s'", action.Uses))
|
||||
}
|
||||
|
||||
ghReader, err := action.createGithubTarball()
|
||||
if err != nil {
|
||||
return common.NewErrorExecutor(err)
|
||||
}
|
||||
randSuffix := randString(6)
|
||||
containerName := regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-")
|
||||
if len(containerName)+len(randSuffix)+1 > 30 {
|
||||
containerName = containerName[:(30 - (len(randSuffix) + 1))]
|
||||
}
|
||||
executors = append(executors, container.NewDockerRunExecutor(container.NewDockerRunExecutorInput{
|
||||
DockerExecutorInput: in,
|
||||
Cmd: action.Args,
|
||||
Image: image,
|
||||
WorkingDir: "/github/workspace",
|
||||
Env: env,
|
||||
Name: fmt.Sprintf("%s-%s", containerName, randSuffix),
|
||||
Binds: []string{
|
||||
fmt.Sprintf("%s:%s", workingDir, "/github/workspace"),
|
||||
fmt.Sprintf("%s:%s", tempDir, "/github/home"),
|
||||
fmt.Sprintf("%s:%s", "/var/run/docker.sock", "/var/run/docker.sock"),
|
||||
},
|
||||
Content: map[string]io.Reader{"/github": ghReader},
|
||||
}))
|
||||
|
||||
return common.NewPipelineExecutor(executors...)
|
||||
}
|
||||
|
||||
const letterBytes = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
func randString(slen int) string {
|
||||
b := make([]byte, slen)
|
||||
for i := range b {
|
||||
b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (action *actionDef) createGithubTarball() (io.Reader, error) {
|
||||
var buf bytes.Buffer
|
||||
tw := tar.NewWriter(&buf)
|
||||
var files = []struct {
|
||||
Name string
|
||||
Mode int64
|
||||
Body string
|
||||
}{
|
||||
{"workflow/event.json", 0644, "{}"},
|
||||
}
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.Name,
|
||||
Mode: file.Mode,
|
||||
Size: int64(len(file.Body)),
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := tw.Write([]byte(file.Body)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &buf, nil
|
||||
|
||||
}
|
||||
|
||||
func (w *workflowsFile) setupEnvironment(eventName string, actionName string, dryrun bool) []string {
|
||||
env := make([]string, 0)
|
||||
repoPath := w.WorkingDir
|
||||
|
||||
_, workflowName, _ := w.getWorkflow(eventName)
|
||||
|
||||
env = append(env, fmt.Sprintf("HOME=/github/home"))
|
||||
env = append(env, fmt.Sprintf("GITHUB_ACTOR=nektos/act"))
|
||||
env = append(env, fmt.Sprintf("GITHUB_EVENT_PATH=/github/workflow/event.json"))
|
||||
env = append(env, fmt.Sprintf("GITHUB_WORKSPACE=/github/workspace"))
|
||||
env = append(env, fmt.Sprintf("GITHUB_WORKFLOW=%s", workflowName))
|
||||
env = append(env, fmt.Sprintf("GITHUB_EVENT_NAME=%s", eventName))
|
||||
env = append(env, fmt.Sprintf("GITHUB_ACTION=%s", actionName))
|
||||
|
||||
_, rev, err := common.FindGitRevision(repoPath)
|
||||
if err != nil {
|
||||
log.Warningf("unable to get git revision: %v", err)
|
||||
} else {
|
||||
env = append(env, fmt.Sprintf("GITHUB_SHA=%s", rev))
|
||||
}
|
||||
|
||||
repo, err := common.FindGithubRepo(repoPath)
|
||||
if err != nil {
|
||||
log.Warningf("unable to get git repo: %v", err)
|
||||
} else {
|
||||
env = append(env, fmt.Sprintf("GITHUB_REPOSITORY=%s", repo))
|
||||
}
|
||||
|
||||
branch, err := common.FindGitBranch(repoPath)
|
||||
if err != nil {
|
||||
log.Warningf("unable to get git branch: %v", err)
|
||||
} else {
|
||||
env = append(env, fmt.Sprintf("GITHUB_REF=refs/heads/%s", branch))
|
||||
}
|
||||
|
||||
action, err := w.getAction(actionName)
|
||||
if err == nil && !dryrun {
|
||||
action.applyEnvironmentSecrets(&env)
|
||||
}
|
||||
|
||||
return env
|
||||
}
|
||||
|
||||
func (action *actionDef) applyEnvironmentSecrets(env *[]string) {
|
||||
if action != nil {
|
||||
for envKey, envValue := range action.Env {
|
||||
*env = append(*env, fmt.Sprintf("%s=%s", envKey, envValue))
|
||||
}
|
||||
|
||||
for _, secret := range action.Secrets {
|
||||
if secretVal, ok := os.LookupEnv(secret); ok {
|
||||
*env = append(*env, fmt.Sprintf("%s=%s", secret, secretVal))
|
||||
} else {
|
||||
if secretCache == nil {
|
||||
secretCache = make(map[string]string)
|
||||
}
|
||||
|
||||
if secretCache[secret] == "" {
|
||||
fmt.Printf("Provide value for '%s': ", secret)
|
||||
val, err := gopass.GetPasswdMasked()
|
||||
if err != nil {
|
||||
log.Fatal("abort")
|
||||
}
|
||||
|
||||
secretCache[secret] = string(val)
|
||||
}
|
||||
*env = append(*env, fmt.Sprintf("%s=%s", secret, secretCache[secret]))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseImageLocal treats the image reference as a local directory that should contain a `Dockerfile`
|
||||
func parseImageLocal(workingDir string, contextDir string) (contextDirOut string, tag string, ok bool) {
|
||||
if !filepath.IsAbs(contextDir) {
|
||||
contextDir = filepath.Join(workingDir, contextDir)
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(contextDir, "Dockerfile")); os.IsNotExist(err) {
|
||||
log.Debugf("Ignoring missing Dockerfile '%s/Dockerfile'", contextDir)
|
||||
return "", "", false
|
||||
}
|
||||
|
||||
sha, _, err := common.FindGitRevision(contextDir)
|
||||
if err != nil {
|
||||
log.Warnf("Unable to determine git revision: %v", err)
|
||||
sha = "latest"
|
||||
}
|
||||
return contextDir, fmt.Sprintf("%s:%s", filepath.Base(contextDir), sha), true
|
||||
}
|
||||
|
||||
// parseImageReference treats the image reference as a `docker://` URL for an image in a registry
|
||||
func parseImageReference(image string) (ref string, ok bool) {
|
||||
imageURL, err := url.Parse(image)
|
||||
if err != nil {
|
||||
log.Debugf("Unable to parse image as url: %v", err)
|
||||
return "", false
|
||||
}
|
||||
if imageURL.Scheme != "docker" {
|
||||
log.Debugf("Ignoring non-docker ref '%s'", imageURL.String())
|
||||
return "", false
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s%s", imageURL.Host, imageURL.Path), true
|
||||
}
|
||||
|
||||
// parseImageGithub treats the image reference as a GitHub `owner/repo[/path][@ref]` reference to clone and build
|
||||
func parseImageGithub(image string) (cloneURL *url.URL, ref string, path string, ok bool) {
|
||||
re := regexp.MustCompile("^([^/@]+)/([^/@]+)(/([^@]*))?(@(.*))?$")
|
||||
matches := re.FindStringSubmatch(image)
|
||||
|
||||
if matches == nil {
|
||||
return nil, "", "", false
|
||||
}
|
||||
|
||||
cloneURL, err := url.Parse(fmt.Sprintf("https://github.com/%s/%s", matches[1], matches[2]))
|
||||
if err != nil {
|
||||
log.Debugf("Unable to parse as URL: %v", err)
|
||||
return nil, "", "", false
|
||||
}
|
||||
|
||||
resp, err := http.Head(cloneURL.String())
|
||||
if err != nil || resp.StatusCode >= 400 {
|
||||
log.Debugf("Unable to HEAD URL %s status=%v err=%v", cloneURL.String(), resp.StatusCode, err)
|
||||
return nil, "", "", false
|
||||
}
|
||||
|
||||
ref = matches[6]
|
||||
if ref == "" {
|
||||
ref = "master"
|
||||
}
|
||||
|
||||
path = matches[4]
|
||||
if path == "" {
|
||||
path = "."
|
||||
}
|
||||
|
||||
return cloneURL, ref, path, true
|
||||
}
|
89
actions/runner_test.go
Normal file
|
@ -0,0 +1,89 @@
|
|||
package actions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/nektos/act/common"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseImageReference(t *testing.T) {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
tables := []struct {
|
||||
refIn string
|
||||
refOut string
|
||||
ok bool
|
||||
}{
|
||||
{"docker://myhost.com/foo/bar", "myhost.com/foo/bar", true},
|
||||
{"docker://ubuntu", "ubuntu", true},
|
||||
{"docker://ubuntu:18.04", "ubuntu:18.04", true},
|
||||
{"docker://cibuilds/hugo:0.53", "cibuilds/hugo:0.53", true},
|
||||
{"http://google.com:8080", "", false},
|
||||
{"./foo", "", false},
|
||||
}
|
||||
|
||||
for _, table := range tables {
|
||||
refOut, ok := parseImageReference(table.refIn)
|
||||
assert.Equal(t, table.refOut, refOut)
|
||||
assert.Equal(t, table.ok, ok)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestParseImageLocal(t *testing.T) {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
tables := []struct {
|
||||
pathIn string
|
||||
contextDir string
|
||||
refTag string
|
||||
ok bool
|
||||
}{
|
||||
{"docker://myhost.com/foo/bar", "", "", false},
|
||||
{"http://google.com:8080", "", "", false},
|
||||
{"example/action1", "/example/action1", "action1:", true},
|
||||
}
|
||||
|
||||
revision, _, err := common.FindGitRevision(".")
|
||||
assert.Nil(t, err)
|
||||
basedir, err := filepath.Abs("..")
|
||||
assert.Nil(t, err)
|
||||
for _, table := range tables {
|
||||
contextDir, refTag, ok := parseImageLocal(basedir, table.pathIn)
|
||||
assert.Equal(t, table.ok, ok, "ok match for %s", table.pathIn)
|
||||
if ok {
|
||||
assert.Equal(t, fmt.Sprintf("%s%s", basedir, table.contextDir), contextDir, "context dir doesn't match for %s", table.pathIn)
|
||||
assert.Equal(t, fmt.Sprintf("%s%s", table.refTag, revision), refTag)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
func TestParseImageGithub(t *testing.T) {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
tables := []struct {
|
||||
image string
|
||||
cloneURL string
|
||||
ref string
|
||||
path string
|
||||
ok bool
|
||||
}{
|
||||
{"nektos/act", "https://github.com/nektos/act", "master", ".", true},
|
||||
{"nektos/act/foo", "https://github.com/nektos/act", "master", "foo", true},
|
||||
{"nektos/act@xxxxx", "https://github.com/nektos/act", "xxxxx", ".", true},
|
||||
{"nektos/act/bar/baz@zzzzz", "https://github.com/nektos/act", "zzzzz", "bar/baz", true},
|
||||
{"nektos/zzzzundefinedzzzz", "", "", "", false},
|
||||
}
|
||||
|
||||
for _, table := range tables {
|
||||
cloneURL, ref, path, ok := parseImageGithub(table.image)
|
||||
assert.Equal(t, table.ok, ok, "ok match for %s", table.image)
|
||||
if ok {
|
||||
assert.Equal(t, table.cloneURL, cloneURL.String())
|
||||
assert.Equal(t, table.ref, ref)
|
||||
assert.Equal(t, table.path, path)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
56
actions/types.go
Normal file
|
@ -0,0 +1,56 @@
|
|||
package actions
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// Workflows provides capabilities to work with the workflow file
|
||||
type Workflows interface {
|
||||
EventGrapher
|
||||
EventLister
|
||||
ActionRunner
|
||||
EventRunner
|
||||
Close()
|
||||
}
|
||||
|
||||
// EventGrapher to list the actions
|
||||
type EventGrapher interface {
|
||||
GraphEvent(eventName string) ([][]string, error)
|
||||
}
|
||||
|
||||
// EventLister to list the events
|
||||
type EventLister interface {
|
||||
ListEvents() []string
|
||||
}
|
||||
|
||||
// ActionRunner to run an action
|
||||
type ActionRunner interface {
|
||||
RunAction(ctx context.Context, dryrun bool, action string) error
|
||||
}
|
||||
|
||||
// EventRunner to run an event
|
||||
type EventRunner interface {
|
||||
RunEvent(ctx context.Context, dryrun bool, event string) error
|
||||
}
|
||||
|
||||
type workflowDef struct {
|
||||
On string
|
||||
Resolves []string
|
||||
}
|
||||
|
||||
type actionDef struct {
|
||||
Needs []string
|
||||
Uses string
|
||||
Runs string
|
||||
Args []string
|
||||
Env map[string]string
|
||||
Secrets []string
|
||||
}
|
||||
|
||||
type workflowsFile struct {
|
||||
TempDir string
|
||||
WorkingDir string
|
||||
WorkflowPath string
|
||||
Workflow map[string]workflowDef
|
||||
Action map[string]actionDef
|
||||
}
|
106
cmd/root.go
Normal file
|
@ -0,0 +1,106 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/nektos/act/actions"
|
||||
"github.com/nektos/act/common"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var verbose bool
|
||||
var workflowPath string
|
||||
var workingDir string
|
||||
var list bool
|
||||
var actionName string
|
||||
var dryrun bool
|
||||
|
||||
// Execute is the entry point to running the CLI
|
||||
func Execute(ctx context.Context, version string) {
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "act [event name to run]",
|
||||
Short: "Run Github actions locally by specifying the event name (e.g. `push`) or an action name directly.",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: newRunAction(ctx),
|
||||
Version: version,
|
||||
SilenceUsage: true,
|
||||
}
|
||||
rootCmd.Flags().BoolVarP(&list, "list", "l", false, "list actions")
|
||||
rootCmd.Flags().StringVarP(&actionName, "action", "a", "", "run action")
|
||||
rootCmd.PersistentFlags().BoolVarP(&dryrun, "dryrun", "n", false, "dryrun mode")
|
||||
rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose output")
|
||||
rootCmd.PersistentFlags().StringVarP(&workflowPath, "file", "f", "./.github/main.workflow", "path to workflow file")
|
||||
rootCmd.PersistentFlags().StringVarP(&workingDir, "directory", "C", ".", "working directory")
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func newRunAction(ctx context.Context) func(*cobra.Command, []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
if verbose {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
}
|
||||
|
||||
workflows, err := actions.ParseWorkflows(workingDir, workflowPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer workflows.Close()
|
||||
|
||||
if list {
|
||||
return listEvents(workflows)
|
||||
}
|
||||
|
||||
if actionName != "" {
|
||||
return workflows.RunAction(ctx, dryrun, actionName)
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return workflows.RunEvent(ctx, dryrun, "push")
|
||||
}
|
||||
return workflows.RunEvent(ctx, dryrun, args[0])
|
||||
}
|
||||
}
|
||||
|
||||
func listEvents(workflows actions.Workflows) error {
|
||||
eventNames := workflows.ListEvents()
|
||||
for _, eventName := range eventNames {
|
||||
graph, err := workflows.GraphEvent(eventName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
drawings := make([]*common.Drawing, 0)
|
||||
eventPen := common.NewPen(common.StyleDoubleLine, 91 /*34*/)
|
||||
|
||||
drawings = append(drawings, eventPen.DrawBoxes(fmt.Sprintf("EVENT: %s", eventName)))
|
||||
|
||||
actionPen := common.NewPen(common.StyleSingleLine, 96)
|
||||
arrowPen := common.NewPen(common.StyleNoLine, 97)
|
||||
drawings = append(drawings, arrowPen.DrawArrow())
|
||||
for i, stage := range graph {
|
||||
if i > 0 {
|
||||
drawings = append(drawings, arrowPen.DrawArrow())
|
||||
}
|
||||
drawings = append(drawings, actionPen.DrawBoxes(stage...))
|
||||
}
|
||||
|
||||
maxWidth := 0
|
||||
for _, d := range drawings {
|
||||
if d.GetWidth() > maxWidth {
|
||||
maxWidth = d.GetWidth()
|
||||
}
|
||||
}
|
||||
|
||||
for _, d := range drawings {
|
||||
d.Draw(os.Stdout, maxWidth)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
144
common/draw.go
Normal file
|
@ -0,0 +1,144 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Style is a specific style
|
||||
type Style int
|
||||
|
||||
// Styles
|
||||
const (
|
||||
StyleDoubleLine = iota
|
||||
StyleSingleLine
|
||||
StyleDashedLine
|
||||
StyleNoLine
|
||||
)
|
||||
|
||||
// NewPen creates a new pen
|
||||
func NewPen(style Style, color int) *Pen {
|
||||
bgcolor := 49
|
||||
if os.Getenv("CLICOLOR") == "0" {
|
||||
color = 0
|
||||
bgcolor = 0
|
||||
}
|
||||
return &Pen{
|
||||
style: style,
|
||||
color: color,
|
||||
bgcolor: bgcolor,
|
||||
}
|
||||
}
|
||||
|
||||
type styleDef struct {
|
||||
cornerTL string
|
||||
cornerTR string
|
||||
cornerBL string
|
||||
cornerBR string
|
||||
lineH string
|
||||
lineV string
|
||||
}
|
||||
|
||||
var styleDefs = []styleDef{
|
||||
{"\u2554", "\u2557", "\u255a", "\u255d", "\u2550", "\u2551"},
|
||||
//{"\u250c", "\u2510", "\u2514", "\u2518", "\u2500", "\u2502"},
|
||||
{"\u256d", "\u256e", "\u2570", "\u256f", "\u2500", "\u2502"},
|
||||
{"\u250c", "\u2510", "\u2514", "\u2518", "\u254c", "\u254e"},
|
||||
{" ", " ", " ", " ", " ", " "},
|
||||
}
|
||||
|
||||
// Pen struct
|
||||
type Pen struct {
|
||||
style Style
|
||||
color int
|
||||
bgcolor int
|
||||
}
|
||||
|
||||
// Drawing struct
|
||||
type Drawing struct {
|
||||
buf *strings.Builder
|
||||
width int
|
||||
}
|
||||
|
||||
func (p *Pen) drawTopBars(buf io.Writer, labels ...string) {
|
||||
style := styleDefs[p.style]
|
||||
for _, label := range labels {
|
||||
bar := strings.Repeat(style.lineH, len(label)+2)
|
||||
fmt.Fprintf(buf, " ")
|
||||
fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
|
||||
fmt.Fprintf(buf, "%s%s%s", style.cornerTL, bar, style.cornerTR)
|
||||
fmt.Fprintf(buf, "\x1b[%dm", 0)
|
||||
}
|
||||
fmt.Fprintf(buf, "\n")
|
||||
}
|
||||
func (p *Pen) drawBottomBars(buf io.Writer, labels ...string) {
|
||||
style := styleDefs[p.style]
|
||||
for _, label := range labels {
|
||||
bar := strings.Repeat(style.lineH, len(label)+2)
|
||||
fmt.Fprintf(buf, " ")
|
||||
fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
|
||||
fmt.Fprintf(buf, "%s%s%s", style.cornerBL, bar, style.cornerBR)
|
||||
fmt.Fprintf(buf, "\x1b[%dm", 0)
|
||||
}
|
||||
fmt.Fprintf(buf, "\n")
|
||||
}
|
||||
func (p *Pen) drawLabels(buf io.Writer, labels ...string) {
|
||||
style := styleDefs[p.style]
|
||||
for _, label := range labels {
|
||||
fmt.Fprintf(buf, " ")
|
||||
fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
|
||||
fmt.Fprintf(buf, "%s %s %s", style.lineV, label, style.lineV)
|
||||
fmt.Fprintf(buf, "\x1b[%dm", 0)
|
||||
}
|
||||
fmt.Fprintf(buf, "\n")
|
||||
}
|
||||
|
||||
// DrawArrow between boxes
|
||||
func (p *Pen) DrawArrow() *Drawing {
|
||||
drawing := &Drawing{
|
||||
buf: new(strings.Builder),
|
||||
width: 1,
|
||||
}
|
||||
fmt.Fprintf(drawing.buf, "\x1b[%dm", p.color)
|
||||
fmt.Fprintf(drawing.buf, "\u2b07")
|
||||
fmt.Fprintf(drawing.buf, "\x1b[%dm", 0)
|
||||
return drawing
|
||||
}
|
||||
|
||||
// DrawBoxes to draw boxes
|
||||
func (p *Pen) DrawBoxes(labels ...string) *Drawing {
|
||||
width := 0
|
||||
for _, l := range labels {
|
||||
width += len(l) + 2 + 2 + 1
|
||||
}
|
||||
drawing := &Drawing{
|
||||
buf: new(strings.Builder),
|
||||
width: width,
|
||||
}
|
||||
p.drawTopBars(drawing.buf, labels...)
|
||||
p.drawLabels(drawing.buf, labels...)
|
||||
p.drawBottomBars(drawing.buf, labels...)
|
||||
|
||||
return drawing
|
||||
}
|
||||
|
||||
// Draw to writer
|
||||
func (d *Drawing) Draw(writer io.Writer, centerOnWidth int) {
|
||||
padSize := (centerOnWidth - d.GetWidth()) / 2
|
||||
if padSize < 0 {
|
||||
padSize = 0
|
||||
}
|
||||
for _, l := range strings.Split(d.buf.String(), "\n") {
|
||||
if len(l) > 0 {
|
||||
padding := strings.Repeat(" ", padSize)
|
||||
fmt.Fprintf(writer, "%s%s\n", padding, l)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetWidth of drawing
|
||||
func (d *Drawing) GetWidth() int {
|
||||
return d.width
|
||||
}
|
100
common/executor.go
Normal file
|
@ -0,0 +1,100 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Warning that implements `error` but safe to ignore
|
||||
type Warning struct {
|
||||
Message string
|
||||
}
|
||||
|
||||
// Error the contract for error
|
||||
func (w Warning) Error() string {
|
||||
return w.Message
|
||||
}
|
||||
|
||||
// Warningf create a warning
|
||||
func Warningf(format string, args ...interface{}) Warning {
|
||||
w := Warning{
|
||||
Message: fmt.Sprintf(format, args...),
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
// Executor define contract for the steps of a workflow
|
||||
type Executor func() error
|
||||
|
||||
// Conditional define contract for the conditional predicate
|
||||
type Conditional func() bool
|
||||
|
||||
// NewPipelineExecutor creates a new executor from a series of other executors
|
||||
func NewPipelineExecutor(executors ...Executor) Executor {
|
||||
return func() error {
|
||||
for _, executor := range executors {
|
||||
if executor == nil {
|
||||
continue
|
||||
}
|
||||
err := executor()
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case Warning:
|
||||
log.Warning(err.Error())
|
||||
return nil
|
||||
default:
|
||||
log.Debugf("%+v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewConditionalExecutor creates a new executor based on conditions
|
||||
func NewConditionalExecutor(conditional Conditional, trueExecutor Executor, falseExecutor Executor) Executor {
|
||||
return func() error {
|
||||
if conditional() {
|
||||
if trueExecutor != nil {
|
||||
return trueExecutor()
|
||||
}
|
||||
} else {
|
||||
if falseExecutor != nil {
|
||||
return falseExecutor()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func executeWithChan(executor Executor, errChan chan error) {
|
||||
errChan <- executor()
|
||||
}
|
||||
|
||||
// NewErrorExecutor creates a new executor that always errors out
|
||||
func NewErrorExecutor(err error) Executor {
|
||||
return func() error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// NewParallelExecutor creates a new executor that runs the given executors in parallel
|
||||
func NewParallelExecutor(executors ...Executor) Executor {
|
||||
return func() error {
|
||||
errChan := make(chan error)
|
||||
|
||||
for _, executor := range executors {
|
||||
go executeWithChan(executor, errChan)
|
||||
}
|
||||
|
||||
for i := 0; i < len(executors); i++ {
|
||||
err := <-errChan
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
84
common/executor_test.go
Normal file
|
@ -0,0 +1,84 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewWorkflow(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
// empty
|
||||
emptyWorkflow := NewPipelineExecutor()
|
||||
assert.Nil(emptyWorkflow())
|
||||
|
||||
// error case
|
||||
errorWorkflow := NewErrorExecutor(fmt.Errorf("test error"))
|
||||
assert.NotNil(errorWorkflow())
|
||||
|
||||
// multiple success case
|
||||
runcount := 0
|
||||
successWorkflow := NewPipelineExecutor(
|
||||
func() error {
|
||||
runcount = runcount + 1
|
||||
return nil
|
||||
},
|
||||
func() error {
|
||||
runcount = runcount + 1
|
||||
return nil
|
||||
})
|
||||
assert.Nil(successWorkflow())
|
||||
assert.Equal(2, runcount)
|
||||
}
|
||||
|
||||
func TestNewConditionalExecutor(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
trueCount := 0
|
||||
falseCount := 0
|
||||
|
||||
err := NewConditionalExecutor(func() bool {
|
||||
return false
|
||||
}, func() error {
|
||||
trueCount++
|
||||
return nil
|
||||
}, func() error {
|
||||
falseCount++
|
||||
return nil
|
||||
})()
|
||||
|
||||
assert.Nil(err)
|
||||
assert.Equal(0, trueCount)
|
||||
assert.Equal(1, falseCount)
|
||||
|
||||
err = NewConditionalExecutor(func() bool {
|
||||
return true
|
||||
}, func() error {
|
||||
trueCount++
|
||||
return nil
|
||||
}, func() error {
|
||||
falseCount++
|
||||
return nil
|
||||
})()
|
||||
|
||||
assert.Nil(err)
|
||||
assert.Equal(1, trueCount)
|
||||
assert.Equal(1, falseCount)
|
||||
}
|
||||
|
||||
func TestNewParallelExecutor(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
count := 0
|
||||
emptyWorkflow := NewPipelineExecutor(func() error {
|
||||
count++
|
||||
return nil
|
||||
})
|
||||
|
||||
err := NewParallelExecutor(emptyWorkflow, emptyWorkflow)()
|
||||
assert.Equal(2, count)
|
||||
|
||||
assert.Nil(err)
|
||||
}
|
79
common/file.go
Normal file
|
@ -0,0 +1,79 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// CopyFile copy file
|
||||
func CopyFile(source string, dest string) (err error) {
|
||||
sourcefile, err := os.Open(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer sourcefile.Close()
|
||||
|
||||
destfile, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer destfile.Close()
|
||||
|
||||
_, err = io.Copy(destfile, sourcefile)
|
||||
if err == nil {
|
||||
sourceinfo, err := os.Stat(source)
|
||||
if err == nil {
|
||||
_ = os.Chmod(dest, sourceinfo.Mode())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CopyDir recursive copy of directory
|
||||
func CopyDir(source string, dest string) (err error) {
|
||||
|
||||
// get properties of source dir
|
||||
sourceinfo, err := os.Stat(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create dest dir
|
||||
|
||||
err = os.MkdirAll(dest, sourceinfo.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
directory, _ := os.Open(source)
|
||||
|
||||
objects, err := directory.Readdir(-1)
|
||||
|
||||
for _, obj := range objects {
|
||||
|
||||
sourcefilepointer := source + "/" + obj.Name()
|
||||
|
||||
destinationfilepointer := dest + "/" + obj.Name()
|
||||
|
||||
if obj.IsDir() {
|
||||
// create sub-directories - recursively
|
||||
err = CopyDir(sourcefilepointer, destinationfilepointer)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
} else {
|
||||
// perform copy
|
||||
err = CopyFile(sourcefilepointer, destinationfilepointer)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
219
common/git.go
Normal file
|
@ -0,0 +1,219 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/go-ini/ini"
|
||||
log "github.com/sirupsen/logrus"
|
||||
git "gopkg.in/src-d/go-git.v4"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var cloneLock sync.Mutex
|
||||
|
||||
// FindGitRevision get the current git revision
|
||||
func FindGitRevision(file string) (shortSha string, sha string, err error) {
|
||||
gitDir, err := findGitDirectory(file)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
head, err := findGitHead(file)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
// load commitid ref
|
||||
refBuf, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gitDir, head))
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
// trim the trailing newline from the ref file before slicing
sha = strings.TrimSpace(string(refBuf))
return sha[:7], sha, nil
|
||||
}
|
||||
|
||||
// FindGitBranch get the current git branch
|
||||
func FindGitBranch(file string) (string, error) {
|
||||
head, err := findGitHead(file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// get branch name
|
||||
branch := strings.TrimPrefix(head, "refs/heads/")
|
||||
log.Debugf("Found branch: %s", branch)
|
||||
return branch, nil
|
||||
}
|
||||
|
||||
func findGitHead(file string) (string, error) {
|
||||
gitDir, err := findGitDirectory(file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
log.Debugf("Loading revision from git directory '%s'", gitDir)
|
||||
|
||||
// load HEAD ref
|
||||
headFile, err := os.Open(fmt.Sprintf("%s/HEAD", gitDir))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
headFile.Close()
|
||||
}()
|
||||
|
||||
headBuffer := new(bytes.Buffer)
|
||||
headBuffer.ReadFrom(bufio.NewReader(headFile))
|
||||
head := make(map[string]string)
|
||||
yaml.Unmarshal(headBuffer.Bytes(), head)
|
||||
|
||||
log.Debugf("HEAD points to '%s'", head["ref"])
|
||||
|
||||
return head["ref"], nil
|
||||
}
|
||||
|
||||
// FindGithubRepo get the repo
|
||||
func FindGithubRepo(file string) (string, error) {
|
||||
url, err := findGitRemoteURL(file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_, slug, err := findGitSlug(url)
|
||||
return slug, err
|
||||
}
|
||||
|
||||
func findGitRemoteURL(file string) (string, error) {
|
||||
gitDir, err := findGitDirectory(file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
log.Debugf("Loading slug from git directory '%s'", gitDir)
|
||||
|
||||
gitconfig, err := ini.InsensitiveLoad(fmt.Sprintf("%s/config", gitDir))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
remote, err := gitconfig.GetSection("remote \"origin\"")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
urlKey, err := remote.GetKey("url")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
url := urlKey.String()
|
||||
return url, nil
|
||||
}
|
||||
|
||||
func findGitSlug(url string) (string, string, error) {
|
||||
codeCommitHTTPRegex := regexp.MustCompile(`^http(s?)://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`)
|
||||
codeCommitSSHRegex := regexp.MustCompile(`ssh://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`)
|
||||
httpRegex := regexp.MustCompile("^http(s?)://.*github.com.*/(.+)/(.+).git$")
|
||||
sshRegex := regexp.MustCompile("github.com:(.+)/(.+).git$")
|
||||
|
||||
if matches := codeCommitHTTPRegex.FindStringSubmatch(url); matches != nil {
|
||||
return "CodeCommit", matches[3], nil
|
||||
} else if matches := codeCommitSSHRegex.FindStringSubmatch(url); matches != nil {
|
||||
return "CodeCommit", matches[2], nil
|
||||
} else if matches := httpRegex.FindStringSubmatch(url); matches != nil {
|
||||
return "GitHub", fmt.Sprintf("%s/%s", matches[2], matches[3]), nil
|
||||
} else if matches := sshRegex.FindStringSubmatch(url); matches != nil {
|
||||
return "GitHub", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil
|
||||
}
|
||||
return "", url, nil
|
||||
}
|
||||
|
||||
func findGitDirectory(fromFile string) (string, error) {
|
||||
absPath, err := filepath.Abs(fromFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
log.Debugf("Searching for git directory in %s", absPath)
|
||||
fi, err := os.Stat(absPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var dir string
|
||||
if fi.Mode().IsDir() {
|
||||
dir = absPath
|
||||
} else {
|
||||
dir = path.Dir(absPath)
|
||||
}
|
||||
|
||||
gitPath := path.Join(dir, ".git")
|
||||
fi, err = os.Stat(gitPath)
|
||||
if err == nil && fi.Mode().IsDir() {
|
||||
return gitPath, nil
|
||||
} else if dir == "/" || dir == "C:\\" || dir == "c:\\" {
|
||||
return "", errors.New("unable to find git repo")
|
||||
}
|
||||
|
||||
return findGitDirectory(filepath.Dir(dir))
|
||||
|
||||
}
|
||||
|
||||
// NewGitCloneExecutorInput the input for the NewGitCloneExecutor
|
||||
type NewGitCloneExecutorInput struct {
|
||||
URL *url.URL
|
||||
Ref string
|
||||
Dir string
|
||||
Logger *log.Entry
|
||||
Dryrun bool
|
||||
}
|
||||
|
||||
// NewGitCloneExecutor creates an executor to clone git repos
|
||||
func NewGitCloneExecutor(input NewGitCloneExecutorInput) Executor {
|
||||
return func() error {
|
||||
input.Logger.Infof("git clone '%s'", input.URL.String())
|
||||
input.Logger.Debugf(" cloning %s to %s", input.URL.String(), input.Dir)
|
||||
|
||||
if input.Dryrun {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloneLock.Lock()
|
||||
defer cloneLock.Unlock()
|
||||
|
||||
r, err := git.PlainOpen(input.Dir)
|
||||
if err != nil {
|
||||
r, err = git.PlainClone(input.Dir, false, &git.CloneOptions{
|
||||
URL: input.URL.String(),
|
||||
Progress: input.Logger.WriterLevel(log.DebugLevel),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
w, err := r.Worktree()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.Pull(&git.PullOptions{})
|
||||
input.Logger.Debugf("Cloned %s to %s", input.URL.String(), input.Dir)
|
||||
|
||||
err = w.Checkout(&git.CheckoutOptions{
|
||||
//Branch: plumbing.NewHash(ref),
|
||||
Hash: plumbing.NewHash(input.Ref),
|
||||
})
|
||||
if err != nil {
|
||||
input.Logger.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
input.Logger.Debugf("Checked out %s", input.Ref)
|
||||
return nil
|
||||
}
|
||||
}
|
75
common/git_test.go
Normal file
75
common/git_test.go
Normal file
|
@ -0,0 +1,75 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFindGitSlug(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
var slugTests = []struct {
|
||||
url string // input
|
||||
provider string // expected result
|
||||
slug string // expected result
|
||||
}{
|
||||
{"https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name", "CodeCommit", "my-repo-name"},
|
||||
{"ssh://git-codecommit.us-west-2.amazonaws.com/v1/repos/my-repo", "CodeCommit", "my-repo"},
|
||||
{"git@github.com:nektos/act.git", "GitHub", "nektos/act"},
|
||||
{"https://github.com/nektos/act.git", "GitHub", "nektos/act"},
|
||||
{"http://github.com/nektos/act.git", "GitHub", "nektos/act"},
|
||||
{"http://myotherrepo.com/act.git", "", "http://myotherrepo.com/act.git"},
|
||||
}
|
||||
|
||||
for _, tt := range slugTests {
|
||||
provider, slug, err := findGitSlug(tt.url)
|
||||
|
||||
assert.Nil(err)
|
||||
assert.Equal(tt.provider, provider)
|
||||
assert.Equal(tt.slug, slug)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestFindGitRemoteURL(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
basedir, err := ioutil.TempDir("", "act-test")
|
||||
defer os.RemoveAll(basedir)
|
||||
|
||||
assert.Nil(err)
|
||||
|
||||
err = gitCmd("init", basedir)
|
||||
assert.Nil(err)
|
||||
|
||||
remoteURL := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name"
|
||||
err = gitCmd("config", "-f", fmt.Sprintf("%s/.git/config", basedir), "--add", "remote.origin.url", remoteURL)
|
||||
assert.Nil(err)
|
||||
|
||||
u, err := findGitRemoteURL(basedir)
|
||||
assert.Nil(err)
|
||||
assert.Equal(remoteURL, u)
|
||||
}
|
||||
|
||||
func gitCmd(args ...string) error {
|
||||
var stdout bytes.Buffer
|
||||
cmd := exec.Command("git", args...)
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = ioutil.Discard
|
||||
|
||||
err := cmd.Run()
|
||||
if exitError, ok := err.(*exec.ExitError); ok {
|
||||
if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok {
|
||||
return fmt.Errorf("Exit error %d", waitStatus.ExitStatus())
|
||||
}
|
||||
return exitError
|
||||
}
|
||||
return nil
|
||||
}
|
106
container/docker_build.go
Normal file
106
container/docker_build.go
Normal file
|
@ -0,0 +1,106 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/builder/dockerignore"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/nektos/act/common"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function
|
||||
type NewDockerBuildExecutorInput struct {
|
||||
DockerExecutorInput
|
||||
ContextDir string
|
||||
ImageTag string
|
||||
}
|
||||
|
||||
// NewDockerBuildExecutor function to create a run executor for the container
|
||||
func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
|
||||
return func() error {
|
||||
input.Logger.Infof("docker build -t %s %s", input.ImageTag, input.ContextDir)
|
||||
if input.Dryrun {
|
||||
return nil
|
||||
}
|
||||
|
||||
cli, err := client.NewClientWithOpts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
input.Logger.Debugf("Building image from '%v'", input.ContextDir)
|
||||
|
||||
tags := []string{input.ImageTag}
|
||||
options := types.ImageBuildOptions{
|
||||
Tags: tags,
|
||||
}
|
||||
|
||||
buildContext, err := createBuildContext(input.ContextDir, "Dockerfile")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer buildContext.Close()
|
||||
|
||||
input.Logger.Debugf("Creating image from context dir '%s' with tag '%s'", input.ContextDir, input.ImageTag)
|
||||
resp, err := cli.ImageBuild(input.Ctx, buildContext, options)
|
||||
input.logDockerResponse(resp.Body, err != nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
func createBuildContext(contextDir string, relDockerfile string) (io.ReadCloser, error) {
|
||||
log.Debugf("Creating archive for build context dir '%s' with relative dockerfile '%s'", contextDir, relDockerfile)
|
||||
|
||||
// And canonicalize dockerfile name to a platform-independent one
|
||||
relDockerfile = archive.CanonicalTarNameForPath(relDockerfile)
|
||||
|
||||
f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var excludes []string
|
||||
if err == nil {
|
||||
excludes, err = dockerignore.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// If .dockerignore mentions .dockerignore or the Dockerfile
|
||||
// then make sure we send both files over to the daemon
|
||||
// because Dockerfile is, obviously, needed no matter what, and
|
||||
// .dockerignore is needed to know if either one needs to be
|
||||
// removed. The daemon will remove them for us, if needed, after it
|
||||
// parses the Dockerfile. Ignore errors here, as they will have been
|
||||
// caught by validateContextDirectory above.
|
||||
var includes = []string{"."}
|
||||
keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
|
||||
keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
|
||||
if keepThem1 || keepThem2 {
|
||||
includes = append(includes, ".dockerignore", relDockerfile)
|
||||
}
|
||||
|
||||
compression := archive.Uncompressed
|
||||
buildCtx, err := archive.TarWithOptions(contextDir, &archive.TarOptions{
|
||||
Compression: compression,
|
||||
ExcludePatterns: excludes,
|
||||
IncludeFiles: includes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buildCtx, nil
|
||||
}
|
104
container/docker_common.go
Normal file
104
container/docker_common.go
Normal file
|
@ -0,0 +1,104 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DockerExecutorInput common input params
|
||||
type DockerExecutorInput struct {
|
||||
Ctx context.Context
|
||||
Logger *logrus.Entry
|
||||
Dryrun bool
|
||||
}
|
||||
|
||||
type dockerMessage struct {
|
||||
ID string `json:"id"`
|
||||
Stream string `json:"stream"`
|
||||
Error string `json:"error"`
|
||||
ErrorDetail struct {
|
||||
Message string
|
||||
}
|
||||
Status string `json:"status"`
|
||||
Progress string `json:"progress"`
|
||||
}
|
||||
|
||||
func (i *DockerExecutorInput) logDockerOutput(dockerResponse io.Reader) error {
|
||||
scanner := bufio.NewScanner(dockerResponse)
|
||||
if i.Logger == nil {
|
||||
return nil
|
||||
}
|
||||
for scanner.Scan() {
|
||||
i.Logger.Infof(scanner.Text())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *DockerExecutorInput) streamDockerOutput(dockerResponse io.Reader) error {
|
||||
out := os.Stdout
|
||||
go func() {
|
||||
<-i.Ctx.Done()
|
||||
fmt.Println()
|
||||
}()
|
||||
|
||||
_, err := io.Copy(out, dockerResponse)
|
||||
return err
|
||||
}
|
||||
|
||||
func (i *DockerExecutorInput) writeLog(isError bool, format string, args ...interface{}) {
|
||||
if i.Logger == nil {
|
||||
return
|
||||
}
|
||||
if isError {
|
||||
i.Logger.Errorf(format, args...)
|
||||
} else {
|
||||
i.Logger.Debugf(format, args...)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (i *DockerExecutorInput) logDockerResponse(dockerResponse io.ReadCloser, isError bool) error {
|
||||
if dockerResponse == nil {
|
||||
return nil
|
||||
}
|
||||
defer dockerResponse.Close()
|
||||
|
||||
scanner := bufio.NewScanner(dockerResponse)
|
||||
msg := dockerMessage{}
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
msg.ID = ""
|
||||
msg.Stream = ""
|
||||
msg.Error = ""
|
||||
msg.ErrorDetail.Message = ""
|
||||
msg.Status = ""
|
||||
msg.Progress = ""
|
||||
if err := json.Unmarshal(line, &msg); err == nil {
|
||||
if msg.Error != "" {
|
||||
return fmt.Errorf("%s", msg.Error)
|
||||
}
|
||||
|
||||
if msg.Status != "" {
|
||||
if msg.Progress != "" {
|
||||
i.writeLog(isError, "%s :: %s :: %s\n", msg.Status, msg.ID, msg.Progress)
|
||||
} else {
|
||||
i.writeLog(isError, "%s :: %s\n", msg.Status, msg.ID)
|
||||
}
|
||||
} else if msg.Stream != "" {
|
||||
i.writeLog(isError, msg.Stream)
|
||||
} else {
|
||||
i.writeLog(false, "Unable to handle line: %s", string(line))
|
||||
}
|
||||
} else {
|
||||
i.writeLog(false, "Unable to unmarshal line [%s] ==> %v", string(line), err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
55
container/docker_pull.go
Normal file
55
container/docker_pull.go
Normal file
|
@ -0,0 +1,55 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/nektos/act/common"
|
||||
)
|
||||
|
||||
// NewDockerPullExecutorInput the input for the NewDockerPullExecutor function
|
||||
type NewDockerPullExecutorInput struct {
|
||||
DockerExecutorInput
|
||||
Image string
|
||||
}
|
||||
|
||||
// NewDockerPullExecutor function to create a run executor for the container
|
||||
func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor {
|
||||
return func() error {
|
||||
input.Logger.Infof("docker pull %v", input.Image)
|
||||
|
||||
if input.Dryrun {
|
||||
return nil
|
||||
}
|
||||
|
||||
imageRef := cleanImage(input.Image)
|
||||
input.Logger.Debugf("pulling image '%v'", imageRef)
|
||||
|
||||
cli, err := client.NewClientWithOpts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reader, err := cli.ImagePull(input.Ctx, imageRef, types.ImagePullOptions{})
|
||||
input.logDockerResponse(reader, err != nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func cleanImage(image string) string {
|
||||
imageParts := len(strings.Split(image, "/"))
|
||||
if imageParts == 1 {
|
||||
image = fmt.Sprintf("docker.io/library/%s", image)
|
||||
} else if imageParts == 2 {
|
||||
image = fmt.Sprintf("docker.io/%s", image)
|
||||
}
|
||||
|
||||
return image
|
||||
}
|
29
container/docker_pull_test.go
Normal file
29
container/docker_pull_test.go
Normal file
|
@ -0,0 +1,29 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
}
|
||||
|
||||
func TestCleanImage(t *testing.T) {
|
||||
tables := []struct {
|
||||
imageIn string
|
||||
imageOut string
|
||||
}{
|
||||
{"myhost.com/foo/bar", "myhost.com/foo/bar"},
|
||||
{"ubuntu", "docker.io/library/ubuntu"},
|
||||
{"ubuntu:18.04", "docker.io/library/ubuntu:18.04"},
|
||||
{"cibuilds/hugo:0.53", "docker.io/cibuilds/hugo:0.53"},
|
||||
}
|
||||
|
||||
for _, table := range tables {
|
||||
imageOut := cleanImage(table.imageIn)
|
||||
assert.Equal(t, table.imageOut, imageOut)
|
||||
}
|
||||
}
|
184
container/docker_run.go
Normal file
184
container/docker_run.go
Normal file
|
@ -0,0 +1,184 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/nektos/act/common"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
// NewDockerRunExecutorInput the input for the NewDockerRunExecutor function
|
||||
type NewDockerRunExecutorInput struct {
|
||||
DockerExecutorInput
|
||||
Image string
|
||||
Entrypoint string
|
||||
Cmd []string
|
||||
WorkingDir string
|
||||
Env []string
|
||||
Binds []string
|
||||
Content map[string]io.Reader
|
||||
Volumes []string
|
||||
Name string
|
||||
}
|
||||
|
||||
// NewDockerRunExecutor function to create a run executor for the container
|
||||
func NewDockerRunExecutor(input NewDockerRunExecutorInput) common.Executor {
|
||||
return func() error {
|
||||
|
||||
input.Logger.Infof("docker run %s %s", input.Image, input.Cmd)
|
||||
if input.Dryrun {
|
||||
return nil
|
||||
}
|
||||
|
||||
cli, err := client.NewClientWithOpts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
containerID, err := createContainer(input, cli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer removeContainer(input, cli, containerID)
|
||||
|
||||
err = copyContentToContainer(input, cli, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = attachContainer(input, cli, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = startContainer(input, cli, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return waitContainer(input, cli, containerID)
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func createContainer(input NewDockerRunExecutorInput, cli *client.Client) (string, error) {
|
||||
isTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))
|
||||
|
||||
cmd := input.Cmd
|
||||
if len(input.Cmd) == 1 {
|
||||
cmd = strings.Split(cmd[0], " ")
|
||||
}
|
||||
|
||||
config := &container.Config{
|
||||
Image: input.Image,
|
||||
Cmd: cmd,
|
||||
WorkingDir: input.WorkingDir,
|
||||
Env: input.Env,
|
||||
Tty: isTerminal,
|
||||
}
|
||||
|
||||
if len(input.Volumes) > 0 {
|
||||
config.Volumes = make(map[string]struct{})
|
||||
for _, vol := range input.Volumes {
|
||||
config.Volumes[vol] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if input.Entrypoint != "" {
|
||||
config.Entrypoint = []string{input.Entrypoint}
|
||||
}
|
||||
resp, err := cli.ContainerCreate(input.Ctx, config, &container.HostConfig{
|
||||
Binds: input.Binds,
|
||||
}, nil, input.Name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
input.Logger.Debugf("Created container name=%s id=%v from image %v", input.Name, resp.ID, input.Image)
|
||||
log.Debugf("ENV ==> %v", input.Env)
|
||||
|
||||
return resp.ID, nil
|
||||
}
|
||||
|
||||
func removeContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) {
|
||||
err := cli.ContainerRemove(context.Background(), containerID, types.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
})
|
||||
if err != nil {
|
||||
input.Logger.Errorf("%v", err)
|
||||
}
|
||||
|
||||
input.Logger.Debugf("Removed container: %v", containerID)
|
||||
}
|
||||
|
||||
func copyContentToContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) error {
|
||||
for dstPath, srcReader := range input.Content {
|
||||
input.Logger.Debugf("Extracting content to '%s'", dstPath)
|
||||
err := cli.CopyToContainer(input.Ctx, containerID, dstPath, srcReader, types.CopyToContainerOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func attachContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) error {
|
||||
out, err := cli.ContainerAttach(input.Ctx, containerID, types.ContainerAttachOptions{
|
||||
Stream: true,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))
|
||||
if !isTerminal || os.Getenv("NORAW") != "" {
|
||||
go input.logDockerOutput(out.Reader)
|
||||
} else {
|
||||
go input.streamDockerOutput(out.Reader)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func startContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) error {
|
||||
input.Logger.Debugf("STARTING image=%s entrypoint=%s cmd=%v", input.Image, input.Entrypoint, input.Cmd)
|
||||
|
||||
if err := cli.ContainerStart(input.Ctx, containerID, types.ContainerStartOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
input.Logger.Debugf("Started container: %v", containerID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitContainer(input NewDockerRunExecutorInput, cli *client.Client, containerID string) error {
|
||||
statusCh, errCh := cli.ContainerWait(input.Ctx, containerID, container.WaitConditionNotRunning)
|
||||
var statusCode int64
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case status := <-statusCh:
|
||||
statusCode = status.StatusCode
|
||||
}
|
||||
|
||||
input.Logger.Debugf("Return status: %v", statusCode)
|
||||
|
||||
if statusCode == 0 {
|
||||
return nil
|
||||
} else if statusCode == 78 {
|
||||
return fmt.Errorf("exiting with `NEUTRAL`: 78")
|
||||
}
|
||||
|
||||
return fmt.Errorf("exit with `FAILURE`: %v", statusCode)
|
||||
}
|
21
example/.github/main.workflow
vendored
Normal file
21
example/.github/main.workflow
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
workflow "build-and-deploy" {
|
||||
on = "push"
|
||||
resolves = ["deploy"]
|
||||
}
|
||||
|
||||
action "build" {
|
||||
uses = "./action1"
|
||||
args = "echo 'build'"
|
||||
}
|
||||
|
||||
action "test" {
|
||||
uses = "docker://ubuntu:18.04"
|
||||
args = "echo 'test'"
|
||||
needs = ["build"]
|
||||
}
|
||||
|
||||
action "deploy" {
|
||||
uses = "./action2"
|
||||
args = "echo 'deploy'"
|
||||
needs = ["test"]
|
||||
}
|
1
example/action1/Dockerfile
Normal file
1
example/action1/Dockerfile
Normal file
|
@ -0,0 +1 @@
|
|||
FROM ubuntu:18.04
|
1
example/action2/Dockerfile
Normal file
1
example/action2/Dockerfile
Normal file
|
@ -0,0 +1 @@
|
|||
FROM alpine:3.8
|
41
go.mod
Normal file
41
go.mod
Normal file
|
@ -0,0 +1,41 @@
|
|||
module github.com/nektos/act
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||
github.com/Microsoft/go-winio v0.4.11 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect
|
||||
github.com/docker/distribution v2.7.0+incompatible // indirect
|
||||
github.com/docker/docker v1.13.1
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.3.3 // indirect
|
||||
github.com/go-ini/ini v1.41.0
|
||||
github.com/gogo/protobuf v1.2.0 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
|
||||
github.com/gorilla/context v1.1.1 // indirect
|
||||
github.com/gorilla/mux v1.6.2 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0
|
||||
github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jtolds/gls v4.2.1+incompatible // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||
github.com/opencontainers/runc v0.1.1 // indirect
|
||||
github.com/sirupsen/logrus v1.2.0
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/spf13/pflag v1.0.3 // indirect
|
||||
github.com/stretchr/testify v1.2.2
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3 // indirect
|
||||
golang.org/x/sys v0.0.0-20181030150119-7e31e0c00fa0 // indirect
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c // indirect
|
||||
google.golang.org/grpc v1.17.0 // indirect
|
||||
gopkg.in/ini.v1 v1.41.0 // indirect
|
||||
gopkg.in/src-d/go-git.v4 v4.8.1
|
||||
gopkg.in/yaml.v2 v2.2.2
|
||||
gotest.tools v2.2.0+incompatible // indirect
|
||||
)
|
||||
|
||||
replace github.com/docker/docker => github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb
|
150
go.sum
Normal file
150
go.sum
Normal file
|
@ -0,0 +1,150 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
|
||||
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M=
|
||||
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
|
||||
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb h1:PyjxRdW1mqCmSoxy/6uP01P7CGbsD+woX+oOWbaUPwQ=
|
||||
github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb/go.mod h1:3CPr2caMgTHxxIAZgEMd3uLYPDlRvPqCpyeRf6ncPcY=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
|
||||
github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-ini/ini v1.41.0 h1:526aoxDtxRHFQKMZfcX2OG9oOI8TJ5yPLM0Mkno/uTY=
|
||||
github.com/go-ini/ini v1.41.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:kQWxfPIHVLbgLzphqk3QUflDy9QdksZR4ygR807bpy0=
|
||||
github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
|
||||
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
|
||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
|
||||
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
|
||||
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
|
||||
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181030150119-7e31e0c00fa0 h1:biUuj9O+0+XckRUCDzjoOGm6yFV5c0IHbm1ODP3e4Zw=
|
||||
golang.org/x/sys v0.0.0-20181030150119-7e31e0c00fa0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 h1:JG/0uqcGdTNgq7FdU+61l5Pdmb8putNZlXb65bJBROs=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.41.0 h1:Ka3ViY6gNYSKiVy71zXBEqKplnV35ImDLVG+8uoIklE=
|
||||
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo=
|
||||
gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
|
||||
gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs=
|
||||
gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
|
||||
gopkg.in/src-d/go-git.v4 v4.8.1 h1:aAyBmkdE1QUUEHcP4YFCGKmsMQRAuRmUcPEQR7lOAa0=
|
||||
gopkg.in/src-d/go-git.v4 v4.8.1/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk=
|
||||
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
401
install.sh
Normal file
401
install.sh
Normal file
|
@ -0,0 +1,401 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
# Code generated by godownloader on 2019-01-15T06:15:28Z. DO NOT EDIT.
|
||||
#
|
||||
|
||||
usage() {
|
||||
this=$1
|
||||
cat <<EOF
|
||||
$this: download go binaries for nektos/act
|
||||
|
||||
Usage: $this [-b] bindir [-d] [tag]
|
||||
-b sets bindir or installation directory, Defaults to /usr/local/bin
|
||||
-d turns on debug logging
|
||||
[tag] is a tag from
|
||||
https://github.com/nektos/act/releases
|
||||
If tag is missing, then the latest will be used.
|
||||
|
||||
Generated by godownloader
|
||||
https://github.com/goreleaser/godownloader
|
||||
|
||||
EOF
|
||||
exit 2
|
||||
}
|
||||
|
||||
parse_args() {
|
||||
#BINDIR is /usr/local/bin unless set be ENV
|
||||
# over-ridden by flag below
|
||||
|
||||
BINDIR=${BINDIR:-/usr/local/bin}
|
||||
while getopts "b:dh?" arg; do
|
||||
case "$arg" in
|
||||
b) BINDIR="$OPTARG" ;;
|
||||
d) log_set_priority 10 ;;
|
||||
h | \?) usage "$0" ;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND - 1))
|
||||
TAG=$1
|
||||
}
|
||||
# this function wraps all the destructive operations
|
||||
# if a curl|bash cuts off the end of the script due to
|
||||
# network, either nothing will happen or will syntax error
|
||||
# out preventing half-done work
|
||||
execute() {
|
||||
tmpdir=$(mktmpdir)
|
||||
log_debug "downloading files into ${tmpdir}"
|
||||
http_download "${tmpdir}/${TARBALL}" "${TARBALL_URL}"
|
||||
http_download "${tmpdir}/${CHECKSUM}" "${CHECKSUM_URL}"
|
||||
hash_sha256_verify "${tmpdir}/${TARBALL}" "${tmpdir}/${CHECKSUM}"
|
||||
srcdir="${tmpdir}"
|
||||
(cd "${tmpdir}" && untar "${TARBALL}")
|
||||
install -d "${BINDIR}"
|
||||
for binexe in "act" ; do
|
||||
if [ "$OS" = "windows" ]; then
|
||||
binexe="${binexe}.exe"
|
||||
fi
|
||||
install "${srcdir}/${binexe}" "${BINDIR}/"
|
||||
log_info "installed ${BINDIR}/${binexe}"
|
||||
done
|
||||
}
|
||||
is_supported_platform() {
|
||||
platform=$1
|
||||
found=1
|
||||
case "$platform" in
|
||||
darwin/amd64) found=0 ;;
|
||||
darwin/386) found=0 ;;
|
||||
linux/amd64) found=0 ;;
|
||||
linux/386) found=0 ;;
|
||||
windows/amd64) found=0 ;;
|
||||
windows/386) found=0 ;;
|
||||
esac
|
||||
return $found
|
||||
}
|
||||
check_platform() {
|
||||
if is_supported_platform "$PLATFORM"; then
|
||||
# optional logging goes here
|
||||
true
|
||||
else
|
||||
log_crit "platform $PLATFORM is not supported. Make sure this script is up-to-date and file request at https://github.com/${PREFIX}/issues/new"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
tag_to_version() {
|
||||
if [ -z "${TAG}" ]; then
|
||||
log_info "checking GitHub for latest tag"
|
||||
else
|
||||
log_info "checking GitHub for tag '${TAG}'"
|
||||
fi
|
||||
REALTAG=$(github_release "$OWNER/$REPO" "${TAG}") && true
|
||||
if test -z "$REALTAG"; then
|
||||
log_crit "unable to find '${TAG}' - use 'latest' or see https://github.com/${PREFIX}/releases for details"
|
||||
exit 1
|
||||
fi
|
||||
# if version starts with 'v', remove it
|
||||
TAG="$REALTAG"
|
||||
VERSION=${TAG#v}
|
||||
}
|
||||
adjust_format() {
|
||||
# change format (tar.gz or zip) based on ARCH
|
||||
case ${ARCH} in
|
||||
windows) FORMAT=zip ;;
|
||||
esac
|
||||
true
|
||||
}
|
||||
adjust_os() {
|
||||
# adjust archive name based on OS
|
||||
case ${OS} in
|
||||
386) OS=i386 ;;
|
||||
amd64) OS=x86_64 ;;
|
||||
darwin) OS=Darwin ;;
|
||||
linux) OS=Linux ;;
|
||||
windows) OS=Windows ;;
|
||||
esac
|
||||
true
|
||||
}
|
||||
adjust_arch() {
|
||||
# adjust archive name based on ARCH
|
||||
case ${ARCH} in
|
||||
386) ARCH=i386 ;;
|
||||
amd64) ARCH=x86_64 ;;
|
||||
darwin) ARCH=Darwin ;;
|
||||
linux) ARCH=Linux ;;
|
||||
windows) ARCH=Windows ;;
|
||||
esac
|
||||
true
|
||||
}
|
||||
|
||||
cat /dev/null <<EOF
|
||||
------------------------------------------------------------------------
|
||||
https://github.com/client9/shlib - portable posix shell functions
|
||||
Public domain - http://unlicense.org
|
||||
https://github.com/client9/shlib/blob/master/LICENSE.md
|
||||
but credit (and pull requests) appreciated.
|
||||
------------------------------------------------------------------------
|
||||
EOF
|
||||
is_command() {
|
||||
command -v "$1" >/dev/null
|
||||
}
|
||||
echoerr() {
|
||||
echo "$@" 1>&2
|
||||
}
|
||||
log_prefix() {
|
||||
echo "$0"
|
||||
}
|
||||
_logp=6
|
||||
log_set_priority() {
|
||||
_logp="$1"
|
||||
}
|
||||
log_priority() {
|
||||
if test -z "$1"; then
|
||||
echo "$_logp"
|
||||
return
|
||||
fi
|
||||
[ "$1" -le "$_logp" ]
|
||||
}
|
||||
log_tag() {
|
||||
case $1 in
|
||||
0) echo "emerg" ;;
|
||||
1) echo "alert" ;;
|
||||
2) echo "crit" ;;
|
||||
3) echo "err" ;;
|
||||
4) echo "warning" ;;
|
||||
5) echo "notice" ;;
|
||||
6) echo "info" ;;
|
||||
7) echo "debug" ;;
|
||||
*) echo "$1" ;;
|
||||
esac
|
||||
}
|
||||
log_debug() {
|
||||
log_priority 7 || return 0
|
||||
echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
|
||||
}
|
||||
log_info() {
|
||||
log_priority 6 || return 0
|
||||
echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
|
||||
}
|
||||
log_err() {
|
||||
log_priority 3 || return 0
|
||||
echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
|
||||
}
|
||||
log_crit() {
|
||||
log_priority 2 || return 0
|
||||
echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
|
||||
}
|
||||
uname_os() {
|
||||
os=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
case "$os" in
|
||||
msys_nt) os="windows" ;;
|
||||
esac
|
||||
echo "$os"
|
||||
}
|
||||
uname_arch() {
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64) arch="amd64" ;;
|
||||
x86) arch="386" ;;
|
||||
i686) arch="386" ;;
|
||||
i386) arch="386" ;;
|
||||
aarch64) arch="arm64" ;;
|
||||
armv5*) arch="armv5" ;;
|
||||
armv6*) arch="armv6" ;;
|
||||
armv7*) arch="armv7" ;;
|
||||
esac
|
||||
echo ${arch}
|
||||
}
|
||||
uname_os_check() {
|
||||
os=$(uname_os)
|
||||
case "$os" in
|
||||
darwin) return 0 ;;
|
||||
dragonfly) return 0 ;;
|
||||
freebsd) return 0 ;;
|
||||
linux) return 0 ;;
|
||||
android) return 0 ;;
|
||||
nacl) return 0 ;;
|
||||
netbsd) return 0 ;;
|
||||
openbsd) return 0 ;;
|
||||
plan9) return 0 ;;
|
||||
solaris) return 0 ;;
|
||||
windows) return 0 ;;
|
||||
esac
|
||||
log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
|
||||
return 1
|
||||
}
|
||||
uname_arch_check() {
|
||||
arch=$(uname_arch)
|
||||
case "$arch" in
|
||||
386) return 0 ;;
|
||||
amd64) return 0 ;;
|
||||
arm64) return 0 ;;
|
||||
armv5) return 0 ;;
|
||||
armv6) return 0 ;;
|
||||
armv7) return 0 ;;
|
||||
ppc64) return 0 ;;
|
||||
ppc64le) return 0 ;;
|
||||
mips) return 0 ;;
|
||||
mipsle) return 0 ;;
|
||||
mips64) return 0 ;;
|
||||
mips64le) return 0 ;;
|
||||
s390x) return 0 ;;
|
||||
amd64p32) return 0 ;;
|
||||
esac
|
||||
log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
|
||||
return 1
|
||||
}
|
||||
untar() {
|
||||
tarball=$1
|
||||
case "${tarball}" in
|
||||
*.tar.gz | *.tgz) tar -xzf "${tarball}" ;;
|
||||
*.tar) tar -xf "${tarball}" ;;
|
||||
*.zip) unzip "${tarball}" ;;
|
||||
*)
|
||||
log_err "untar unknown archive format for ${tarball}"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
mktmpdir() {
|
||||
test -z "$TMPDIR" && TMPDIR="$(mktemp -d)"
|
||||
mkdir -p "${TMPDIR}"
|
||||
echo "${TMPDIR}"
|
||||
}
|
||||
http_download_curl() {
|
||||
local_file=$1
|
||||
source_url=$2
|
||||
header=$3
|
||||
if [ -z "$header" ]; then
|
||||
code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
|
||||
else
|
||||
code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
|
||||
fi
|
||||
if [ "$code" != "200" ]; then
|
||||
log_debug "http_download_curl received HTTP status $code"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
http_download_wget() {
|
||||
local_file=$1
|
||||
source_url=$2
|
||||
header=$3
|
||||
if [ -z "$header" ]; then
|
||||
wget -q -O "$local_file" "$source_url"
|
||||
else
|
||||
wget -q --header "$header" -O "$local_file" "$source_url"
|
||||
fi
|
||||
}
|
||||
http_download() {
|
||||
log_debug "http_download $2"
|
||||
if is_command curl; then
|
||||
http_download_curl "$@"
|
||||
return
|
||||
elif is_command wget; then
|
||||
http_download_wget "$@"
|
||||
return
|
||||
fi
|
||||
log_crit "http_download unable to find wget or curl"
|
||||
return 1
|
||||
}
|
||||
http_copy() {
|
||||
tmp=$(mktemp)
|
||||
http_download "${tmp}" "$1" "$2" || return 1
|
||||
body=$(cat "$tmp")
|
||||
rm -f "${tmp}"
|
||||
echo "$body"
|
||||
}
|
||||
github_release() {
|
||||
owner_repo=$1
|
||||
version=$2
|
||||
test -z "$version" && version="latest"
|
||||
giturl="https://github.com/${owner_repo}/releases/${version}"
|
||||
json=$(http_copy "$giturl" "Accept:application/json")
|
||||
test -z "$json" && return 1
|
||||
version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
|
||||
test -z "$version" && return 1
|
||||
echo "$version"
|
||||
}
|
||||
hash_sha256() {
|
||||
TARGET=${1:-/dev/stdin}
|
||||
if is_command gsha256sum; then
|
||||
hash=$(gsha256sum "$TARGET") || return 1
|
||||
echo "$hash" | cut -d ' ' -f 1
|
||||
elif is_command sha256sum; then
|
||||
hash=$(sha256sum "$TARGET") || return 1
|
||||
echo "$hash" | cut -d ' ' -f 1
|
||||
elif is_command shasum; then
|
||||
hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
|
||||
echo "$hash" | cut -d ' ' -f 1
|
||||
elif is_command openssl; then
|
||||
hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1
|
||||
echo "$hash" | cut -d ' ' -f a
|
||||
else
|
||||
log_crit "hash_sha256 unable to find command to compute sha-256 hash"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
hash_sha256_verify() {
|
||||
TARGET=$1
|
||||
checksums=$2
|
||||
if [ -z "$checksums" ]; then
|
||||
log_err "hash_sha256_verify checksum file not specified in arg2"
|
||||
return 1
|
||||
fi
|
||||
BASENAME=${TARGET##*/}
|
||||
want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
|
||||
if [ -z "$want" ]; then
|
||||
log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
|
||||
return 1
|
||||
fi
|
||||
got=$(hash_sha256 "$TARGET")
|
||||
if [ "$want" != "$got" ]; then
|
||||
log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
cat /dev/null <<EOF
|
||||
------------------------------------------------------------------------
|
||||
End of functions from https://github.com/client9/shlib
|
||||
------------------------------------------------------------------------
|
||||
EOF
|
||||
|
||||
PROJECT_NAME="act"
|
||||
OWNER=nektos
|
||||
REPO="act"
|
||||
BINARY=act
|
||||
FORMAT=tar.gz
|
||||
OS=$(uname_os)
|
||||
ARCH=$(uname_arch)
|
||||
PREFIX="$OWNER/$REPO"
|
||||
|
||||
# use in logging routines
|
||||
log_prefix() {
|
||||
echo "$PREFIX"
|
||||
}
|
||||
PLATFORM="${OS}/${ARCH}"
|
||||
GITHUB_DOWNLOAD=https://github.com/${OWNER}/${REPO}/releases/download
|
||||
|
||||
uname_os_check "$OS"
|
||||
uname_arch_check "$ARCH"
|
||||
|
||||
parse_args "$@"
|
||||
|
||||
check_platform
|
||||
|
||||
tag_to_version
|
||||
|
||||
adjust_format
|
||||
|
||||
adjust_os
|
||||
|
||||
adjust_arch
|
||||
|
||||
log_info "found version: ${VERSION} for ${TAG}/${OS}/${ARCH}"
|
||||
|
||||
NAME=${PROJECT_NAME}_${OS}_${ARCH}
|
||||
TARBALL=${NAME}.${FORMAT}
|
||||
TARBALL_URL=${GITHUB_DOWNLOAD}/${TAG}/${TARBALL}
|
||||
CHECKSUM=checksums.txt
|
||||
CHECKSUM_URL=${GITHUB_DOWNLOAD}/${TAG}/${CHECKSUM}
|
||||
|
||||
|
||||
execute
|
34
main.go
Normal file
34
main.go
Normal file
|
@ -0,0 +1,34 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
|
||||
"github.com/nektos/act/cmd"
|
||||
)
|
||||
|
||||
var version string
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// trap Ctrl+C and call cancel on the context
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt)
|
||||
defer func() {
|
||||
signal.Stop(c)
|
||||
cancel()
|
||||
}()
|
||||
go func() {
|
||||
select {
|
||||
case <-c:
|
||||
cancel()
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}()
|
||||
|
||||
// run the command
|
||||
cmd.Execute(ctx, version)
|
||||
}
|
Loading…
Reference in a new issue