Merge pull request 'Bump nektos/act to 0.2.59' (#90) from harryzcy/act:bump-nektos-act into main
Reviewed-on: https://gitea.com/gitea/act/pulls/90
Commit e6fec7324e
50 changed files with 1103 additions and 357 deletions

.github/workflows/checks.yml (vendored): 42 changed lines

@@ -15,10 +15,10 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod
           check-latest: true
@@ -26,7 +26,7 @@ jobs:
         with:
           version: v1.53
           only-new-issues: true
-      - uses: megalinter/megalinter/flavors/go@v7.4.0
+      - uses: megalinter/megalinter/flavors/go@v7.8.0
         env:
           DEFAULT_BRANCH: master
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -38,12 +38,12 @@ jobs:
     name: test-linux
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 2
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod
           check-latest: true
@@ -61,7 +61,7 @@ jobs:
       - name: Run act from cli
         run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
       - name: Upload Codecov report
-        uses: codecov/codecov-action@v3.1.4
+        uses: codecov/codecov-action@v3.1.5
         with:
           files: coverage.txt
           fail_ci_if_error: true # optional (default = false)
@@ -75,10 +75,10 @@ jobs:
     name: test-${{matrix.os}}
     runs-on: ${{matrix.os}}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 2
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod
           check-latest: true
@@ -92,8 +92,8 @@ jobs:
     name: snapshot
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod
           check-latest: true
@@ -111,67 +111,67 @@ jobs:
           args: release --snapshot --clean
       - name: Capture x86_64 (64-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-amd64
           path: dist/act_linux_amd64_v1/act
       - name: Capture i386 (32-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-i386
           path: dist/act_linux_386/act
       - name: Capture arm64 (64-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-arm64
           path: dist/act_linux_arm64/act
       - name: Capture armv6 (32-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-armv6
           path: dist/act_linux_arm_6/act
       - name: Capture armv7 (32-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-armv7
           path: dist/act_linux_arm_7/act
       - name: Capture x86_64 (64-bit) Windows binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-windows-amd64
           path: dist/act_windows_amd64_v1/act.exe
       - name: Capture i386 (32-bit) Windows binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-windows-i386
           path: dist/act_windows_386/act.exe
       - name: Capture arm64 (64-bit) Windows binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-windows-arm64
           path: dist/act_windows_arm64/act.exe
       - name: Capture armv7 (32-bit) Windows binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-windows-armv7
           path: dist/act_windows_arm_7/act.exe
       - name: Capture x86_64 (64-bit) MacOS binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-macos-amd64
           path: dist/act_darwin_amd64_v1/act
       - name: Capture arm64 (64-bit) MacOS binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-macos-arm64
           path: dist/act_darwin_arm64/act

.github/workflows/promote.yml (vendored): 4 changed lines

@@ -9,13 +9,13 @@ jobs:
     name: promote
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
           ref: master
           token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
       - uses: fregante/setup-git-user@v2
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod
           check-latest: true

.github/workflows/release.yml (vendored): 6 changed lines

@@ -9,10 +9,10 @@ jobs:
     name: release
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod
           check-latest: true
@@ -43,7 +43,7 @@ jobs:
           apiKey: ${{ secrets.CHOCO_APIKEY }}
           push: true
       - name: GitHub CLI extension
-        uses: actions/github-script@v6
+        uses: actions/github-script@v7
         with:
           github-token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
           script: |

.github/workflows/stale.yml (vendored): 2 changed lines

@@ -8,7 +8,7 @@ jobs:
     name: Stale
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v8
+      - uses: actions/stale@v9
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           stale-issue-message: 'Issue is stale and will be closed in 14 days unless there is new activity'

@@ -71,6 +71,10 @@ pull_request_rules:
       - and:
           - 'approved-reviews-by=@nektos/act-maintainers'
           - '#approved-reviews-by>=2'
+      - and:
+          - 'author=@nektos/act-maintainers'
+          - 'approved-reviews-by=@nektos/act-maintainers'
+          - '#approved-reviews-by>=1'
       - -draft
       - -merged
       - -closed

VERSION: 2 changed lines

@@ -1 +1 @@
-0.2.52
+0.2.59

@@ -55,7 +55,10 @@ type Input struct {
     replaceGheActionTokenWithGithubCom string
     matrix                             []string
     actionCachePath                    string
+    actionOfflineMode                  bool
     logPrefixJobID                     bool
+    networkName                        string
+    useNewActionCache                  bool
 }

 func (i *Input) resolve(path string) string {

@@ -15,7 +15,7 @@ func (i *Input) newPlatforms() map[string]string {
     for _, p := range i.platforms {
         pParts := strings.Split(p, "=")
         if len(pParts) == 2 {
-            platforms[pParts[0]] = pParts[1]
+            platforms[strings.ToLower(pParts[0])] = pParts[1]
         }
     }
     return platforms
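
The hunk above lowercases the platform label before it is stored, so a mapping such as "-P Ubuntu-Latest=node:16-buster-slim" resolves the same way as "-P ubuntu-latest=node:16-buster-slim". A minimal standalone sketch of that parsing (the sample values are illustrative only; the real method also starts from act's default platform map):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // newPlatforms mirrors the parsing shown in the hunk above: each "key=value"
    // entry is split on "=" and the key is lowercased before being stored.
    func newPlatforms(entries []string) map[string]string {
    	platforms := map[string]string{}
    	for _, p := range entries {
    		pParts := strings.Split(p, "=")
    		if len(pParts) == 2 {
    			platforms[strings.ToLower(pParts[0])] = pParts[1]
    		}
    	}
    	return platforms
    }

    func main() {
    	m := newPlatforms([]string{"Ubuntu-Latest=node:16-buster-slim"})
    	fmt.Println(m["ubuntu-latest"]) // node:16-buster-slim
    }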

cmd/root.go: 54 changed lines

@@ -14,6 +14,7 @@ import (
     "github.com/AlecAivazis/survey/v2"
     "github.com/adrg/xdg"
     "github.com/andreaskoch/go-fswatch"
+    docker_container "github.com/docker/docker/api/types/container"
     "github.com/joho/godotenv"
     gitignore "github.com/sabhiram/go-gitignore"
     log "github.com/sirupsen/logrus"

@@ -96,6 +97,9 @@ func Execute(ctx context.Context, version string) {
     rootCmd.PersistentFlags().StringVarP(&input.cacheServerAddr, "cache-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the cache server binds.")
     rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the artifact server listens. 0 means a randomly available port.")
     rootCmd.PersistentFlags().StringVarP(&input.actionCachePath, "action-cache-path", "", filepath.Join(CacheHomeDir, "act"), "Defines the path where the actions get cached and host workspaces created.")
+    rootCmd.PersistentFlags().BoolVarP(&input.actionOfflineMode, "action-offline-mode", "", false, "If action contents exists, it will not be fetch and pull again. If turn on this,will turn off force pull")
+    rootCmd.PersistentFlags().StringVarP(&input.networkName, "network", "", "host", "Sets a docker network name. Defaults to host.")
+    rootCmd.PersistentFlags().BoolVarP(&input.useNewActionCache, "use-new-action-cache", "", false, "Enable using the new Action Cache for storing Actions locally")
     rootCmd.SetArgs(args())

     if err := rootCmd.Execute(); err != nil {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Return locations where Act's config can be found in order: XDG spec, .actrc in HOME directory, .actrc in invocation directory
|
||||||
func configLocations() []string {
|
func configLocations() []string {
|
||||||
configFileName := ".actrc"
|
configFileName := ".actrc"
|
||||||
|
|
||||||
// reference: https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html
|
homePath := filepath.Join(UserHomeDir, configFileName)
|
||||||
var actrcXdg string
|
invocationPath := filepath.Join(".", configFileName)
|
||||||
for _, fileName := range []string{"act/actrc", configFileName} {
|
|
||||||
if foundConfig, err := xdg.SearchConfigFile(fileName); foundConfig != "" && err == nil {
|
// Though named xdg, adrg's lib support macOS and Windows config paths as well
|
||||||
actrcXdg = foundConfig
|
// It also takes cares of creating the parent folder so we don't need to bother later
|
||||||
break
|
specPath, err := xdg.ConfigFile("act/actrc")
|
||||||
}
|
if err != nil {
|
||||||
|
specPath = homePath
|
||||||
}
|
}
|
||||||
|
|
||||||
return []string{
|
// This order should be enforced since the survey part relies on it
|
||||||
filepath.Join(UserHomeDir, configFileName),
|
return []string{specPath, homePath, invocationPath}
|
||||||
actrcXdg,
|
|
||||||
filepath.Join(".", configFileName),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var commonSocketPaths = []string{
|
var commonSocketPaths = []string{
|
||||||
|
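
The rewrite above switches from xdg.SearchConfigFile, which only finds an already-existing file, to xdg.ConfigFile, which returns the preferred location for "act/actrc" even if the file does not exist yet, and the first entry of the returned slice is the one the later image survey writes to. A minimal sketch of what the new call does, based on the adrg/xdg API; the printed path depends on the platform and environment:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/adrg/xdg"
    )

    // xdg.ConfigFile resolves "act/actrc" under the config base directory
    // (e.g. $XDG_CONFIG_HOME on Linux, or the macOS/Windows equivalent) and
    // creates the parent directory so the file can be written there later.
    func main() {
    	specPath, err := xdg.ConfigFile("act/actrc")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(specPath)
    }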

@@ -265,7 +268,8 @@ func readArgsFile(file string, split bool) []string {
     }()
     scanner := bufio.NewScanner(f)
     for scanner.Scan() {
-        arg := strings.TrimSpace(scanner.Text())
+        arg := os.ExpandEnv(strings.TrimSpace(scanner.Text()))
+
         if strings.HasPrefix(arg, "-") && split {
             args = append(args, regexp.MustCompile(`\s`).Split(arg, 2)...)
         } else if !split {
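
With this change, every argument line read from an args file (such as .actrc) passes through os.ExpandEnv, so "$VAR" and "${VAR}" references are resolved before the line is split into flags. A small sketch of that behaviour; the environment variable name and value below are illustrative, while --action-cache-path is one of the flags defined above:

    package main

    import (
    	"fmt"
    	"os"
    	"strings"
    )

    // Each line is trimmed and then expanded with os.ExpandEnv, mirroring the
    // new readArgsFile behaviour shown in the hunk above.
    func main() {
    	_ = os.Setenv("MY_CACHE", "/tmp/act-cache")
    	line := "--action-cache-path $MY_CACHE"
    	arg := os.ExpandEnv(strings.TrimSpace(line))
    	fmt.Println(arg) // --action-cache-path /tmp/act-cache
    }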

@@ -291,8 +295,8 @@ func cleanup(inputs *Input) func(*cobra.Command, []string) {
     }
 }

-func parseEnvs(env []string, envs map[string]string) bool {
-    if env != nil {
+func parseEnvs(env []string) map[string]string {
+    envs := make(map[string]string, len(env))
     for _, envVar := range env {
         e := strings.SplitN(envVar, `=`, 2)
         if len(e) == 2 {
@@ -301,9 +305,7 @@ func parseEnvs(env []string, envs map[string]string) bool {
             envs[e[0]] = ""
         }
     }
-        return true
-    }
-    return false
+    return envs
 }

 func readYamlFile(file string) (map[string]string, error) {
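
parseEnvs now allocates and returns the map itself instead of filling one passed in by the caller and reporting a bool; readEnvs then merges file contents into that map. A self-contained sketch of the new contract, taken directly from the hunk above (only the demo values are made up):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseEnvs takes KEY=VALUE pairs and returns a freshly allocated map;
    // a bare KEY with no "=" maps to the empty string.
    func parseEnvs(env []string) map[string]string {
    	envs := make(map[string]string, len(env))
    	for _, envVar := range env {
    		e := strings.SplitN(envVar, `=`, 2)
    		if len(e) == 2 {
    			envs[e[0]] = e[1]
    		} else {
    			envs[e[0]] = ""
    		}
    	}
    	return envs
    }

    func main() {
    	fmt.Println(parseEnvs([]string{"FOO=bar", "EMPTY"}))
    	// map[EMPTY: FOO:bar]
    }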

@@ -409,13 +411,11 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
     }

     log.Debugf("Loading environment from %s", input.Envfile())
-    envs := make(map[string]string)
-    _ = parseEnvs(input.envs, envs)
+    envs := parseEnvs(input.envs)
     _ = readEnvs(input.Envfile(), envs)

     log.Debugf("Loading action inputs from %s", input.Inputfile())
-    inputs := make(map[string]string)
-    _ = parseEnvs(input.inputs, inputs)
+    inputs := parseEnvs(input.inputs)
     _ = readEnvs(input.Inputfile(), inputs)

     log.Debugf("Loading secrets from %s", input.Secretfile())

@@ -552,6 +552,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
             }
         }
         if !cfgFound && len(cfgLocations) > 0 {
+            // The first config location refers to the global config folder one
             if err := defaultImageSurvey(cfgLocations[0]); err != nil {
                 log.Fatal(err)
             }

@@ -578,11 +579,12 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
             EventName:                          eventName,
             EventPath:                          input.EventPath(),
             DefaultBranch:                      defaultbranch,
-            ForcePull:                          input.forcePull,
+            ForcePull:                          !input.actionOfflineMode && input.forcePull,
             ForceRebuild:                       input.forceRebuild,
             ReuseContainers:                    input.reuseContainers,
             Workdir:                            input.Workdir(),
             ActionCacheDir:                     input.actionCachePath,
+            ActionOfflineMode:                  input.actionOfflineMode,
             BindWorkdir:                        input.bindWorkdir,
             LogOutput:                          !input.noOutput,
             JSONLogger:                         input.jsonLogger,
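
The expression "!input.actionOfflineMode && input.forcePull" means offline mode always wins over a force-pull request: with --action-offline-mode enabled, ForcePull is false no matter what --pull was set to. A tiny sketch of that boolean interaction, derived only from the hunk above:

    package main

    import "fmt"

    // effectiveForcePull reproduces the expression used for the ForcePull field.
    func effectiveForcePull(actionOfflineMode, forcePull bool) bool {
    	return !actionOfflineMode && forcePull
    }

    func main() {
    	for _, offline := range []bool{false, true} {
    		for _, pull := range []bool{false, true} {
    			fmt.Printf("offline=%v pull=%v -> ForcePull=%v\n",
    				offline, pull, effectiveForcePull(offline, pull))
    		}
    	}
    }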

@@ -612,6 +614,12 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
             ReplaceGheActionWithGithubCom:      input.replaceGheActionWithGithubCom,
             ReplaceGheActionTokenWithGithubCom: input.replaceGheActionTokenWithGithubCom,
             Matrix:                             matrixes,
+            ContainerNetworkMode:               docker_container.NetworkMode(input.networkName),
+        }
+        if input.useNewActionCache {
+            config.ActionCache = &runner.GoGitActionCache{
+                Path: config.ActionCacheDir,
+            }
         }
         r, err := runner.New(config)
         if err != nil {
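
The value of the new --network flag is passed straight into Docker's container.NetworkMode, which is a string type with helper predicates, so "host", "bridge", "none", or a user-defined network name are all accepted; --use-new-action-cache additionally wires a runner.GoGitActionCache rooted at the action cache path. A minimal sketch of the NetworkMode side, assuming the github.com/docker/docker/api/types/container package imported in the hunks above:

    package main

    import (
    	"fmt"

    	docker_container "github.com/docker/docker/api/types/container"
    )

    // NetworkMode is a string type; the helpers below classify the common cases
    // that a --network value can fall into.
    func main() {
    	for _, name := range []string{"host", "bridge", "none", "my-custom-net"} {
    		mode := docker_container.NetworkMode(name)
    		fmt.Printf("%-14s host=%v bridge=%v none=%v user-defined=%v\n",
    			name, mode.IsHost(), mode.IsBridge(), mode.IsNone(), mode.IsUserDefined())
    	}
    }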

go.mod: 48 changed lines

@@ -4,46 +4,48 @@ go 1.20

 require (
     github.com/AlecAivazis/survey/v2 v2.3.7
-    github.com/Masterminds/semver v1.5.0
     github.com/adrg/xdg v0.4.0
     github.com/andreaskoch/go-fswatch v1.0.0
-    github.com/creack/pty v1.1.18
-    github.com/docker/cli v24.0.6+incompatible
+    github.com/creack/pty v1.1.21
+    github.com/docker/cli v24.0.7+incompatible
     github.com/docker/distribution v2.8.3+incompatible
-    github.com/docker/docker v24.0.6+incompatible // 24.0 branch
+    github.com/docker/docker v24.0.7+incompatible // 24.0 branch
     github.com/docker/go-connections v0.4.0
     github.com/go-git/go-billy/v5 v5.5.0
-    github.com/go-git/go-git/v5 v5.9.0
-    github.com/gobwas/glob v0.2.3
+    github.com/go-git/go-git/v5 v5.11.0
     github.com/imdario/mergo v0.3.16
     github.com/joho/godotenv v1.5.1
     github.com/julienschmidt/httprouter v1.3.0
     github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
-    github.com/mattn/go-isatty v0.0.19
-    github.com/moby/buildkit v0.12.2
+    github.com/mattn/go-isatty v0.0.20
+    github.com/moby/buildkit v0.12.5
     github.com/moby/patternmatcher v0.6.0
-    github.com/opencontainers/image-spec v1.1.0-rc5
+    github.com/opencontainers/image-spec v1.1.0-rc3
     github.com/opencontainers/selinux v1.11.0
     github.com/pkg/errors v0.9.1
     github.com/rhysd/actionlint v1.6.26
     github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
     github.com/sirupsen/logrus v1.9.3
-    github.com/spf13/cobra v1.7.0
+    github.com/spf13/cobra v1.8.0
     github.com/spf13/pflag v1.0.5
     github.com/stretchr/testify v1.8.4
     github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a
-    go.etcd.io/bbolt v1.3.7
-    golang.org/x/term v0.13.0
+    go.etcd.io/bbolt v1.3.8
+    golang.org/x/term v0.16.0
     gopkg.in/yaml.v3 v3.0.1
     gotest.tools/v3 v3.5.1
 )

+require (
+    github.com/Masterminds/semver v1.5.0
+    github.com/gobwas/glob v0.2.3
+)
+
 require (
     dario.cat/mergo v1.0.0 // indirect
     github.com/Microsoft/go-winio v0.6.1 // indirect
     github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
-    github.com/acomagu/bufpipe v1.0.4 // indirect
-    github.com/cloudflare/circl v1.3.3 // indirect
+    github.com/cloudflare/circl v1.3.7 // indirect
     github.com/containerd/containerd v1.7.2 // indirect
     github.com/cyphar/filepath-securejoin v0.2.4 // indirect
     github.com/davecgh/go-spew v1.1.1 // indirect
@@ -55,12 +57,12 @@ require (
     github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
     github.com/gogo/protobuf v1.3.2 // indirect
     github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-    github.com/google/go-cmp v0.5.9 // indirect
+    github.com/google/go-cmp v0.6.0 // indirect
     github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
     github.com/inconshreveable/mousetrap v1.1.0 // indirect
     github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
     github.com/kevinburke/ssh_config v1.2.0 // indirect
-    github.com/klauspost/compress v1.16.3 // indirect
+    github.com/klauspost/compress v1.17.2 // indirect
     github.com/mattn/go-colorable v0.1.13 // indirect
     github.com/mattn/go-runewidth v0.0.15 // indirect
     github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
@@ -68,24 +70,24 @@ require (
     github.com/moby/sys/sequential v0.5.0 // indirect
     github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
     github.com/opencontainers/go-digest v1.0.0 // indirect
-    github.com/opencontainers/runc v1.1.7 // indirect
+    github.com/opencontainers/runc v1.1.12 // indirect
     github.com/pjbgf/sha1cd v0.3.0 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/rivo/uniseg v0.4.4 // indirect
     github.com/robfig/cron/v3 v3.0.1 // indirect
     github.com/sergi/go-diff v1.2.0 // indirect
-    github.com/skeema/knownhosts v1.2.0 // indirect
+    github.com/skeema/knownhosts v1.2.1 // indirect
     github.com/stretchr/objx v0.5.0 // indirect
     github.com/xanzy/ssh-agent v0.3.3 // indirect
-    github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
+    github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
     github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
     github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-    golang.org/x/crypto v0.13.0 // indirect
+    golang.org/x/crypto v0.17.0 // indirect
     golang.org/x/mod v0.12.0 // indirect
-    golang.org/x/net v0.15.0 // indirect
+    golang.org/x/net v0.19.0 // indirect
     golang.org/x/sync v0.3.0 // indirect
-    golang.org/x/sys v0.13.0 // indirect
-    golang.org/x/text v0.13.0 // indirect
+    golang.org/x/sys v0.16.0 // indirect
+    golang.org/x/text v0.14.0 // indirect
     golang.org/x/tools v0.13.0 // indirect
     gopkg.in/warnings.v0 v0.1.2 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect

go.sum: 86 changed lines

@@ -15,8 +15,6 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63n
 github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
 github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
 github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
-github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
-github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
 github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls=
 github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E=
 github.com/andreaskoch/go-fswatch v1.0.0 h1:la8nP/HiaFCxP2IM6NZNUCoxgLWuyNFgH0RligBbnJU=
@@ -24,14 +22,15 @@ github.com/andreaskoch/go-fswatch v1.0.0/go.mod h1:r5/iV+4jfwoY2sYqBkg8vpF04ehOv
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
 github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
 github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
 github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
+github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -39,12 +38,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY=
-github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
+github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
-github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
 github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -61,9 +60,9 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
 github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
 github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
-github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8=
-github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY=
-github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
+github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -72,8 +71,8 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
@@ -94,31 +93,29 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4
 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
-github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
+github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
-github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
 github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
 github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/moby/buildkit v0.12.2 h1:B7guBgY6sfk4dBlv/ORUxyYlp0UojYaYyATgtNwSCXc=
-github.com/moby/buildkit v0.12.2/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI=
+github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0=
+github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso=
 github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
 github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
 github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
@@ -129,10 +126,10 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
-github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
+github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
-github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk=
-github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
+github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
+github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
 github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
 github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
@@ -159,10 +156,10 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
-github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
+github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
+github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
-github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
-github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -184,8 +181,9 @@ github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a h1:oIi7H/bwFUY
 github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a/go.mod h1:iSvujNDmpZ6eQX+bg/0X3lF7LEmZ8N77g2a/J/+Zt2U=
 github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
 github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
@@ -194,8 +192,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
-go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
+go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -203,8 +201,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@@ -222,8 +220,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -252,15 +250,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -268,8 +266,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
|
|
@ -9,6 +9,7 @@ import (
 "net/http"
 "os"
 "path/filepath"
+ "regexp"
 "strconv"
 "strings"
 "sync/atomic"

@ -386,7 +387,12 @@ func (h *Handler) findCache(db *bolthold.Store, keys []string, version string) (

 for _, prefix := range keys[1:] {
  found := false
- if err := db.ForEach(bolthold.Where("Key").Ge(prefix).And("Version").Eq(version).SortBy("Key"), func(v *Cache) error {
+ prefixPattern := fmt.Sprintf("^%s", regexp.QuoteMeta(prefix))
+ re, err := regexp.Compile(prefixPattern)
+ if err != nil {
+  continue
+ }
+ if err := db.ForEach(bolthold.Where("Key").RegExp(re).And("Version").Eq(version).SortBy("CreatedAt").Reverse(), func(v *Cache) error {
   if !strings.HasPrefix(v.Key, prefix) {
    return stop
   }
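
For illustration only (not part of this diff): a minimal, self-contained sketch of the anchored-prefix pattern used above, showing how a restore key is escaped with regexp.QuoteMeta before it is matched against candidate cache keys. The key values are hypothetical.

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Hypothetical restore-key prefix and stored cache keys.
        prefix := "node-cache-linux-"
        keys := []string{"node-cache-linux-abc123", "node-cache-windows-def456", "other"}

        // Same construction as findCache above: escape the prefix and anchor it
        // at the start of the key so only true prefix matches pass.
        re := regexp.MustCompile("^" + regexp.QuoteMeta(prefix))
        for _, k := range keys {
            fmt.Println(k, re.MatchString(k))
        }
    }
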
@ -225,6 +225,7 @@ type NewGitCloneExecutorInput struct {
 Ref         string
 Dir         string
 Token       string
+ OfflineMode bool
}

// CloneIfRequired ...

@ -302,13 +303,17 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
   return err
  }

+ isOfflineMode := input.OfflineMode
+
  // fetch latest changes
  fetchOptions, pullOptions := gitOptions(input.Token)
+
+ if !isOfflineMode {
   err = r.Fetch(&fetchOptions)
   if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
    return err
   }
+ }

  var hash *plumbing.Hash
  rev := plumbing.Revision(input.Ref)

@ -367,10 +372,11 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
    return err
   }
  }
+ if !isOfflineMode {
   if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
    logger.Debugf("Unable to pull %s: %v", refName, err)
   }
+ }
  logger.Debugf("Cloned %s to %s", input.URL, input.Dir)

  if hash.String() != input.Ref && refType == "branch" {
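
A hedged usage sketch of the new OfflineMode flag: the struct field and constructor come from the hunk above, while the import path, repository URL, and directory are assumptions for illustration. With OfflineMode set, the Fetch and Pull calls guarded above are skipped and an existing clone is reused.

    package main

    import (
        "context"
        "log"

        "github.com/nektos/act/pkg/common/git" // assumed import path for the package shown above
    )

    func main() {
        ctx := context.Background()
        clone := git.NewGitCloneExecutor(git.NewGitCloneExecutorInput{
            URL:         "https://example.com/owner/repo.git", // hypothetical repository
            Ref:         "main",
            Dir:         "/tmp/example-clone",
            OfflineMode: true, // skip Fetch/Pull, per the guarded blocks above
        })
        if err := clone(ctx); err != nil {
            log.Fatal(err)
        }
    }
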
@ -4,6 +4,7 @@ import (
 "context"
 "io"

+ "github.com/docker/go-connections/nat"
 "github.com/nektos/act/pkg/common"
)

@ -26,11 +27,13 @@ type NewContainerInput struct {
 UsernsMode string
 Platform   string
 Options    string
+ NetworkAliases []string
+ ExposedPorts   nat.PortSet
+ PortBindings   nat.PortMap

 // Gitea specific
 AutoRemove bool

- NetworkAliases []string
 ValidVolumes []string
}

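
For context, a small stand-alone sketch (not taken from this diff) of how values for the new ExposedPorts and PortBindings fields are typically built with the go-connections nat package; the port numbers are hypothetical.

    package main

    import (
        "fmt"

        "github.com/docker/go-connections/nat"
    )

    func main() {
        // Expose container port 8080/tcp and bind it to host port 18080.
        port := nat.Port("8080/tcp")
        exposed := nat.PortSet{port: struct{}{}}
        bindings := nat.PortMap{
            port: []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: "18080"}},
        }
        fmt.Println(exposed, bindings)
    }
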
@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go
// appended with license information.

@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

@ -15,6 +15,20 @@ func NewDockerNetworkCreateExecutor(name string) common.Executor {
  if err != nil {
   return err
  }
+ defer cli.Close()
+
+ // Only create the network if it doesn't exist
+ networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
+ if err != nil {
+  return err
+ }
+ common.Logger(ctx).Debugf("%v", networks)
+ for _, network := range networks {
+  if network.Name == name {
+   common.Logger(ctx).Debugf("Network %v exists", name)
+   return nil
+  }
+ }
+
  _, err = cli.NetworkCreate(ctx, name, types.NetworkCreate{
   Driver: "bridge",

@ -34,7 +48,32 @@ func NewDockerNetworkRemoveExecutor(name string) common.Executor {
  if err != nil {
   return err
  }
+ defer cli.Close()
+
- return cli.NetworkRemove(ctx, name)
+ // Make sure that all networks with the specified name are removed;
+ // cli.NetworkRemove refuses to remove a network if there are duplicates
+ networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
+ if err != nil {
+  return err
+ }
+ common.Logger(ctx).Debugf("%v", networks)
+ for _, network := range networks {
+  if network.Name == name {
+   result, err := cli.NetworkInspect(ctx, network.ID, types.NetworkInspectOptions{})
+   if err != nil {
+    return err
+   }
+
+   if len(result.Containers) == 0 {
+    if err = cli.NetworkRemove(ctx, network.ID); err != nil {
+     common.Logger(ctx).Debugf("%v", err)
+    }
+   } else {
+    common.Logger(ctx).Debugf("Refusing to remove network %v because it still has active endpoints", name)
+   }
+  }
+ }
+
+ return err
 }
}

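
A hedged usage sketch of the two executors changed above: create is now a no-op when a network of the same name already exists, and remove only deletes name-matching networks that have no active endpoints. The import path and network name are assumptions; the executor signatures are taken from the diff.

    package main

    import (
        "context"
        "log"

        "github.com/nektos/act/pkg/container" // assumed import path
    )

    func main() {
        ctx := context.Background()
        name := "act-example-network" // hypothetical network name

        // Idempotent create: skipped if the network already exists.
        if err := container.NewDockerNetworkCreateExecutor(name)(ctx); err != nil {
            log.Fatal(err)
        }
        // Remove only deletes networks of this name without active endpoints.
        if err := container.NewDockerNetworkRemoveExecutor(name)(ctx); err != nil {
            log.Fatal(err)
        }
    }
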
@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

@ -38,6 +38,7 @@ import (
 "golang.org/x/term"

 "github.com/nektos/act/pkg/common"
+ "github.com/nektos/act/pkg/filecollector"
)

// NewContainer creates a reference to a container

@ -86,7 +87,7 @@ func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) b

func (cr *containerReference) Create(capAdd []string, capDrop []string) common.Executor {
 return common.
- NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
+ NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
  Then(
   common.NewPipelineExecutor(
    cr.connect(),

@ -98,7 +99,7 @@ func (cr *containerReference) Create(capAdd []string, capDrop []string) common.E

func (cr *containerReference) Start(attach bool) common.Executor {
 return common.
- NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
+ NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
  Then(
   common.NewPipelineExecutor(
    cr.connect(),

@ -260,8 +261,10 @@ func RunnerArch(ctx context.Context) string {

 archMapper := map[string]string{
  "x86_64":  "X64",
+ "amd64":   "X64",
  "386":     "X86",
  "aarch64": "ARM64",
+ "arm64":   "ARM64",
 }
 if arch, ok := archMapper[info.Architecture]; ok {
  return arch

@ -370,8 +373,8 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
 // So comment out the following code.

 // if len(copts.netMode.Value()) == 0 {
- // 	if err = copts.netMode.Set("host"); err != nil {
- // 		return nil, nil, fmt.Errorf("Cannot parse networkmode=host. This is an internal error and should not happen: '%w'", err)
+ // 	if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
+ // 		return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
 // 	}
 // }

@ -429,6 +432,7 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
  Image:      input.Image,
  WorkingDir: input.WorkingDir,
  Env:        input.Env,
+ ExposedPorts: input.ExposedPorts,
  Tty:        isTerminal,
 }
 logger.Debugf("Common container.Config ==> %+v", config)

@ -472,6 +476,7 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
  NetworkMode: container.NetworkMode(input.NetworkMode),
  Privileged:  input.Privileged,
  UsernsMode:  container.UsernsMode(input.UsernsMode),
+ PortBindings: input.PortBindings,
  AutoRemove:  input.AutoRemove,
 }
 logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)

@ -487,7 +492,10 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
 // For Gitea
 // network-scoped alias is supported only for containers in user defined networks
 var networkingConfig *network.NetworkingConfig
- if hostConfig.NetworkMode.IsUserDefined() && len(input.NetworkAliases) > 0 {
+ logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
+ n := hostConfig.NetworkMode
+ // IsUserDefined and IsHost are broken on windows
+ if n.IsUserDefined() && n != "host" && len(input.NetworkAliases) > 0 {
  endpointConfig := &network.EndpointSettings{
   Aliases: input.NetworkAliases,
  }

@ -709,10 +717,28 @@ func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal boo
}

func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
- err := cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
+ // Mkdir
+ buf := &bytes.Buffer{}
+ tw := tar.NewWriter(buf)
+ _ = tw.WriteHeader(&tar.Header{
+  Name:     destPath,
+  Mode:     777,
+  Typeflag: tar.TypeDir,
+ })
+ tw.Close()
+ err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, types.CopyToContainerOptions{})
+ if err != nil {
+  return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
+ }
+ // Copy Content
+ err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
 if err != nil {
  return fmt.Errorf("failed to copy content to container: %w", err)
 }
+ // If this fails, then folders have wrong permissions on non root container
+ if cr.UID != 0 || cr.GID != 0 {
+  _ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), destPath}, nil, "0", "")(ctx)
+ }
 return nil
}

@ -753,12 +779,12 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
  ignorer = gitignore.NewMatcher(ps)
 }

- fc := &fileCollector{
-  Fs:        &defaultFs{},
+ fc := &filecollector.FileCollector{
+  Fs:        &filecollector.DefaultFs{},
   Ignorer:   ignorer,
   SrcPath:   srcPath,
   SrcPrefix: srcPrefix,
-  Handler: &tarCollector{
+  Handler: &filecollector.TarCollector{
    TarWriter: tw,
    UID:       cr.UID,
    GID:       cr.GID,

@ -766,7 +792,7 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
  },
 }

- err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
+ err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
 if err != nil {
  return err
 }
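
For illustration, a stdlib-only sketch of the "Mkdir" trick used in CopyTarStream above: a one-entry, in-memory tar archive containing just the destination directory, which can then be sent to Docker's CopyToContainer against "/". The path is hypothetical.

    package main

    import (
        "archive/tar"
        "bytes"
        "fmt"
    )

    func main() {
        // Build a tar archive whose only entry is the target directory,
        // mirroring the pre-create step added to CopyTarStream above.
        buf := &bytes.Buffer{}
        tw := tar.NewWriter(buf)
        _ = tw.WriteHeader(&tar.Header{
            Name:     "/var/run/act/workdir", // hypothetical destination path
            Mode:     0o777,
            Typeflag: tar.TypeDir,
        })
        tw.Close()
        // buf is now ready to be streamed to the container root to "mkdir" the path.
        fmt.Println(buf.Len(), "bytes of tar data")
    }
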
@ -23,6 +23,7 @@ func TestDocker(t *testing.T) {
 ctx := context.Background()
 client, err := GetDockerClient(ctx)
 assert.NoError(t, err)
+ defer client.Close()

 dockerBuild := NewDockerBuildExecutor(NewDockerBuildExecutorInput{
  ContextDir: "testdata",

@ -1,4 +1,4 @@
-//go:build WITHOUT_DOCKER || !(linux || darwin || windows)
+//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)

package container

@ -1,4 +1,4 @@
-//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

@ -21,6 +21,7 @@ import (
 "golang.org/x/term"

 "github.com/nektos/act/pkg/common"
+ "github.com/nektos/act/pkg/filecollector"
 "github.com/nektos/act/pkg/lookpath"
)

@ -71,7 +72,7 @@ func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, ta
  return err
 }
 tr := tar.NewReader(tarStream)
- cp := &copyCollector{
+ cp := &filecollector.CopyCollector{
  DstDir: destPath,
 }
 for {

@ -110,16 +111,16 @@ func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore

  ignorer = gitignore.NewMatcher(ps)
 }
- fc := &fileCollector{
-  Fs:        &defaultFs{},
+ fc := &filecollector.FileCollector{
+  Fs:        &filecollector.DefaultFs{},
   Ignorer:   ignorer,
   SrcPath:   srcPath,
   SrcPrefix: srcPrefix,
-  Handler: &copyCollector{
+  Handler: &filecollector.CopyCollector{
    DstDir: destPath,
   },
  }
- return filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
+ return filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
 }
}

@ -132,21 +133,21 @@ func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath strin
 if err != nil {
  return nil, err
 }
- tc := &tarCollector{
+ tc := &filecollector.TarCollector{
  TarWriter: tw,
 }
 if fi.IsDir() {
-  srcPrefix := filepath.Dir(srcPath)
+  srcPrefix := srcPath
  if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) {
   srcPrefix += string(filepath.Separator)
  }
-  fc := &fileCollector{
-   Fs:        &defaultFs{},
+  fc := &filecollector.FileCollector{
+   Fs:        &filecollector.DefaultFs{},
    SrcPath:   srcPath,
    SrcPrefix: srcPrefix,
    Handler:   tc,
  }
-  err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
+  err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
  if err != nil {
   return nil, err
  }

@ -1,4 +1,71 @@
package container

+import (
+ "archive/tar"
+ "context"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
// Type assert HostEnvironment implements ExecutionsEnvironment
var _ ExecutionsEnvironment = &HostEnvironment{}
+
+func TestCopyDir(t *testing.T) {
+ dir, err := os.MkdirTemp("", "test-host-env-*")
+ assert.NoError(t, err)
+ defer os.RemoveAll(dir)
+ ctx := context.Background()
+ e := &HostEnvironment{
+  Path:      filepath.Join(dir, "path"),
+  TmpDir:    filepath.Join(dir, "tmp"),
+  ToolCache: filepath.Join(dir, "tool_cache"),
+  ActPath:   filepath.Join(dir, "act_path"),
+  StdOut:    os.Stdout,
+  Workdir:   path.Join("testdata", "scratch"),
+ }
+ _ = os.MkdirAll(e.Path, 0700)
+ _ = os.MkdirAll(e.TmpDir, 0700)
+ _ = os.MkdirAll(e.ToolCache, 0700)
+ _ = os.MkdirAll(e.ActPath, 0700)
+ err = e.CopyDir(e.Workdir, e.Path, true)(ctx)
+ assert.NoError(t, err)
+}
+
+func TestGetContainerArchive(t *testing.T) {
+ dir, err := os.MkdirTemp("", "test-host-env-*")
+ assert.NoError(t, err)
+ defer os.RemoveAll(dir)
+ ctx := context.Background()
+ e := &HostEnvironment{
+  Path:      filepath.Join(dir, "path"),
+  TmpDir:    filepath.Join(dir, "tmp"),
+  ToolCache: filepath.Join(dir, "tool_cache"),
+  ActPath:   filepath.Join(dir, "act_path"),
+  StdOut:    os.Stdout,
+  Workdir:   path.Join("testdata", "scratch"),
+ }
+ _ = os.MkdirAll(e.Path, 0700)
+ _ = os.MkdirAll(e.TmpDir, 0700)
+ _ = os.MkdirAll(e.ToolCache, 0700)
+ _ = os.MkdirAll(e.ActPath, 0700)
+ expectedContent := []byte("sdde/7sh")
+ err = os.WriteFile(filepath.Join(e.Path, "action.yml"), expectedContent, 0600)
+ assert.NoError(t, err)
+ archive, err := e.GetContainerArchive(ctx, e.Path)
+ assert.NoError(t, err)
+ defer archive.Close()
+ reader := tar.NewReader(archive)
+ h, err := reader.Next()
+ assert.NoError(t, err)
+ assert.Equal(t, "action.yml", h.Name)
+ content, err := io.ReadAll(reader)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedContent, content)
+ _, err = reader.Next()
+ assert.ErrorIs(t, err, io.EOF)
+}
pkg/container/testdata/scratch/test.txt vendored Normal file
@ -0,0 +1 @@
+testfile
@ -230,6 +230,7 @@ func TestFunctionFormat(t *testing.T) {
 {"format('{0', '{1}', 'World')", nil, "Unclosed brackets. The following format string is invalid: '{0'", "format-invalid-format-string"},
 {"format('{2}', '{1}', 'World')", "", "The following format string references more arguments than were supplied: '{2}'", "format-invalid-replacement-reference"},
 {"format('{2147483648}')", "", "The following format string is invalid: '{2147483648}'", "format-invalid-replacement-reference"},
+ {"format('{0} {1} {2} {3}', 1.0, 1.1, 1234567890.0, 12345678901234567890.0)", "1 1.1 1234567890 1.23456789012346E+19", nil, "format-floats"},
}

env := &EvaluationEnvironment{

@ -449,7 +449,7 @@ func (impl *interperterImpl) coerceToString(value reflect.Value) reflect.Value {
  } else if math.IsInf(value.Float(), -1) {
   return reflect.ValueOf("-Infinity")
  }
- return reflect.ValueOf(fmt.Sprint(value))
+ return reflect.ValueOf(fmt.Sprintf("%.15G", value.Float()))

 case reflect.Slice:
  return reflect.ValueOf("Array")

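
A quick stdlib check of the new float coercion: formatting with %.15G produces exactly the strings expected by the format-floats test case added above.

    package main

    import "fmt"

    func main() {
        for _, f := range []float64{1.0, 1.1, 1234567890.0, 12345678901234567890.0} {
            // Mirrors coerceToString above; prints: 1, 1.1, 1234567890, 1.23456789012346E+19
            fmt.Println(fmt.Sprintf("%.15G", f))
        }
    }
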
@ -1,4 +1,4 @@
-package container
+package filecollector

import (
 "archive/tar"

@ -17,18 +17,18 @@ import (
 "github.com/go-git/go-git/v5/plumbing/format/index"
)

-type fileCollectorHandler interface {
+type Handler interface {
 WriteFile(path string, fi fs.FileInfo, linkName string, f io.Reader) error
}

-type tarCollector struct {
+type TarCollector struct {
 TarWriter *tar.Writer
 UID       int
 GID       int
 DstDir    string
}

-func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
+func (tc TarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
 // create a new dir/file header
 header, err := tar.FileInfoHeader(fi, linkName)
 if err != nil {

@ -59,11 +59,11 @@ func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string,
 return nil
}

-type copyCollector struct {
+type CopyCollector struct {
 DstDir string
}

-func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
+func (cc *CopyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
 fdestpath := filepath.Join(cc.DstDir, fpath)
 if err := os.MkdirAll(filepath.Dir(fdestpath), 0o777); err != nil {
  return err

@ -82,29 +82,29 @@ func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string
 return nil
}

-type fileCollector struct {
+type FileCollector struct {
 Ignorer   gitignore.Matcher
 SrcPath   string
 SrcPrefix string
- Fs        fileCollectorFs
- Handler   fileCollectorHandler
+ Fs        Fs
+ Handler   Handler
}

-type fileCollectorFs interface {
+type Fs interface {
 Walk(root string, fn filepath.WalkFunc) error
 OpenGitIndex(path string) (*index.Index, error)
 Open(path string) (io.ReadCloser, error)
 Readlink(path string) (string, error)
}

-type defaultFs struct {
+type DefaultFs struct {
}

-func (*defaultFs) Walk(root string, fn filepath.WalkFunc) error {
+func (*DefaultFs) Walk(root string, fn filepath.WalkFunc) error {
 return filepath.Walk(root, fn)
}

-func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
+func (*DefaultFs) OpenGitIndex(path string) (*index.Index, error) {
 r, err := git.PlainOpen(path)
 if err != nil {
  return nil, err

@ -116,16 +116,16 @@ func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
 return i, nil
}

-func (*defaultFs) Open(path string) (io.ReadCloser, error) {
+func (*DefaultFs) Open(path string) (io.ReadCloser, error) {
 return os.Open(path)
}

-func (*defaultFs) Readlink(path string) (string, error) {
+func (*DefaultFs) Readlink(path string) (string, error) {
 return os.Readlink(path)
}

//nolint:gocyclo
-func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
+func (fc *FileCollector) CollectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
 i, _ := fc.Fs.OpenGitIndex(path.Join(fc.SrcPath, path.Join(submodulePath...)))
 return func(file string, fi os.FileInfo, err error) error {
  if err != nil {

@ -166,7 +166,7 @@ func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []strin
   }
  }
  if err == nil && entry.Mode == filemode.Submodule {
-  err = fc.Fs.Walk(file, fc.collectFiles(ctx, split))
+  err = fc.Fs.Walk(file, fc.CollectFiles(ctx, split))
   if err != nil {
    return err
   }
@ -1,4 +1,4 @@
-package container
+package filecollector

import (
 "archive/tar"

@ -95,16 +95,16 @@ func TestIgnoredTrackedfile(t *testing.T) {
 tw := tar.NewWriter(tmpTar)
 ps, _ := gitignore.ReadPatterns(worktree, []string{})
 ignorer := gitignore.NewMatcher(ps)
- fc := &fileCollector{
+ fc := &FileCollector{
  Fs:        &memoryFs{Filesystem: fs},
  Ignorer:   ignorer,
  SrcPath:   "mygitrepo",
  SrcPrefix: "mygitrepo" + string(filepath.Separator),
-  Handler: &tarCollector{
+  Handler: &TarCollector{
   TarWriter: tw,
  },
 }
- err := fc.Fs.Walk("mygitrepo", fc.collectFiles(context.Background(), []string{}))
+ err := fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
 assert.NoError(t, err, "successfully collect files")
 tw.Close()
 _, _ = tmpTar.Seek(0, io.SeekStart)

@ -115,3 +115,58 @@ func TestIgnoredTrackedfile(t *testing.T) {
 _, err = tr.Next()
 assert.ErrorIs(t, err, io.EOF, "tar must only contain one element")
}
+
+func TestSymlinks(t *testing.T) {
+ fs := memfs.New()
+ _ = fs.MkdirAll("mygitrepo/.git", 0o777)
+ dotgit, _ := fs.Chroot("mygitrepo/.git")
+ worktree, _ := fs.Chroot("mygitrepo")
+ repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree)
+ // This file shouldn't be in the tar
+ f, err := worktree.Create(".env")
+ assert.NoError(t, err)
+ _, err = f.Write([]byte("test=val1\n"))
+ assert.NoError(t, err)
+ f.Close()
+ err = worktree.Symlink(".env", "test.env")
+ assert.NoError(t, err)
+
+ w, err := repo.Worktree()
+ assert.NoError(t, err)
+
+ // .gitignore is in the tar after adding it to the index
+ _, err = w.Add(".env")
+ assert.NoError(t, err)
+ _, err = w.Add("test.env")
+ assert.NoError(t, err)
+
+ tmpTar, _ := fs.Create("temp.tar")
+ tw := tar.NewWriter(tmpTar)
+ ps, _ := gitignore.ReadPatterns(worktree, []string{})
+ ignorer := gitignore.NewMatcher(ps)
+ fc := &FileCollector{
+  Fs:        &memoryFs{Filesystem: fs},
+  Ignorer:   ignorer,
+  SrcPath:   "mygitrepo",
+  SrcPrefix: "mygitrepo" + string(filepath.Separator),
+  Handler: &TarCollector{
+   TarWriter: tw,
+  },
+ }
+ err = fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
+ assert.NoError(t, err, "successfully collect files")
+ tw.Close()
+ _, _ = tmpTar.Seek(0, io.SeekStart)
+ tr := tar.NewReader(tmpTar)
+ h, err := tr.Next()
+ files := map[string]tar.Header{}
+ for err == nil {
+  files[h.Name] = *h
+  h, err = tr.Next()
+ }
+
+ assert.Equal(t, ".env", files[".env"].Name)
+ assert.Equal(t, "test.env", files["test.env"].Name)
+ assert.Equal(t, ".env", files["test.env"].Linkname)
+ assert.ErrorIs(t, err, io.EOF, "tar must be read cleanly to EOF")
+}

@ -168,7 +168,7 @@ func (ghc *GithubContext) SetRepositoryAndOwner(ctx context.Context, githubInsta
 if ghc.Repository == "" {
  repo, err := git.FindGithubRepo(ctx, repoPath, githubInstance, remoteName)
  if err != nil {
-  common.Logger(ctx).Warningf("unable to get git repo: %v", err)
+  common.Logger(ctx).Warningf("unable to get git repo (githubInstance: %v; remoteName: %v, repoPath: %v): %v", githubInstance, remoteName, repoPath, err)
   return
  }
  ghc.Repository = repo

@ -148,12 +148,10 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
  workflow.Name = wf.workflowDirEntry.Name()
 }

- jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
- for k := range workflow.Jobs {
-  if ok := jobNameRegex.MatchString(k); !ok {
-   _ = f.Close()
-   return nil, fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
-  }
- }
+ err = validateJobName(workflow)
+ if err != nil {
+  _ = f.Close()
+  return nil, err
+ }

 wp.workflows = append(wp.workflows, workflow)

@ -171,6 +169,42 @@ func CombineWorkflowPlanner(workflows ...*Workflow) WorkflowPlanner {
 }
}

+func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) {
+ wp := new(workflowPlanner)
+
+ log.Debugf("Reading workflow %s", name)
+ workflow, err := ReadWorkflow(f)
+ if err != nil {
+  if err == io.EOF {
+   return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err)
+  }
+  return nil, fmt.Errorf("workflow is not valid. '%s': %w", name, err)
+ }
+ workflow.File = name
+ if workflow.Name == "" {
+  workflow.Name = name
+ }
+
+ err = validateJobName(workflow)
+ if err != nil {
+  return nil, err
+ }
+
+ wp.workflows = append(wp.workflows, workflow)
+
+ return wp, nil
+}
+
+func validateJobName(workflow *Workflow) error {
+ jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
+ for k := range workflow.Jobs {
+  if ok := jobNameRegex.MatchString(k); !ok {
+   return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
+  }
+ }
+ return nil
+}
+
type workflowPlanner struct {
 workflows []*Workflow
}

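
A hedged sketch of how the new NewSingleWorkflowPlanner might be called with an in-memory workflow. The function name and signature come from the hunk above; the import path and the YAML are assumptions for illustration.

    package main

    import (
        "log"
        "strings"

        "github.com/nektos/act/pkg/model" // assumed import path for the package shown above
    )

    func main() {
        yaml := `
    name: example
    on: push
    jobs:
      build:
        runs-on: ubuntu-latest
        steps:
          - run: echo hello
    `
        // Job names are checked with the same validateJobName helper added above.
        planner, err := model.NewSingleWorkflowPlanner("example.yml", strings.NewReader(yaml))
        if err != nil {
            log.Fatal(err)
        }
        _ = planner
    }
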
@ -103,22 +103,40 @@ type WorkflowDispatch struct {
}

func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch {
- if w.RawOn.Kind != yaml.MappingNode {
+ switch w.RawOn.Kind {
+ case yaml.ScalarNode:
+  var val string
+  if !decodeNode(w.RawOn, &val) {
   return nil
  }
+  if val == "workflow_dispatch" {
+   return &WorkflowDispatch{}
+  }
+ case yaml.SequenceNode:
+  var val []string
+  if !decodeNode(w.RawOn, &val) {
+   return nil
+  }
+  for _, v := range val {
+   if v == "workflow_dispatch" {
+    return &WorkflowDispatch{}
+   }
+  }
+ case yaml.MappingNode:
  var val map[string]yaml.Node
  if !decodeNode(w.RawOn, &val) {
   return nil
  }

-  var config WorkflowDispatch
-  node := val["workflow_dispatch"]
-  if !decodeNode(node, &config) {
+  n, found := val["workflow_dispatch"]
+  var workflowDispatch WorkflowDispatch
+  if found && decodeNode(n, &workflowDispatch) {
+   return &workflowDispatch
+  }
+ default:
  return nil
 }
- return &config
+ return nil
}

type WorkflowCallInput struct {

@ -299,15 +317,39 @@ func (j *Job) Needs() []string {
// RunsOn list for Job
func (j *Job) RunsOn() []string {
 switch j.RawRunsOn.Kind {
+ case yaml.MappingNode:
+  var val struct {
+   Group  string
+   Labels yaml.Node
+  }
+
+  if !decodeNode(j.RawRunsOn, &val) {
+   return nil
+  }
+
+  labels := nodeAsStringSlice(val.Labels)
+
+  if val.Group != "" {
+   labels = append(labels, val.Group)
+  }
+
+  return labels
+ default:
+  return nodeAsStringSlice(j.RawRunsOn)
+ }
+}
+
+func nodeAsStringSlice(node yaml.Node) []string {
+ switch node.Kind {
 case yaml.ScalarNode:
  var val string
-  if !decodeNode(j.RawRunsOn, &val) {
+  if !decodeNode(node, &val) {
   return nil
  }
  return []string{val}
 case yaml.SequenceNode:
  var val []string
-  if !decodeNode(j.RawRunsOn, &val) {
+  if !decodeNode(node, &val) {
   return nil
  }
  return val

@ -153,6 +153,41 @@ jobs:
 assert.Contains(t, workflow.On(), "pull_request")
}

+func TestReadWorkflow_RunsOnLabels(t *testing.T) {
+ yaml := `
+name: local-action-docker-url
+
+jobs:
+  test:
+    container: nginx:latest
+    runs-on:
+      labels: ubuntu-latest
+    steps:
+    - uses: ./actions/docker-url`
+
+ workflow, err := ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest"})
+}
+
+func TestReadWorkflow_RunsOnLabelsWithGroup(t *testing.T) {
+ yaml := `
+name: local-action-docker-url
+
+jobs:
+  test:
+    container: nginx:latest
+    runs-on:
+      labels: [ubuntu-latest]
+      group: linux
+    steps:
+    - uses: ./actions/docker-url`
+
+ workflow, err := ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest", "linux"})
+}
+
func TestReadWorkflow_StringContainer(t *testing.T) {
 yaml := `
name: local-action-docker-url

@ -464,3 +499,107 @@ func TestStep_ShellCommand(t *testing.T) {
  })
 }
}
+
+func TestReadWorkflow_WorkflowDispatchConfig(t *testing.T) {
+ yaml := `
+name: local-action-docker-url
+`
+ workflow, err := ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ workflowDispatch := workflow.WorkflowDispatchConfig()
+ assert.Nil(t, workflowDispatch)
+
+ yaml = `
+name: local-action-docker-url
+on: push
+`
+ workflow, err = ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ workflowDispatch = workflow.WorkflowDispatchConfig()
+ assert.Nil(t, workflowDispatch)
+
+ yaml = `
+name: local-action-docker-url
+on: workflow_dispatch
+`
+ workflow, err = ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ workflowDispatch = workflow.WorkflowDispatchConfig()
+ assert.NotNil(t, workflowDispatch)
+ assert.Nil(t, workflowDispatch.Inputs)
+
+ yaml = `
+name: local-action-docker-url
+on: [push, pull_request]
+`
+ workflow, err = ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ workflowDispatch = workflow.WorkflowDispatchConfig()
+ assert.Nil(t, workflowDispatch)
+
+ yaml = `
+name: local-action-docker-url
+on: [push, workflow_dispatch]
+`
+ workflow, err = ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ workflowDispatch = workflow.WorkflowDispatchConfig()
+ assert.NotNil(t, workflowDispatch)
+ assert.Nil(t, workflowDispatch.Inputs)
+
+ yaml = `
+name: local-action-docker-url
+on:
+  - push
+  - workflow_dispatch
+`
+ workflow, err = ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ workflowDispatch = workflow.WorkflowDispatchConfig()
+ assert.NotNil(t, workflowDispatch)
+ assert.Nil(t, workflowDispatch.Inputs)
+
+ yaml = `
+name: local-action-docker-url
+on:
+  push:
+  pull_request:
+`
+ workflow, err = ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ workflowDispatch = workflow.WorkflowDispatchConfig()
+ assert.Nil(t, workflowDispatch)
+
+ yaml = `
+name: local-action-docker-url
+on:
+  push:
+  pull_request:
+  workflow_dispatch:
+    inputs:
+      logLevel:
+        description: 'Log level'
+        required: true
+        default: 'warning'
+        type: choice
+        options:
+        - info
+        - warning
+        - debug
+`
+ workflow, err = ReadWorkflow(strings.NewReader(yaml))
+ assert.NoError(t, err, "read workflow should succeed")
+ workflowDispatch = workflow.WorkflowDispatchConfig()
+ assert.NotNil(t, workflowDispatch)
+ assert.Equal(t, WorkflowDispatchInput{
+  Default:     "warning",
+  Description: "Log level",
+  Options: []string{
+   "info",
+   "warning",
+   "debug",
+  },
+  Required: true,
+  Type:     "choice",
+ }, workflowDispatch.Inputs["logLevel"])
+}

@ -3,6 +3,7 @@ package runner
import (
 "context"
 "embed"
+ "errors"
 "fmt"
 "io"
 "io/fs"

@ -41,11 +42,24 @@ var trampoline embed.FS

func readActionImpl(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) {
 logger := common.Logger(ctx)
+ allErrors := []error{}
+ addError := func(fileName string, err error) {
+  if err != nil {
+   allErrors = append(allErrors, fmt.Errorf("failed to read '%s' from action '%s' with path '%s' of step %w", fileName, step.String(), actionPath, err))
+  } else {
+   // One successful read, clear error state
+   allErrors = nil
+  }
+ }
 reader, closer, err := readFile("action.yml")
+ addError("action.yml", err)
 if os.IsNotExist(err) {
  reader, closer, err = readFile("action.yaml")
-  if err != nil {
-   if _, closer, err2 := readFile("Dockerfile"); err2 == nil {
+  addError("action.yaml", err)
+  if os.IsNotExist(err) {
+   _, closer, err := readFile("Dockerfile")
+   addError("Dockerfile", err)
+   if err == nil {
    closer.Close()
    action := &model.Action{
     Name: "(Synthetic)",

@ -90,10 +104,10 @@ func readActionImpl(ctx context.Context, step *model.Step, actionDir string, act
     return action, nil
    }
   }
-   return nil, err
  }
- } else if err != nil {
-  return nil, err
+ }
+ if allErrors != nil {
+  return nil, errors.Join(allErrors...)
 }
 defer closer.Close()

@ -110,9 +124,6 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
 if stepModel.Type() != model.StepTypeUsesActionRemote {
  return nil
 }
- if err := removeGitIgnore(ctx, actionDir); err != nil {
-  return err
- }

 var containerActionDirCopy string
 containerActionDirCopy = strings.TrimSuffix(containerActionDir, actionPath)

@ -121,6 +132,21 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
 if !strings.HasSuffix(containerActionDirCopy, `/`) {
  containerActionDirCopy += `/`
 }
+
+ if rc.Config != nil && rc.Config.ActionCache != nil {
+  raction := step.(*stepActionRemote)
+  ta, err := rc.Config.ActionCache.GetTarArchive(ctx, raction.cacheDir, raction.resolvedSha, "")
+  if err != nil {
+   return err
+  }
+  defer ta.Close()
+  return rc.JobContainer.CopyTarStream(ctx, containerActionDirCopy, ta)
+ }
+
+ if err := removeGitIgnore(ctx, actionDir); err != nil {
+  return err
+ }
+
 return rc.JobContainer.CopyDir(containerActionDirCopy, actionDir+"/", rc.Config.UseGitIgnore)(ctx)
}

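
For reference, a minimal stdlib example of the errors.Join behavior that readActionImpl now relies on: the joined error reports every accumulated read failure, and errors.Is still matches the individual causes. The messages are hypothetical.

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
    )

    func main() {
        e1 := fmt.Errorf("failed to read 'action.yml': %w", fs.ErrNotExist)
        e2 := fmt.Errorf("failed to read 'action.yaml': %w", fs.ErrNotExist)
        joined := errors.Join(e1, e2)

        fmt.Println(joined)                            // both messages, newline separated
        fmt.Println(errors.Is(joined, fs.ErrNotExist)) // true
    }
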
@ -281,6 +307,13 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
   return err
  }
  defer buildContext.Close()
+ } else if rc.Config.ActionCache != nil {
+  rstep := step.(*stepActionRemote)
+  buildContext, err = rc.Config.ActionCache.GetTarArchive(ctx, rstep.cacheDir, rstep.resolvedSha, contextDir)
+  if err != nil {
+   return err
+  }
+  defer buildContext.Close()
 }
 prepImage = container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
  ContextDir: contextDir,

@ -6,6 +6,7 @@ import (
 "crypto/rand"
 "encoding/hex"
 "errors"
+ "fmt"
 "io"
 "io/fs"
 "path"

@ -86,6 +87,9 @@ func (c GoGitActionCache) Fetch(ctx context.Context, cacheDir, url, ref, token s
  Auth:  auth,
  Force: true,
 }); err != nil {
+ if tagOrSha && errors.Is(err, git.NoErrAlreadyUpToDate) {
+  return "", fmt.Errorf("couldn't find remote ref \"%s\"", ref)
+ }
  return "", err
 }
 if tagOrSha {

@ -20,6 +20,7 @@ type jobInfo interface {
 result(result string)
}

+//nolint:contextcheck,gocyclo
func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executor {
 steps := make([]common.Executor, 0)
 preSteps := make([]common.Executor, 0)

@ -101,7 +102,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo

 postExec := useStepLogger(rc, stepModel, stepStagePost, step.post())
 if postExecutor != nil {
-  // run the post exector in reverse order
+  // run the post executor in reverse order
  postExecutor = postExec.Finally(postExecutor)
 } else {
  postExecutor = postExec

@ -117,22 +118,19 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
 defer cancel()

 logger := common.Logger(ctx)
- logger.Infof("Cleaning up services for job %s", rc.JobName)
- if err := rc.stopServiceContainers()(ctx); err != nil {
-  logger.Errorf("Error while cleaning services: %v", err)
- }
-
 logger.Infof("Cleaning up container for job %s", rc.JobName)
 if err = info.stopContainer()(ctx); err != nil {
  logger.Errorf("Error while stop job container: %v", err)
 }

 if !rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == "" {
  // clean network in docker mode only
  // if the value of `ContainerNetworkMode` is empty string,
  // it means that the network to which containers are connecting is created by `act_runner`,
  // so, we should remove the network at last.
-  logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, rc.networkName())
-  if err := container.NewDockerNetworkRemoveExecutor(rc.networkName())(ctx); err != nil {
+  networkName, _ := rc.networkName()
+  logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
+  if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
   logger.Errorf("Error while cleaning network: %v", err)
  }
 }

@@ -1,6 +1,7 @@
 package runner
 
 import (
+	"archive/tar"
 	"context"
 	"errors"
 	"fmt"

@@ -67,12 +68,51 @@ func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
 	// FIXME: if the reusable workflow is from a private repository, we need to provide a token to access the repository.
 	token := ""
 
+	if rc.Config.ActionCache != nil {
+		return newActionCacheReusableWorkflowExecutor(rc, filename, remoteReusableWorkflow)
+	}
+
 	return common.NewPipelineExecutor(
 		newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir, token)),
 		newReusableWorkflowExecutor(rc, workflowDir, remoteReusableWorkflow.FilePath()),
 	)
 }
 
+func newActionCacheReusableWorkflowExecutor(rc *RunContext, filename string, remoteReusableWorkflow *remoteReusableWorkflow) common.Executor {
+	return func(ctx context.Context) error {
+		ghctx := rc.getGithubContext(ctx)
+		remoteReusableWorkflow.URL = ghctx.ServerURL
+		sha, err := rc.Config.ActionCache.Fetch(ctx, filename, remoteReusableWorkflow.CloneURL(), remoteReusableWorkflow.Ref, ghctx.Token)
+		if err != nil {
+			return err
+		}
+		archive, err := rc.Config.ActionCache.GetTarArchive(ctx, filename, sha, fmt.Sprintf(".github/workflows/%s", remoteReusableWorkflow.Filename))
+		if err != nil {
+			return err
+		}
+		defer archive.Close()
+		treader := tar.NewReader(archive)
+		if _, err = treader.Next(); err != nil {
+			return err
+		}
+		planner, err := model.NewSingleWorkflowPlanner(remoteReusableWorkflow.Filename, treader)
+		if err != nil {
+			return err
+		}
+		plan, err := planner.PlanEvent("workflow_call")
+		if err != nil {
+			return err
+		}
+
+		runner, err := NewReusableWorkflowRunner(rc)
+		if err != nil {
+			return err
+		}
+
+		return runner.NewPlanExecutor(plan)(ctx)
+	}
+}
+
 var (
 	executorLock sync.Mutex
 )

@@ -103,6 +143,7 @@ func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkfl
 			Ref:         remoteReusableWorkflow.Ref,
 			Dir:         targetDirectory,
 			Token:       token,
+			OfflineMode: rc.Config.ActionOfflineMode,
 		})(ctx)
 	},
 	nil,

@@ -16,14 +16,13 @@ import (
 	"regexp"
 	"runtime"
 	"strings"
-	"time"
 
-	"github.com/opencontainers/selinux/go-selinux"
-
+	"github.com/docker/go-connections/nat"
 	"github.com/nektos/act/pkg/common"
 	"github.com/nektos/act/pkg/container"
 	"github.com/nektos/act/pkg/exprparser"
 	"github.com/nektos/act/pkg/model"
+	"github.com/opencontainers/selinux/go-selinux"
 )
 
 // RunContext contains info about current job

@@ -65,7 +64,7 @@ func (rc *RunContext) String() string {
 	if rc.caller != nil {
 		// prefix the reusable workflow with the caller job
 		// this is required to create unique container names
-		name = fmt.Sprintf("%s/%s", rc.caller.runContext.Run.JobID, name)
+		name = fmt.Sprintf("%s/%s", rc.caller.runContext.Name, name)
 	}
 	return name
 }

@@ -95,9 +94,15 @@ func (rc *RunContext) jobContainerName() string {
 }
 
 // networkName return the name of the network which will be created by `act` automatically for job,
-// only create network if `rc.Config.ContainerNetworkMode` is empty string.
-func (rc *RunContext) networkName() string {
-	return fmt.Sprintf("%s-network", rc.jobContainerName())
+// only create network if using a service container
+func (rc *RunContext) networkName() (string, bool) {
+	if len(rc.Run.Job().Services) > 0 {
+		return fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID), true
+	}
+	if rc.Config.ContainerNetworkMode == "" {
+		return "host", false
+	}
+	return string(rc.Config.ContainerNetworkMode), false
 }
 
 func getDockerDaemonSocketMountPath(daemonPath string) string {

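networkName now reports two things: the network name to use and whether act itself is responsible for creating (and later removing) it. The condensed call pattern below is restated from the startJobContainer changes further down in this PR, as a reading aid only:

    // Condensed from the startJobContainer hunks below; no new behavior.
    networkName := string(rc.Config.ContainerNetworkMode)
    var createAndDeleteNetwork bool
    if networkName == "" {
        // services present  -> "<job-container>-<jobID>-network" plus create/delete duty
        // no services       -> "host" (or the configured mode), left untouched by act
        networkName, createAndDeleteNetwork = rc.networkName()
    }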
@@ -135,7 +140,7 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
 		ext := container.LinuxContainerEnvironmentExtensions{}
 
 		mounts := map[string]string{
-			"act-toolcache": "/toolcache",
+			"act-toolcache": "/opt/hostedtoolcache",
 			name + "-env":   ext.GetActPath(),
 		}
 

@@ -247,6 +252,7 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
 	}
 }
 
+//nolint:gocyclo
 func (rc *RunContext) startJobContainer() common.Executor {
 	return func(ctx context.Context) error {
 		logger := common.Logger(ctx)

@@ -285,14 +291,15 @@ func (rc *RunContext) startJobContainer() common.Executor {
 
 		// specify the network to which the container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>)
 		networkName := string(rc.Config.ContainerNetworkMode)
+		var createAndDeleteNetwork bool
 		if networkName == "" {
 			// if networkName is empty string, will create a new network for the containers.
 			// and it will be removed after at last.
-			networkName = rc.networkName()
+			networkName, createAndDeleteNetwork = rc.networkName()
 		}
 
 		// add service containers
-		for serviceId, spec := range rc.Run.Job().Services {
+		for serviceID, spec := range rc.Run.Job().Services {
 			// interpolate env
 			interpolatedEnvs := make(map[string]string, len(spec.Env))
 			for k, v := range spec.Env {

@@ -302,21 +309,36 @@ func (rc *RunContext) startJobContainer() common.Executor {
 			for k, v := range interpolatedEnvs {
 				envs = append(envs, fmt.Sprintf("%s=%s", k, v))
 			}
-			// interpolate cmd
 			interpolatedCmd := make([]string, 0, len(spec.Cmd))
 			for _, v := range spec.Cmd {
 				interpolatedCmd = append(interpolatedCmd, rc.ExprEval.Interpolate(ctx, v))
 			}
-			username, password, err := rc.handleServiceCredentials(ctx, spec.Credentials)
+
+			username, password, err = rc.handleServiceCredentials(ctx, spec.Credentials)
 			if err != nil {
-				return fmt.Errorf("failed to handle service %s credentials: %w", serviceId, err)
+				return fmt.Errorf("failed to handle service %s credentials: %w", serviceID, err)
 			}
-			serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(spec.Volumes)
-			serviceContainerName := createSimpleContainerName(rc.jobContainerName(), serviceId)
+
+			interpolatedVolumes := make([]string, 0, len(spec.Volumes))
+			for _, volume := range spec.Volumes {
+				interpolatedVolumes = append(interpolatedVolumes, rc.ExprEval.Interpolate(ctx, volume))
+			}
+			serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(interpolatedVolumes)
+
+			interpolatedPorts := make([]string, 0, len(spec.Ports))
+			for _, port := range spec.Ports {
+				interpolatedPorts = append(interpolatedPorts, rc.ExprEval.Interpolate(ctx, port))
+			}
+			exposedPorts, portBindings, err := nat.ParsePortSpecs(interpolatedPorts)
+			if err != nil {
+				return fmt.Errorf("failed to parse service %s ports: %w", serviceID, err)
+			}
+
+			serviceContainerName := createContainerName(rc.jobContainerName(), serviceID)
 			c := container.NewContainer(&container.NewContainerInput{
 				Name:       serviceContainerName,
 				WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
-				Image:      spec.Image,
+				Image:      rc.ExprEval.Interpolate(ctx, spec.Image),
 				Username:   username,
 				Password:   password,
 				Cmd:        interpolatedCmd,

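After interpolation, the service port strings are handed to nat.ParsePortSpecs from github.com/docker/go-connections, which expands Docker-style port specs into the exposed-port set and host bindings the container API expects. A small runnable sketch of what it produces for the value used by the testdata workflows added in this PR ("8080:80"):

    package main

    import (
        "fmt"

        "github.com/docker/go-connections/nat"
    )

    func main() {
        exposed, bindings, err := nat.ParsePortSpecs([]string{"8080:80"})
        if err != nil {
            panic(err)
        }
        fmt.Println(exposed)  // container port set: key "80/tcp"
        fmt.Println(bindings) // "80/tcp" bound to host port 8080
    }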
@@ -329,26 +351,58 @@ func (rc *RunContext) startJobContainer() common.Executor {
 				UsernsMode:     rc.Config.UsernsMode,
 				Platform:       rc.Config.ContainerArchitecture,
 				AutoRemove:     rc.Config.AutoRemove,
-				Options:        spec.Options,
+				Options:        rc.ExprEval.Interpolate(ctx, spec.Options),
 				NetworkMode:    networkName,
-				NetworkAliases: []string{serviceId},
+				NetworkAliases: []string{serviceID},
+				ExposedPorts:   exposedPorts,
+				PortBindings:   portBindings,
 				ValidVolumes:   rc.Config.ValidVolumes,
 			})
 			rc.ServiceContainers = append(rc.ServiceContainers, c)
 		}
 
 		rc.cleanUpJobContainer = func(ctx context.Context) error {
-			if rc.JobContainer != nil && !rc.Config.ReuseContainers {
-				return rc.JobContainer.Remove().
-					Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).
-					Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false))(ctx)
+			reuseJobContainer := func(ctx context.Context) bool {
+				return rc.Config.ReuseContainers
+			}
+
+			if rc.JobContainer != nil {
+				return rc.JobContainer.Remove().IfNot(reuseJobContainer).
+					Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).IfNot(reuseJobContainer).
+					Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false)).IfNot(reuseJobContainer).
+					Then(func(ctx context.Context) error {
+						if len(rc.ServiceContainers) > 0 {
+							logger.Infof("Cleaning up services for job %s", rc.JobName)
+							if err := rc.stopServiceContainers()(ctx); err != nil {
+								logger.Errorf("Error while cleaning services: %v", err)
+							}
+							if createAndDeleteNetwork {
+								// clean network if it has been created by act
+								// if using service containers
+								// it means that the network to which containers are connecting is created by `act_runner`,
+								// so, we should remove the network at last.
+								logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
+								if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
+									logger.Errorf("Error while cleaning network: %v", err)
+								}
+							}
+						}
+						return nil
+					})(ctx)
 			}
 			return nil
 		}
 
+		jobContainerNetwork := rc.Config.ContainerNetworkMode.NetworkName()
+		if rc.containerImage(ctx) != "" {
+			jobContainerNetwork = networkName
+		} else if jobContainerNetwork == "" {
+			jobContainerNetwork = "host"
+		}
+
 		rc.JobContainer = container.NewContainer(&container.NewContainerInput{
 			Cmd:        nil,
-			Entrypoint: []string{"/bin/sleep", fmt.Sprint(rc.Config.ContainerMaxLifetime.Round(time.Second).Seconds())},
+			Entrypoint: []string{"tail", "-f", "/dev/null"},
 			WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
 			Image:      image,
 			Username:   username,

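The rewritten cleanup keeps a single executor chain for both the reuse and no-reuse cases: every destructive step is guarded with IfNot(reuseJobContainer), while the trailing closure that stops service containers and removes an act-created network always runs. Condensed from the hunk above (bodies elided, nothing new added):

    // Shape of the chain only; see the full hunk above for the real bodies.
    return rc.JobContainer.Remove().IfNot(reuseJobContainer).
        Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).IfNot(reuseJobContainer).
        Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false)).IfNot(reuseJobContainer).
        Then(func(ctx context.Context) error {
            // always: stop service containers and, if createAndDeleteNetwork, remove the network
            return nil
        })(ctx)

With --reuse, the guarded removals become no-ops but service and network teardown still happens.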
@@ -356,7 +410,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
 			Name:           name,
 			Env:            envList,
 			Mounts:         mounts,
-			NetworkMode:    networkName,
+			NetworkMode:    jobContainerNetwork,
 			NetworkAliases: []string{rc.Name},
 			Binds:          binds,
 			Stdout:         logWriter,

@@ -375,6 +429,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
 		return common.NewPipelineExecutor(
 			rc.pullServicesImages(rc.Config.ForcePull),
 			rc.JobContainer.Pull(rc.Config.ForcePull),
+			rc.stopJobContainer(),
 			container.NewDockerNetworkCreateExecutor(networkName).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers.
 			rc.startServiceContainers(networkName),
 			rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),

@@ -452,10 +507,10 @@ func (rc *RunContext) UpdateExtraPath(ctx context.Context, githubEnvPath string)
 	return nil
 }
 
-// stopJobContainer removes the job container (if it exists) and its volume (if it exists) if !rc.Config.ReuseContainers
+// stopJobContainer removes the job container (if it exists) and its volume (if it exists)
 func (rc *RunContext) stopJobContainer() common.Executor {
 	return func(ctx context.Context) error {
-		if rc.cleanUpJobContainer != nil && !rc.Config.ReuseContainers {
+		if rc.cleanUpJobContainer != nil {
 			return rc.cleanUpJobContainer(ctx)
 		}
 		return nil

@@ -472,7 +527,7 @@ func (rc *RunContext) pullServicesImages(forcePull bool) common.Executor {
 	}
 }
 
-func (rc *RunContext) startServiceContainers(networkName string) common.Executor {
+func (rc *RunContext) startServiceContainers(_ string) common.Executor {
 	return func(ctx context.Context) error {
 		execs := []common.Executor{}
 		for _, c := range rc.ServiceContainers {

@@ -490,7 +545,7 @@ func (rc *RunContext) stopServiceContainers() common.Executor {
 	return func(ctx context.Context) error {
 		execs := []common.Executor{}
 		for _, c := range rc.ServiceContainers {
-			execs = append(execs, c.Remove())
+			execs = append(execs, c.Remove().Finally(c.Close()))
 		}
 		return common.NewParallelExecutor(len(execs), execs...)(ctx)
 	}

@@ -610,13 +665,11 @@ func (rc *RunContext) containerImage(ctx context.Context) string {
 }
 
 func (rc *RunContext) runsOnImage(ctx context.Context) string {
-	job := rc.Run.Job()
-
-	if job.RunsOn() == nil {
+	if rc.Run.Job().RunsOn() == nil {
 		common.Logger(ctx).Errorf("'runs-on' key not defined in %s", rc.String())
 	}
 
-	runsOn := job.RunsOn()
+	runsOn := rc.Run.Job().RunsOn()
 	for i, v := range runsOn {
 		runsOn[i] = rc.ExprEval.Interpolate(ctx, v)
 	}

@@ -627,8 +680,8 @@ func (rc *RunContext) runsOnImage(ctx context.Context) string {
 		}
 	}
 
-	for _, runnerLabel := range runsOn {
-		image := rc.Config.Platforms[strings.ToLower(runnerLabel)]
+	for _, platformName := range rc.runsOnPlatformNames(ctx) {
+		image := rc.Config.Platforms[strings.ToLower(platformName)]
 		if image != "" {
 			return image
 		}

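runsOnImage resolves each runs-on label through rc.Config.Platforms, the lower-cased label-to-image map that act builds from the -P flag (for example -P ubuntu-latest=node:16-buster-slim). A hedged sketch of that lookup in isolation; the map literal is an example, not a default shipped by act:

    // Example only: a platforms map as produced from -P flags.
    platforms := map[string]string{
        "ubuntu-latest": "node:16-buster-slim",
    }
    for _, label := range []string{"self-hosted", "ubuntu-latest"} {
        if image := platforms[strings.ToLower(label)]; image != "" {
            fmt.Println("using image", image) // first label with a mapping wins
            break
        }
    }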
@@ -637,6 +690,21 @@ func (rc *RunContext) runsOnImage(ctx context.Context) string {
 	return ""
 }
 
+func (rc *RunContext) runsOnPlatformNames(ctx context.Context) []string {
+	job := rc.Run.Job()
+
+	if job.RunsOn() == nil {
+		return []string{}
+	}
+
+	if err := rc.ExprEval.EvaluateYamlNode(ctx, &job.RawRunsOn); err != nil {
+		common.Logger(ctx).Errorf("Error while evaluating runs-on: %v", err)
+		return []string{}
+	}
+
+	return job.RunsOn()
+}
+
 func (rc *RunContext) platformImage(ctx context.Context) string {
 	if containerImage := rc.containerImage(ctx); containerImage != "" {
 		return containerImage

@@ -667,8 +735,6 @@ func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {
 
 	if jobType == model.JobTypeInvalid {
 		return false, jobTypeErr
-	} else if jobType != model.JobTypeDefault {
-		return true, nil
 	}
 
 	if !runJob {

@@ -676,14 +742,13 @@ func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {
 		return false, nil
 	}
 
-	img := rc.platformImage(ctx)
-	if img == "" {
-		if job.RunsOn() == nil {
-			l.Errorf("'runs-on' key not defined in %s", rc.String())
+	if jobType != model.JobTypeDefault {
+		return true, nil
 	}
 
-	for _, runnerLabel := range job.RunsOn() {
-		platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
+	img := rc.platformImage(ctx)
+	if img == "" {
+		for _, platformName := range rc.runsOnPlatformNames(ctx) {
 			l.Infof("\U0001F6A7 Skipping unsupported platform -- Try running with `-P %+v=...`", platformName)
 		}
 		return false, nil

@@ -960,7 +1025,6 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
 	env["GITHUB_REF"] = github.Ref
 	env["GITHUB_REF_NAME"] = github.RefName
 	env["GITHUB_REF_TYPE"] = github.RefType
-	env["GITHUB_TOKEN"] = github.Token
 	env["GITHUB_JOB"] = github.Job
 	env["GITHUB_REPOSITORY_OWNER"] = github.RepositoryOwner
 	env["GITHUB_RETENTION_DAYS"] = github.RetentionDays

@@ -987,9 +1051,7 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
 		setActionRuntimeVars(rc, env)
 	}
 
-	job := rc.Run.Job()
-	for _, runnerLabel := range job.RunsOn() {
-		platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
+	for _, platformName := range rc.runsOnPlatformNames(ctx) {
 		if platformName != "" {
 			if platformName == "ubuntu-latest" {
 				// hardcode current ubuntu-latest since we have no way to check that 'on the fly'

@@ -470,6 +470,53 @@ func createJob(t *testing.T, input string, result string) *model.Job {
 	return job
 }
 
+func TestRunContextRunsOnPlatformNames(t *testing.T) {
+	log.SetLevel(log.DebugLevel)
+	assertObject := assert.New(t)
+
+	rc := createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `runs-on: ubuntu-latest`, ""),
+	})
+	assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))
+
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `runs-on: ${{ 'ubuntu-latest' }}`, ""),
+	})
+	assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))
+
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `runs-on: [self-hosted, my-runner]`, ""),
+	})
+	assertObject.Equal([]string{"self-hosted", "my-runner"}, rc.runsOnPlatformNames(context.Background()))
+
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `runs-on: [self-hosted, "${{ 'my-runner' }}"]`, ""),
+	})
+	assertObject.Equal([]string{"self-hosted", "my-runner"}, rc.runsOnPlatformNames(context.Background()))
+
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `runs-on: ${{ fromJSON('["ubuntu-latest"]') }}`, ""),
+	})
+	assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))
+
+	// test missing / invalid runs-on
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `name: something`, ""),
+	})
+	assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))
+
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `runs-on:
+  mapping: value`, ""),
+	})
+	assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))
+
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `runs-on: ${{ invalid expression }}`, ""),
+	})
+	assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))
+}
+
 func TestRunContextIsEnabled(t *testing.T) {
 	log.SetLevel(log.DebugLevel)
 	assertObject := assert.New(t)

@@ -572,6 +619,17 @@ if: always()`, ""),
 	})
 	rc.Run.JobID = "job2"
 	assertObject.True(rc.isEnabled(context.Background()))
+
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `uses: ./.github/workflows/reusable.yml`, ""),
+	})
+	assertObject.True(rc.isEnabled(context.Background()))
+
+	rc = createIfTestRunContext(map[string]*model.Job{
+		"job1": createJob(t, `uses: ./.github/workflows/reusable.yml
+if: false`, ""),
+	})
+	assertObject.False(rc.isEnabled(context.Background()))
 }
 
 func TestRunContextGetEnv(t *testing.T) {

@@ -25,6 +25,7 @@ type Config struct {
 	Actor             string // the user that triggered the event
 	Workdir           string // path to working directory
 	ActionCacheDir    string // path used for caching action contents
+	ActionOfflineMode bool   // when offline, use caching action contents
 	BindWorkdir       bool   // bind the workdir to the job container
 	EventName         string // name of event to run
 	EventPath         string // path to JSON file to use for event.json in containers

@@ -60,12 +61,13 @@ type Config struct {
 	ReplaceGheActionWithGithubCom      []string // Use actions from GitHub Enterprise instance to GitHub
 	ReplaceGheActionTokenWithGithubCom string   // Token of private action repo on GitHub.
 	Matrix                             map[string]map[string]bool   // Matrix config to run
+	ContainerNetworkMode               docker_container.NetworkMode // the network mode of job containers (the value of --network)
+	ActionCache                        ActionCache                  // Use a custom ActionCache Implementation
 
 	PresetGitHubContext   *model.GithubContext // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
 	EventJSON             string               // the content of JSON file to use for event.json in containers, overrides EventPath
 	ContainerNamePrefix   string               // the prefix of container name
 	ContainerMaxLifetime  time.Duration        // the max lifetime of job containers
-	ContainerNetworkMode  docker_container.NetworkMode // the network mode of job containers (the value of --network)
 	DefaultActionInstance string               // the default actions web site
 	PlatformPicker        func(labels []string) string // platform picker, it will take precedence over Platforms if isn't nil
 	JobLoggerLevel        *log.Level           // the level of job logger

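These Config fields are what plug the action cache into an embedding runner. A minimal wiring sketch; GoGitActionCache is the concrete type whose Fetch method appears earlier in this PR, but its field name (Path here) and the directory value are assumptions for illustration, not shown in this diff:

    // Sketch under stated assumptions; adjust field names to the real GoGitActionCache definition.
    cfg := &runner.Config{
        Workdir:           ".",
        ActionCacheDir:    "/tmp/act-cache",                                  // example path
        ActionOfflineMode: true,                                              // reuse cached action contents when offline
        ActionCache:       &runner.GoGitActionCache{Path: "/tmp/act-cache"},  // Path is an assumption
    }

With ActionCache set, remote actions and remote reusable workflows resolve through the cache code paths added in this PR instead of fresh clones.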
@@ -302,6 +302,11 @@ func TestRunEvent(t *testing.T) {
 		{workdir, "set-env-step-env-override", "push", "", platforms, secrets},
 		{workdir, "set-env-new-env-file-per-step", "push", "", platforms, secrets},
 		{workdir, "no-panic-on-invalid-composite-action", "push", "jobs failed due to invalid action", platforms, secrets},
+
+		// services
+		{workdir, "services", "push", "", platforms, secrets},
+		{workdir, "services-host-network", "push", "", platforms, secrets},
+		{workdir, "services-with-container", "push", "", platforms, secrets},
 	}
 
 	for _, table := range tables {

@@ -34,6 +34,9 @@ const (
 	stepStagePost
 )
 
+// Controls how many symlinks are resolved for local and remote Actions
+const maxSymlinkDepth = 10
+
 func (s stepStage) String() string {
 	switch s {
 	case stepStagePre:

@@ -307,3 +310,13 @@ func mergeIntoMapCaseInsensitive(target map[string]string, maps ...map[string]st
 		}
 	}
 }
+
+func symlinkJoin(filename, sym, parent string) (string, error) {
+	dir := path.Dir(filename)
+	dest := path.Join(dir, sym)
+	prefix := path.Clean(parent) + "/"
+	if strings.HasPrefix(dest, prefix) || prefix == "./" {
+		return dest, nil
+	}
+	return "", fmt.Errorf("symlink tries to access file '%s' outside of '%s'", strings.ReplaceAll(dest, "'", "''"), strings.ReplaceAll(parent, "'", "''"))
+}

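symlinkJoin confines a symlink target to the parent directory it was found under. Tracing the function above with concrete values (a hypothetical action whose action.yml is a symlink; the paths are illustrative):

    // Inside the parent: allowed.
    dest, err := symlinkJoin("my-action/action.yml", "action.yaml", "my-action")
    // dir = "my-action", dest = "my-action/action.yaml", prefix = "my-action/"
    // -> returns "my-action/action.yaml", nil

    // Escaping the parent: rejected.
    _, err = symlinkJoin("my-action/action.yml", "../../etc/passwd", "my-action")
    // path.Join gives "../etc/passwd", which does not start with "my-action/"
    // -> returns an error instead of following the link
    _, _ = dest, err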
@@ -3,7 +3,10 @@ package runner
 import (
 	"archive/tar"
 	"context"
+	"errors"
+	"fmt"
 	"io"
+	"io/fs"
 	"os"
 	"path"
 	"path/filepath"

@@ -42,17 +45,33 @@ func (sal *stepActionLocal) main() common.Executor {
 		localReader := func(ctx context.Context) actionYamlReader {
 			_, cpath := getContainerActionPaths(sal.Step, path.Join(actionDir, ""), sal.RunContext)
 			return func(filename string) (io.Reader, io.Closer, error) {
-				tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, path.Join(cpath, filename))
-				if err != nil {
-					return nil, nil, os.ErrNotExist
+				spath := path.Join(cpath, filename)
+				for i := 0; i < maxSymlinkDepth; i++ {
+					tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, spath)
+					if errors.Is(err, fs.ErrNotExist) {
+						return nil, nil, err
+					} else if err != nil {
+						return nil, nil, fs.ErrNotExist
+					}
+					treader := tar.NewReader(tars)
+					header, err := treader.Next()
+					if errors.Is(err, io.EOF) {
+						return nil, nil, os.ErrNotExist
+					} else if err != nil {
+						return nil, nil, err
+					}
+					if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
+						spath, err = symlinkJoin(spath, header.Linkname, cpath)
+						if err != nil {
+							return nil, nil, err
+						}
+					} else {
+						return treader, tars, nil
+					}
 				}
-				treader := tar.NewReader(tars)
-				if _, err := treader.Next(); err != nil {
-					return nil, nil, os.ErrNotExist
-				}
-				return treader, tars, nil
+				return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
 			}
 		}
 
 		actionModel, err := sal.readAction(ctx, sal.Step, actionDir, "", localReader(ctx), os.WriteFile)
 		if err != nil {

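The loop above follows at most maxSymlinkDepth symlinks while reading action metadata out of a tar stream: when the first entry is a symlink, the lookup restarts at the joined target path instead of returning its contents. The symlink test itself is plain archive/tar plus os API, as in this minimal fragment (assumes treader is a *tar.Reader positioned at the requested path):

    header, err := treader.Next()
    if err != nil {
        return nil, nil, err
    }
    if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink { // same check as header.Typeflag == tar.TypeSymlink
        // header.Linkname holds the target; resolve it against the current path and retry
    }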
@@ -1,6 +1,7 @@
 package runner
 
 import (
+	"archive/tar"
 	"context"
 	"errors"
 	"fmt"

@@ -28,6 +29,8 @@ type stepActionRemote struct {
 	action       *model.Action
 	env          map[string]string
 	remoteAction *remoteAction
+	cacheDir     string
+	resolvedSha  string
 }
 
 var stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor

@@ -62,6 +65,48 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
 				github.Token = sar.RunContext.Config.ReplaceGheActionTokenWithGithubCom
 			}
 		}
+		if sar.RunContext.Config.ActionCache != nil {
+			cache := sar.RunContext.Config.ActionCache
+
+			var err error
+			sar.cacheDir = fmt.Sprintf("%s/%s", sar.remoteAction.Org, sar.remoteAction.Repo)
+			repoURL := sar.remoteAction.URL + "/" + sar.cacheDir
+			repoRef := sar.remoteAction.Ref
+			sar.resolvedSha, err = cache.Fetch(ctx, sar.cacheDir, repoURL, repoRef, github.Token)
+			if err != nil {
+				return fmt.Errorf("failed to fetch \"%s\" version \"%s\": %w", repoURL, repoRef, err)
+			}
+
+			remoteReader := func(ctx context.Context) actionYamlReader {
+				return func(filename string) (io.Reader, io.Closer, error) {
+					spath := path.Join(sar.remoteAction.Path, filename)
+					for i := 0; i < maxSymlinkDepth; i++ {
+						tars, err := cache.GetTarArchive(ctx, sar.cacheDir, sar.resolvedSha, spath)
+						if err != nil {
+							return nil, nil, os.ErrNotExist
+						}
+						treader := tar.NewReader(tars)
+						header, err := treader.Next()
+						if err != nil {
+							return nil, nil, os.ErrNotExist
+						}
+						if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
+							spath, err = symlinkJoin(spath, header.Linkname, ".")
+							if err != nil {
+								return nil, nil, err
+							}
+						} else {
+							return treader, tars, nil
+						}
+					}
+					return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
+				}
+			}
+
+			actionModel, err := sar.readAction(ctx, sar.Step, sar.resolvedSha, sar.remoteAction.Path, remoteReader(ctx), os.WriteFile)
+			sar.action = actionModel
+			return err
+		}
+
 		actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
 		gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{

@@ -75,6 +120,7 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
 			For GitHub, they are the same, always github.com.
 			But for Gitea, tasks triggered by a.com can clone actions from b.com.
 			*/
+			OfflineMode: sar.RunContext.Config.ActionOfflineMode,
 		})
 		var ntErr common.Executor
 		if err := gitClone(ctx); err != nil {

@@ -182,7 +182,6 @@ func TestSetupEnv(t *testing.T) {
 		"GITHUB_RUN_ID":     "runId",
 		"GITHUB_RUN_NUMBER": "1",
 		"GITHUB_SERVER_URL": "https://",
-		"GITHUB_TOKEN":      "",
 		"GITHUB_WORKFLOW":   "",
 		"INPUT_STEP_WITH":   "with-value",
 		"RC_KEY":            "rcvalue",

14
pkg/runner/testdata/services-host-network/push.yml
vendored
Normal file
@@ -0,0 +1,14 @@
+name: services-host-network
+on: push
+jobs:
+  services-host-network:
+    runs-on: ubuntu-latest
+    services:
+      nginx:
+        image: "nginx:latest"
+        ports:
+          - "8080:80"
+    steps:
+      - run: apt-get -qq update && apt-get -yqq install --no-install-recommends curl net-tools
+      - run: netstat -tlpen
+      - run: curl -v http://localhost:8080

16
pkg/runner/testdata/services-with-container/push.yml
vendored
Normal file
@@ -0,0 +1,16 @@
+name: services-with-containers
+on: push
+jobs:
+  services-with-containers:
+    runs-on: ubuntu-latest
+    # https://docs.github.com/en/actions/using-containerized-services/about-service-containers#running-jobs-in-a-container
+    container:
+      image: "ubuntu:latest"
+    services:
+      nginx:
+        image: "nginx:latest"
+        ports:
+          - "8080:80"
+    steps:
+      - run: apt-get -qq update && apt-get -yqq install --no-install-recommends curl
+      - run: curl -v http://nginx:80