Compare commits
115 commits
2024-03-11
...
main
Author | SHA1 | Date | |
---|---|---|---|
|
0ccd4cda9a | ||
|
b1b9df5ef4 | ||
|
8084844bf1 | ||
|
f19b377e2d | ||
|
98d572ee58 | ||
|
a7db265415 | ||
|
d301a5b66a | ||
|
73e39f1d49 | ||
|
b30d3d3afe | ||
|
7b9d88ce2d | ||
|
8106ecaed0 | ||
|
d86d9f1cf6 | ||
|
8aec6042ef | ||
|
8a2fbfa013 | ||
|
209a3236d0 | ||
|
8a104e2424 | ||
|
707d6a993b | ||
|
451194fcb2 | ||
|
e134587d40 | ||
|
8d1dc147cf | ||
|
c9e2134fc4 | ||
|
1835203108 | ||
|
be182ffdfa | ||
|
e89fbf5d6a | ||
|
39a5735dc7 | ||
|
65054ab93c | ||
|
b7c39f5956 | ||
|
09c86f6ffa | ||
|
ebd01185d1 | ||
|
af0a149a4d | ||
|
09d179c15b | ||
|
616954df59 | ||
|
0a41e2be4b | ||
|
251cc1c26d | ||
|
d0f60d6ac2 | ||
|
77ceaf4e5d | ||
|
8c59ad2ab3 | ||
|
e3a58d7c36 | ||
|
d5915243ad | ||
|
b9009a7a20 | ||
|
0cc67b8817 | ||
|
7e20ddf928 | ||
|
82a3640cf8 | ||
|
e6fec7324e | ||
|
73f32886f2 | ||
|
b7a8145d09 | ||
|
12c0c4277a | ||
|
3ed38d8e8b | ||
|
0dbf44c657 | ||
|
6dcf9bc6e6 | ||
|
df61c7fcdb | ||
|
36e0261150 | ||
|
46dc2ffe80 | ||
|
054caec791 | ||
|
5a80a044f9 | ||
|
4ca35d2192 | ||
|
5e0d29d665 | ||
|
6dd67253bc | ||
|
09d4b5d6ad | ||
|
a6ec2c129a | ||
|
424fd5e02b | ||
|
6a8c42ac53 | ||
|
c215e0888a | ||
|
6091094e14 | ||
|
8072a00a77 | ||
|
7f7d84b10f | ||
|
15bb54f14e | ||
|
f055d4ae60 | ||
|
f7a846d2f5 | ||
|
cd40f3fe9b | ||
|
adbe229fcb | ||
|
96d6cf8b2c | ||
|
ef5746ba74 | ||
|
4fae81efe4 | ||
|
238a495579 | ||
|
74dcce467d | ||
|
0c60f9749a | ||
|
6b0ef97c52 | ||
|
0806c8b109 | ||
|
0dfb06748e | ||
|
603b44b585 | ||
|
00fbfa754c | ||
|
1b10028447 | ||
|
9cecf94039 | ||
|
29f4123b5c | ||
|
85c3b3b541 | ||
|
2b47c99bb7 | ||
|
3c405a0d94 | ||
|
899a1f206e | ||
|
95ff5bf299 | ||
|
bd10c9a801 | ||
|
3d0cb3d82b | ||
|
7693697f4c | ||
|
4dcb9b7a13 | ||
|
55477899e7 | ||
|
04011b6b78 | ||
|
c8f847d82d | ||
|
74b0fe8ba9 | ||
|
18b4714e38 | ||
|
610358e1c3 | ||
|
1c16fd1967 | ||
|
55b09a04cd | ||
|
5a79256ee4 | ||
|
1bb2ee7098 | ||
|
84a4025bc8 | ||
|
fb4f29fd6d | ||
|
3e5c62977f | ||
|
83bfbcdafd | ||
|
3d65b0f73f | ||
|
854e3e9ec5 | ||
|
db71c41d17 | ||
|
db6e477e25 | ||
|
ceeb6c160c | ||
|
ace4cd47c7 | ||
|
99067a9c1e |
76 changed files with 2353 additions and 1016 deletions
18
.forgejo/cascading-pr-runner
Executable file
18
.forgejo/cascading-pr-runner
Executable file
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
runner=$1
|
||||
runner_pr=$2
|
||||
act=$3
|
||||
act_pr=$4
|
||||
|
||||
url=$(jq --raw-output .head.repo.html_url < $act_pr)
|
||||
test "$url" != null
|
||||
url=${url##http*://}
|
||||
branch=$(jq --raw-output .head.ref < $act_pr)
|
||||
test "$branch" != null
|
||||
cd $runner
|
||||
sed -i -e "s|^replace github.com/nektos/act.*|replace github.com/nektos/act => $url $branch|" go.mod
|
||||
GOPROXY=direct go mod tidy
|
||||
date > last-upgrade
|
30
.forgejo/workflows/cascade-runner.yml
Normal file
30
.forgejo/workflows/cascade-runner.yml
Normal file
|
@ -0,0 +1,30 @@
|
|||
# SPDX-License-Identifier: MIT
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- synchronize
|
||||
- closed
|
||||
jobs:
|
||||
cascade:
|
||||
runs-on: docker
|
||||
if: vars.CASCADE != 'no'
|
||||
container:
|
||||
image: 'docker.io/node:20-bookworm'
|
||||
steps:
|
||||
- uses: https://code.forgejo.org/actions/setup-go@v4
|
||||
with:
|
||||
go-version: "1.21"
|
||||
- uses: actions/cascading-pr@v1
|
||||
with:
|
||||
origin-url: ${{ env.GITHUB_SERVER_URL }}
|
||||
origin-repo: forgejo/act
|
||||
origin-token: ${{ secrets.CASCADING_PR_ORIGIN }}
|
||||
origin-pr: ${{ github.event.pull_request.number }}
|
||||
destination-url: ${{ env.GITHUB_SERVER_URL }}
|
||||
destination-repo: forgejo/runner
|
||||
destination-fork-repo: cascading-pr/runner
|
||||
destination-branch: main
|
||||
destination-token: ${{ secrets.CASCADING_PR_DESTINATION }}
|
||||
close-merge: true
|
||||
update: .forgejo/cascading-pr-runner
|
|
@ -1,7 +1,9 @@
|
|||
name: checks
|
||||
on:
|
||||
- push
|
||||
- pull_request
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
GOPROXY: https://goproxy.io,direct
|
||||
|
@ -11,7 +13,7 @@ env:
|
|||
jobs:
|
||||
lint:
|
||||
name: check and test
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: docker
|
||||
steps:
|
||||
- name: cache go path
|
||||
id: cache-go-path
|
||||
|
@ -33,12 +35,14 @@ jobs:
|
|||
go_cache-
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.20
|
||||
go-version: 1.21
|
||||
- uses: actions/checkout@v3
|
||||
- name: vet checks
|
||||
run: go vet -v ./...
|
||||
- name: build
|
||||
run: go build -v ./...
|
||||
- name: build without docker
|
||||
run: go build -tags WITHOUT_DOCKER -v ./...
|
||||
- name: test
|
||||
run: go test -v ./pkg/jobparser
|
||||
run: go test -v ./pkg/jobparser ./pkg/model ./pkg/exprparser
|
||||
# TODO test more packages
|
1
.github/FUNDING.yml
vendored
1
.github/FUNDING.yml
vendored
|
@ -1 +0,0 @@
|
|||
github: cplee
|
88
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
88
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
|
@ -1,88 +0,0 @@
|
|||
name: Bug report
|
||||
description: Use this template for reporting bugs/issues.
|
||||
labels:
|
||||
- 'kind/bug'
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this bug report!
|
||||
- type: textarea
|
||||
id: act-debug
|
||||
attributes:
|
||||
label: Bug report info
|
||||
render: plain text
|
||||
description: |
|
||||
Output of `act --bug-report`
|
||||
placeholder: |
|
||||
act --bug-report
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: act-command
|
||||
attributes:
|
||||
label: Command used with act
|
||||
description: |
|
||||
Please paste your whole command
|
||||
placeholder: |
|
||||
act -P ubuntu-latest=node:12 -v -d ...
|
||||
render: sh
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: what-happened
|
||||
attributes:
|
||||
label: Describe issue
|
||||
description: |
|
||||
Also tell us what did you expect to happen?
|
||||
placeholder: |
|
||||
Describe issue
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
id: repo
|
||||
attributes:
|
||||
label: Link to GitHub repository
|
||||
description: |
|
||||
Provide link to GitHub repository, you can skip it if the repository is private or you don't have it on GitHub, otherwise please provide it as it might help us troubleshoot problem
|
||||
placeholder: |
|
||||
https://github.com/nektos/act
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: workflow
|
||||
attributes:
|
||||
label: Workflow content
|
||||
description: |
|
||||
Please paste your **whole** workflow here
|
||||
placeholder: |
|
||||
name: My workflow
|
||||
on: ['push', 'schedule']
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
KEY: VAL
|
||||
[...]
|
||||
render: yml
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: Relevant log output
|
||||
description: |
|
||||
Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. Please verify that the log output doesn't contain any sensitive data.
|
||||
render: sh
|
||||
placeholder: |
|
||||
Use `act -v` for verbose output
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: additional-info
|
||||
attributes:
|
||||
label: Additional information
|
||||
placeholder: |
|
||||
Additional information that doesn't fit elsewhere
|
||||
validations:
|
||||
required: false
|
14
.github/ISSUE_TEMPLATE/config.yml
vendored
14
.github/ISSUE_TEMPLATE/config.yml
vendored
|
@ -1,14 +0,0 @@
|
|||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: Start a discussion
|
||||
url: https://github.com/nektos/act/discussions/new
|
||||
about: You can ask for help here!
|
||||
- name: Ask on Gitter
|
||||
url: https://gitter.im/nektos/act
|
||||
about: You can ask for help here!
|
||||
- name: Ask on Gitter (via Matrix)
|
||||
url: https://matrix.to/#/#nektos_act:gitter.im?via=gitter.im&via=matrix.org
|
||||
about: If you prefer Matrix over Gitter
|
||||
- name: Want to contribute to act?
|
||||
url: https://github.com/nektos/act/blob/master/CONTRIBUTING.md
|
||||
about: Be sure to read contributing guidelines!
|
28
.github/ISSUE_TEMPLATE/feature_template.yml
vendored
28
.github/ISSUE_TEMPLATE/feature_template.yml
vendored
|
@ -1,28 +0,0 @@
|
|||
name: Feature request
|
||||
description: Use this template for requesting a feature/enhancement.
|
||||
labels:
|
||||
- 'kind/feature-request'
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please note that incompatibility with GitHub Actions should be opened as a bug report, not a new feature.
|
||||
- type: input
|
||||
id: act-version
|
||||
attributes:
|
||||
label: Act version
|
||||
description: |
|
||||
What version of `act` are you using? Version can be obtained via `act --version`
|
||||
If you've built it from source, please provide commit hash
|
||||
placeholder: |
|
||||
act --version
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: feature
|
||||
attributes:
|
||||
label: Feature description
|
||||
description: Describe feature that you would like to see
|
||||
placeholder: ...
|
||||
validations:
|
||||
required: true
|
18
.github/ISSUE_TEMPLATE/wiki_issue.yml
vendored
18
.github/ISSUE_TEMPLATE/wiki_issue.yml
vendored
|
@ -1,18 +0,0 @@
|
|||
name: Wiki issue
|
||||
description: Report issue/improvement ragarding documentation (wiki)
|
||||
labels:
|
||||
- 'kind/discussion'
|
||||
- 'area/docs'
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this issue!
|
||||
- type: textarea
|
||||
id: details
|
||||
attributes:
|
||||
label: Details
|
||||
description: |
|
||||
Describe issue
|
||||
validations:
|
||||
required: true
|
20
.github/actions/choco/Dockerfile
vendored
20
.github/actions/choco/Dockerfile
vendored
|
@ -1,20 +0,0 @@
|
|||
FROM alpine:3.17
|
||||
|
||||
ARG CHOCOVERSION=1.1.0
|
||||
|
||||
RUN apk add --no-cache bash ca-certificates git \
|
||||
&& apk --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing add mono mono-dev \
|
||||
&& cert-sync /etc/ssl/certs/ca-certificates.crt \
|
||||
&& wget "https://github.com/chocolatey/choco/archive/${CHOCOVERSION}.tar.gz" -O- | tar -xzf - \
|
||||
&& cd choco-"${CHOCOVERSION}" \
|
||||
&& chmod +x build.sh zip.sh \
|
||||
&& ./build.sh -v \
|
||||
&& mv ./code_drop/chocolatey/console /opt/chocolatey \
|
||||
&& mkdir -p /opt/chocolatey/lib \
|
||||
&& rm -rf /choco-"${CHOCOVERSION}" \
|
||||
&& apk del mono-dev \
|
||||
&& rm -rf /var/cache/apk/*
|
||||
|
||||
ENV ChocolateyInstall=/opt/chocolatey
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
16
.github/actions/choco/action.yml
vendored
16
.github/actions/choco/action.yml
vendored
|
@ -1,16 +0,0 @@
|
|||
name: 'Chocolatey Packager'
|
||||
description: 'Create the choco package and push it'
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version of package'
|
||||
required: false
|
||||
apiKey:
|
||||
description: 'API Key for chocolately'
|
||||
required: false
|
||||
push:
|
||||
description: 'Option for if package is going to be pushed'
|
||||
required: false
|
||||
default: 'false'
|
||||
runs:
|
||||
using: 'docker'
|
||||
image: 'Dockerfile'
|
31
.github/actions/choco/entrypoint.sh
vendored
31
.github/actions/choco/entrypoint.sh
vendored
|
@ -1,31 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
function choco {
|
||||
mono /opt/chocolatey/choco.exe "$@" --allow-unofficial --nocolor
|
||||
}
|
||||
|
||||
function get_version {
|
||||
local version=${INPUT_VERSION:-$(git describe --tags)}
|
||||
version=(${version//[!0-9.-]/})
|
||||
local version_parts=(${version//-/ })
|
||||
version=${version_parts[0]}
|
||||
if [ ${#version_parts[@]} -gt 1 ]; then
|
||||
version=${version_parts}.${version_parts[1]}
|
||||
fi
|
||||
echo "$version"
|
||||
}
|
||||
|
||||
## Determine the version to pack
|
||||
VERSION=$(get_version)
|
||||
echo "Packing version ${VERSION} of act"
|
||||
rm -f act-cli.*.nupkg
|
||||
mkdir -p tools
|
||||
cp LICENSE tools/LICENSE.txt
|
||||
cp VERIFICATION tools/VERIFICATION.txt
|
||||
cp dist/act_windows_amd64_v1/act.exe tools/
|
||||
choco pack act-cli.nuspec --version ${VERSION}
|
||||
if [[ "$INPUT_PUSH" == "true" ]]; then
|
||||
choco push act-cli.${VERSION}.nupkg --api-key ${INPUT_APIKEY} -s https://push.chocolatey.org/ --timeout 180
|
||||
fi
|
77
.github/actions/run-tests/action.yml
vendored
77
.github/actions/run-tests/action.yml
vendored
|
@ -1,77 +0,0 @@
|
|||
name: 'run-tests'
|
||||
description: 'Runs go test and upload a step summary'
|
||||
inputs:
|
||||
filter:
|
||||
description: 'The go test pattern for the tests to run'
|
||||
required: false
|
||||
default: ''
|
||||
upload-logs-name:
|
||||
description: 'Choose the name of the log artifact'
|
||||
required: false
|
||||
default: logs-${{ github.job }}-${{ strategy.job-index }}
|
||||
upload-logs:
|
||||
description: 'If true uploads logs of each tests as an artifact'
|
||||
required: false
|
||||
default: 'true'
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: actions/github-script@v6
|
||||
with:
|
||||
github-token: none # No reason to grant access to the GITHUB_TOKEN
|
||||
script: |
|
||||
let myOutput = '';
|
||||
var fs = require('fs');
|
||||
var uploadLogs = process.env.UPLOAD_LOGS === 'true';
|
||||
if(uploadLogs) {
|
||||
await io.mkdirP('logs');
|
||||
}
|
||||
var filename = null;
|
||||
const options = {};
|
||||
options.ignoreReturnCode = true;
|
||||
options.env = Object.assign({}, process.env);
|
||||
delete options.env.ACTIONS_RUNTIME_URL;
|
||||
delete options.env.ACTIONS_RUNTIME_TOKEN;
|
||||
delete options.env.ACTIONS_CACHE_URL;
|
||||
options.listeners = {
|
||||
stdout: (data) => {
|
||||
for(line of data.toString().split('\n')) {
|
||||
if(/^\s*(===\s[^\s]+\s|---\s[^\s]+:\s)/.test(line)) {
|
||||
if(uploadLogs) {
|
||||
var runprefix = "=== RUN ";
|
||||
if(line.startsWith(runprefix)) {
|
||||
filename = "logs/" + line.substring(runprefix.length).replace(/[^A-Za-z0-9]/g, '-') + ".txt";
|
||||
fs.writeFileSync(filename, line + "\n");
|
||||
} else if(filename) {
|
||||
fs.appendFileSync(filename, line + "\n");
|
||||
filename = null;
|
||||
}
|
||||
}
|
||||
myOutput += line + "\n";
|
||||
} else if(filename) {
|
||||
fs.appendFileSync(filename, line + "\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
var args = ['test', '-v', '-cover', '-coverprofile=coverage.txt', '-covermode=atomic', '-timeout', '20m'];
|
||||
var filter = process.env.FILTER;
|
||||
if(filter) {
|
||||
args.push('-run');
|
||||
args.push(filter);
|
||||
}
|
||||
args.push('./...');
|
||||
var exitcode = await exec.exec('go', args, options);
|
||||
if(process.env.GITHUB_STEP_SUMMARY) {
|
||||
core.summary.addCodeBlock(myOutput);
|
||||
await core.summary.write();
|
||||
}
|
||||
process.exit(exitcode);
|
||||
env:
|
||||
FILTER: ${{ inputs.filter }}
|
||||
UPLOAD_LOGS: ${{ inputs.upload-logs }}
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && inputs.upload-logs == 'true' && !env.ACT
|
||||
with:
|
||||
name: ${{ inputs.upload-logs-name }}
|
||||
path: logs
|
15
.github/dependabot.yml
vendored
15
.github/dependabot.yml
vendored
|
@ -1,15 +0,0 @@
|
|||
# To get started with Dependabot version updates, you'll need to specify which
|
||||
# package ecosystems to update and where the package manifests are located.
|
||||
# Please see the documentation for all configuration options:
|
||||
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
||||
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: 'github-actions'
|
||||
directory: '/'
|
||||
schedule:
|
||||
interval: 'weekly'
|
||||
- package-ecosystem: 'gomod'
|
||||
directory: '/'
|
||||
schedule:
|
||||
interval: 'weekly'
|
1
.github/workflows/.gitignore
vendored
1
.github/workflows/.gitignore
vendored
|
@ -1 +0,0 @@
|
|||
test-*.yml
|
181
.github/workflows/checks.yml
vendored
181
.github/workflows/checks.yml
vendored
|
@ -1,181 +0,0 @@
|
|||
name: checks
|
||||
on: [pull_request, workflow_dispatch]
|
||||
|
||||
concurrency:
|
||||
cancel-in-progress: true
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
|
||||
env:
|
||||
ACT_OWNER: ${{ github.repository_owner }}
|
||||
ACT_REPOSITORY: ${{ github.repository }}
|
||||
CGO_ENABLED: 0
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
- uses: golangci/golangci-lint-action@v3.7.0
|
||||
with:
|
||||
version: v1.53
|
||||
only-new-issues: true
|
||||
- uses: megalinter/megalinter/flavors/go@v7.4.0
|
||||
env:
|
||||
DEFAULT_BRANCH: master
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
VALIDATE_ALL_CODEBASE: false
|
||||
GITHUB_STATUS_REPORTER: ${{ !env.ACT }}
|
||||
GITHUB_COMMENT_REPORTER: ${{ !env.ACT }}
|
||||
|
||||
test-linux:
|
||||
name: test-linux
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
- uses: actions/cache@v3
|
||||
if: ${{ !env.ACT }}
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Run Tests
|
||||
uses: ./.github/actions/run-tests
|
||||
with:
|
||||
upload-logs-name: logs-linux
|
||||
- name: Run act from cli
|
||||
run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
|
||||
- name: Upload Codecov report
|
||||
uses: codecov/codecov-action@v3.1.4
|
||||
with:
|
||||
files: coverage.txt
|
||||
fail_ci_if_error: true # optional (default = false)
|
||||
|
||||
test-host:
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- windows-latest
|
||||
- macos-latest
|
||||
name: test-${{matrix.os}}
|
||||
runs-on: ${{matrix.os}}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
- name: Run Tests
|
||||
uses: ./.github/actions/run-tests
|
||||
with:
|
||||
filter: '^TestRunEventHostEnvironment$'
|
||||
upload-logs-name: logs-${{ matrix.os }}
|
||||
|
||||
snapshot:
|
||||
name: snapshot
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
- uses: actions/cache@v3
|
||||
if: ${{ !env.ACT }}
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v5
|
||||
with:
|
||||
version: latest
|
||||
args: release --snapshot --clean
|
||||
- name: Capture x86_64 (64-bit) Linux binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-linux-amd64
|
||||
path: dist/act_linux_amd64_v1/act
|
||||
- name: Capture i386 (32-bit) Linux binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-linux-i386
|
||||
path: dist/act_linux_386/act
|
||||
- name: Capture arm64 (64-bit) Linux binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-linux-arm64
|
||||
path: dist/act_linux_arm64/act
|
||||
- name: Capture armv6 (32-bit) Linux binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-linux-armv6
|
||||
path: dist/act_linux_arm_6/act
|
||||
- name: Capture armv7 (32-bit) Linux binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-linux-armv7
|
||||
path: dist/act_linux_arm_7/act
|
||||
- name: Capture x86_64 (64-bit) Windows binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-windows-amd64
|
||||
path: dist/act_windows_amd64_v1/act.exe
|
||||
- name: Capture i386 (32-bit) Windows binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-windows-i386
|
||||
path: dist/act_windows_386/act.exe
|
||||
- name: Capture arm64 (64-bit) Windows binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-windows-arm64
|
||||
path: dist/act_windows_arm64/act.exe
|
||||
- name: Capture armv7 (32-bit) Windows binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-windows-armv7
|
||||
path: dist/act_windows_arm_7/act.exe
|
||||
- name: Capture x86_64 (64-bit) MacOS binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-macos-amd64
|
||||
path: dist/act_darwin_amd64_v1/act
|
||||
- name: Capture arm64 (64-bit) MacOS binary
|
||||
if: ${{ !env.ACT }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: act-macos-arm64
|
||||
path: dist/act_darwin_arm64/act
|
||||
- name: Chocolatey
|
||||
uses: ./.github/actions/choco
|
||||
with:
|
||||
version: v0.0.0-pr
|
29
.github/workflows/promote.yml
vendored
29
.github/workflows/promote.yml
vendored
|
@ -1,29 +0,0 @@
|
|||
name: promote
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 2 1 * *'
|
||||
workflow_dispatch: {}
|
||||
|
||||
jobs:
|
||||
release:
|
||||
name: promote
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: master
|
||||
token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
|
||||
- uses: fregante/setup-git-user@v2
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
- uses: actions/cache@v3
|
||||
if: ${{ !env.ACT }}
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- run: make promote
|
61
.github/workflows/release.yml
vendored
61
.github/workflows/release.yml
vendored
|
@ -1,61 +0,0 @@
|
|||
name: release
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- v*
|
||||
|
||||
jobs:
|
||||
release:
|
||||
name: release
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
- uses: actions/cache@v3
|
||||
if: ${{ !env.ACT }}
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v5
|
||||
with:
|
||||
version: latest
|
||||
args: release --clean
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
|
||||
- name: Winget
|
||||
uses: vedantmgoyal2009/winget-releaser@v2
|
||||
with:
|
||||
identifier: nektos.act
|
||||
installers-regex: '_Windows_\w+\.zip$'
|
||||
token: ${{ secrets.WINGET_TOKEN }}
|
||||
- name: Chocolatey
|
||||
uses: ./.github/actions/choco
|
||||
with:
|
||||
version: ${{ github.ref }}
|
||||
apiKey: ${{ secrets.CHOCO_APIKEY }}
|
||||
push: true
|
||||
- name: GitHub CLI extension
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
github-token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
|
||||
script: |
|
||||
const mainRef = (await github.rest.git.getRef({
|
||||
owner: 'nektos',
|
||||
repo: 'gh-act',
|
||||
ref: 'heads/main',
|
||||
})).data;
|
||||
console.log(mainRef);
|
||||
github.rest.git.createRef({
|
||||
owner: 'nektos',
|
||||
repo: 'gh-act',
|
||||
ref: context.ref,
|
||||
sha: mainRef.object.sha,
|
||||
});
|
23
.github/workflows/stale.yml
vendored
23
.github/workflows/stale.yml
vendored
|
@ -1,23 +0,0 @@
|
|||
name: 'Close stale issues'
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
name: Stale
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v8
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: 'Issue is stale and will be closed in 14 days unless there is new activity'
|
||||
stale-pr-message: 'PR is stale and will be closed in 14 days unless there is new activity'
|
||||
stale-issue-label: 'stale'
|
||||
exempt-issue-labels: 'stale-exempt,kind/feature-request'
|
||||
stale-pr-label: 'stale'
|
||||
exempt-pr-labels: 'stale-exempt'
|
||||
remove-stale-when-updated: 'True'
|
||||
operations-per-run: 500
|
||||
days-before-stale: 180
|
||||
days-before-close: 14
|
|
@ -71,6 +71,10 @@ pull_request_rules:
|
|||
- and:
|
||||
- 'approved-reviews-by=@nektos/act-maintainers'
|
||||
- '#approved-reviews-by>=2'
|
||||
- and:
|
||||
- 'author=@nektos/act-maintainers'
|
||||
- 'approved-reviews-by=@nektos/act-maintainers'
|
||||
- '#approved-reviews-by>=1'
|
||||
- -draft
|
||||
- -merged
|
||||
- -closed
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
## Forking rules
|
||||
|
||||
This is a custom fork of [nektos/act](https://github.com/nektos/act/), for the purpose of serving [act_runner](https://gitea.com/gitea/act_runner).
|
||||
This is a custom fork of [nektos/act](https://github.com/nektos/act/), for the [Forgejo runner](https://code.forgejo.org/forgejo/runner).
|
||||
|
||||
It cannot be used as command line tool anymore, but only as a library.
|
||||
|
||||
|
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
|||
0.2.52
|
||||
0.2.59
|
|
@ -55,7 +55,10 @@ type Input struct {
|
|||
replaceGheActionTokenWithGithubCom string
|
||||
matrix []string
|
||||
actionCachePath string
|
||||
actionOfflineMode bool
|
||||
logPrefixJobID bool
|
||||
networkName string
|
||||
useNewActionCache bool
|
||||
}
|
||||
|
||||
func (i *Input) resolve(path string) string {
|
||||
|
|
|
@ -15,7 +15,7 @@ func (i *Input) newPlatforms() map[string]string {
|
|||
for _, p := range i.platforms {
|
||||
pParts := strings.Split(p, "=")
|
||||
if len(pParts) == 2 {
|
||||
platforms[pParts[0]] = pParts[1]
|
||||
platforms[strings.ToLower(pParts[0])] = pParts[1]
|
||||
}
|
||||
}
|
||||
return platforms
|
||||
|
|
66
cmd/root.go
66
cmd/root.go
|
@ -14,6 +14,7 @@ import (
|
|||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/adrg/xdg"
|
||||
"github.com/andreaskoch/go-fswatch"
|
||||
docker_container "github.com/docker/docker/api/types/container"
|
||||
"github.com/joho/godotenv"
|
||||
gitignore "github.com/sabhiram/go-gitignore"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
@ -96,6 +97,9 @@ func Execute(ctx context.Context, version string) {
|
|||
rootCmd.PersistentFlags().StringVarP(&input.cacheServerAddr, "cache-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the cache server binds.")
|
||||
rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the artifact server listens. 0 means a randomly available port.")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.actionCachePath, "action-cache-path", "", filepath.Join(CacheHomeDir, "act"), "Defines the path where the actions get cached and host workspaces created.")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.actionOfflineMode, "action-offline-mode", "", false, "If action contents exists, it will not be fetch and pull again. If turn on this,will turn off force pull")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.networkName, "network", "", "host", "Sets a docker network name. Defaults to host.")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.useNewActionCache, "use-new-action-cache", "", false, "Enable using the new Action Cache for storing Actions locally")
|
||||
rootCmd.SetArgs(args())
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
|
@ -103,23 +107,22 @@ func Execute(ctx context.Context, version string) {
|
|||
}
|
||||
}
|
||||
|
||||
// Return locations where Act's config can be found in order: XDG spec, .actrc in HOME directory, .actrc in invocation directory
|
||||
func configLocations() []string {
|
||||
configFileName := ".actrc"
|
||||
|
||||
// reference: https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html
|
||||
var actrcXdg string
|
||||
for _, fileName := range []string{"act/actrc", configFileName} {
|
||||
if foundConfig, err := xdg.SearchConfigFile(fileName); foundConfig != "" && err == nil {
|
||||
actrcXdg = foundConfig
|
||||
break
|
||||
}
|
||||
homePath := filepath.Join(UserHomeDir, configFileName)
|
||||
invocationPath := filepath.Join(".", configFileName)
|
||||
|
||||
// Though named xdg, adrg's lib support macOS and Windows config paths as well
|
||||
// It also takes cares of creating the parent folder so we don't need to bother later
|
||||
specPath, err := xdg.ConfigFile("act/actrc")
|
||||
if err != nil {
|
||||
specPath = homePath
|
||||
}
|
||||
|
||||
return []string{
|
||||
filepath.Join(UserHomeDir, configFileName),
|
||||
actrcXdg,
|
||||
filepath.Join(".", configFileName),
|
||||
}
|
||||
// This order should be enforced since the survey part relies on it
|
||||
return []string{specPath, homePath, invocationPath}
|
||||
}
|
||||
|
||||
var commonSocketPaths = []string{
|
||||
|
@ -265,7 +268,8 @@ func readArgsFile(file string, split bool) []string {
|
|||
}()
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
arg := strings.TrimSpace(scanner.Text())
|
||||
arg := os.ExpandEnv(strings.TrimSpace(scanner.Text()))
|
||||
|
||||
if strings.HasPrefix(arg, "-") && split {
|
||||
args = append(args, regexp.MustCompile(`\s`).Split(arg, 2)...)
|
||||
} else if !split {
|
||||
|
@ -291,19 +295,17 @@ func cleanup(inputs *Input) func(*cobra.Command, []string) {
|
|||
}
|
||||
}
|
||||
|
||||
func parseEnvs(env []string, envs map[string]string) bool {
|
||||
if env != nil {
|
||||
for _, envVar := range env {
|
||||
e := strings.SplitN(envVar, `=`, 2)
|
||||
if len(e) == 2 {
|
||||
envs[e[0]] = e[1]
|
||||
} else {
|
||||
envs[e[0]] = ""
|
||||
}
|
||||
func parseEnvs(env []string) map[string]string {
|
||||
envs := make(map[string]string, len(env))
|
||||
for _, envVar := range env {
|
||||
e := strings.SplitN(envVar, `=`, 2)
|
||||
if len(e) == 2 {
|
||||
envs[e[0]] = e[1]
|
||||
} else {
|
||||
envs[e[0]] = ""
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return envs
|
||||
}
|
||||
|
||||
func readYamlFile(file string) (map[string]string, error) {
|
||||
|
@ -409,13 +411,11 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
|||
}
|
||||
|
||||
log.Debugf("Loading environment from %s", input.Envfile())
|
||||
envs := make(map[string]string)
|
||||
_ = parseEnvs(input.envs, envs)
|
||||
envs := parseEnvs(input.envs)
|
||||
_ = readEnvs(input.Envfile(), envs)
|
||||
|
||||
log.Debugf("Loading action inputs from %s", input.Inputfile())
|
||||
inputs := make(map[string]string)
|
||||
_ = parseEnvs(input.inputs, inputs)
|
||||
inputs := parseEnvs(input.inputs)
|
||||
_ = readEnvs(input.Inputfile(), inputs)
|
||||
|
||||
log.Debugf("Loading secrets from %s", input.Secretfile())
|
||||
|
@ -552,6 +552,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
|||
}
|
||||
}
|
||||
if !cfgFound && len(cfgLocations) > 0 {
|
||||
// The first config location refers to the global config folder one
|
||||
if err := defaultImageSurvey(cfgLocations[0]); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
@ -578,11 +579,12 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
|||
EventName: eventName,
|
||||
EventPath: input.EventPath(),
|
||||
DefaultBranch: defaultbranch,
|
||||
ForcePull: input.forcePull,
|
||||
ForcePull: !input.actionOfflineMode && input.forcePull,
|
||||
ForceRebuild: input.forceRebuild,
|
||||
ReuseContainers: input.reuseContainers,
|
||||
Workdir: input.Workdir(),
|
||||
ActionCacheDir: input.actionCachePath,
|
||||
ActionOfflineMode: input.actionOfflineMode,
|
||||
BindWorkdir: input.bindWorkdir,
|
||||
LogOutput: !input.noOutput,
|
||||
JSONLogger: input.jsonLogger,
|
||||
|
@ -612,6 +614,12 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
|||
ReplaceGheActionWithGithubCom: input.replaceGheActionWithGithubCom,
|
||||
ReplaceGheActionTokenWithGithubCom: input.replaceGheActionTokenWithGithubCom,
|
||||
Matrix: matrixes,
|
||||
ContainerNetworkMode: docker_container.NetworkMode(input.networkName),
|
||||
}
|
||||
if input.useNewActionCache {
|
||||
config.ActionCache = &runner.GoGitActionCache{
|
||||
Path: config.ActionCacheDir,
|
||||
}
|
||||
}
|
||||
r, err := runner.New(config)
|
||||
if err != nil {
|
||||
|
|
76
go.mod
76
go.mod
|
@ -1,92 +1,102 @@
|
|||
module github.com/nektos/act
|
||||
|
||||
go 1.20
|
||||
go 1.21.13
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/adrg/xdg v0.4.0
|
||||
github.com/andreaskoch/go-fswatch v1.0.0
|
||||
github.com/creack/pty v1.1.18
|
||||
github.com/docker/cli v24.0.6+incompatible
|
||||
github.com/creack/pty v1.1.21
|
||||
github.com/docker/cli v25.0.3+incompatible
|
||||
github.com/docker/distribution v2.8.3+incompatible
|
||||
github.com/docker/docker v24.0.6+incompatible // 24.0 branch
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/docker/docker v25.0.6+incompatible
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/go-git/go-billy/v5 v5.5.0
|
||||
github.com/go-git/go-git/v5 v5.9.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/go-git/go-git/v5 v5.11.0
|
||||
github.com/imdario/mergo v0.3.16
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/julienschmidt/httprouter v1.3.0
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
|
||||
github.com/mattn/go-isatty v0.0.19
|
||||
github.com/moby/buildkit v0.12.2
|
||||
github.com/mattn/go-isatty v0.0.20
|
||||
github.com/moby/buildkit v0.13.2
|
||||
github.com/moby/patternmatcher v0.6.0
|
||||
github.com/opencontainers/image-spec v1.1.0-rc5
|
||||
github.com/opencontainers/selinux v1.11.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rhysd/actionlint v1.6.26
|
||||
github.com/rhysd/actionlint v1.6.27
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a
|
||||
go.etcd.io/bbolt v1.3.7
|
||||
golang.org/x/term v0.13.0
|
||||
go.etcd.io/bbolt v1.3.9
|
||||
golang.org/x/term v0.18.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gotest.tools/v3 v3.5.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
|
||||
github.com/acomagu/bufpipe v1.0.4 // indirect
|
||||
github.com/cloudflare/circl v1.3.3 // indirect
|
||||
github.com/containerd/containerd v1.7.2 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/containerd/containerd v1.7.13 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/fatih/color v1.15.0 // indirect
|
||||
github.com/fatih/color v1.16.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
|
||||
github.com/go-logr/logr v1.3.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/compress v1.16.3 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/mitchellh/mapstructure v1.1.2 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
|
||||
github.com/moby/sys/user v0.1.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/runc v1.1.7 // indirect
|
||||
github.com/pjbgf/sha1cd v0.3.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/skeema/knownhosts v1.2.0 // indirect
|
||||
github.com/skeema/knownhosts v1.2.1 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.3 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
golang.org/x/crypto v0.13.0 // indirect
|
||||
golang.org/x/mod v0.12.0 // indirect
|
||||
golang.org/x/net v0.15.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/tools v0.13.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
|
||||
go.opentelemetry.io/otel v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.21.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/mod v0.13.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.18.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/tools v0.14.0 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
)
|
||||
|
|
198
go.sum
198
go.sum
|
@ -1,37 +1,44 @@
|
|||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
||||
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
|
||||
github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
|
||||
github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
|
||||
github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
|
||||
github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls=
|
||||
github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E=
|
||||
github.com/andreaskoch/go-fswatch v1.0.0 h1:la8nP/HiaFCxP2IM6NZNUCoxgLWuyNFgH0RligBbnJU=
|
||||
github.com/andreaskoch/go-fswatch v1.0.0/go.mod h1:r5/iV+4jfwoY2sYqBkg8vpF04ehOvEl4qPptVGdxmqo=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||
github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
|
||||
github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
|
||||
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
|
||||
github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is=
|
||||
github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
|
||||
github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -39,43 +46,55 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
|||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY=
|
||||
github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
|
||||
github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
|
||||
github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
|
||||
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg=
|
||||
github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
|
||||
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
|
||||
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
|
||||
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
|
||||
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
|
||||
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
|
||||
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
|
||||
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8=
|
||||
github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY=
|
||||
github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
|
||||
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
|
||||
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
|
@ -94,80 +113,78 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4
|
|||
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
|
||||
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
|
||||
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/moby/buildkit v0.12.2 h1:B7guBgY6sfk4dBlv/ORUxyYlp0UojYaYyATgtNwSCXc=
|
||||
github.com/moby/buildkit v0.12.2/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI=
|
||||
github.com/moby/buildkit v0.13.2 h1:nXNszM4qD9E7QtG7bFWPnDI1teUQFQglBzon/IU3SzI=
|
||||
github.com/moby/buildkit v0.13.2/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI=
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
|
||||
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
|
||||
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
|
||||
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
|
||||
github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk=
|
||||
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
|
||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
|
||||
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rhysd/actionlint v1.6.26 h1:zi7jPZf3Ks14gCXYAAL47uBziyFlX7+Xwilqhexct9g=
|
||||
github.com/rhysd/actionlint v1.6.26/go.mod h1:TIj1DlCgtYLOv5CH9wCK+WJTOr1qAdnFzkGi0IgSCO4=
|
||||
github.com/rhysd/actionlint v1.6.27 h1:xxwe8YmveBcC8lydW6GoHMGmB6H/MTqUU60F2p10wjw=
|
||||
github.com/rhysd/actionlint v1.6.27/go.mod h1:m2nFUjAnOrxCMXuOMz9evYBRCLUsMnKY2IJl/N5umbk=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
|
||||
github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
|
||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
|
||||
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
|
@ -184,8 +201,9 @@ github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a h1:oIi7H/bwFUY
|
|||
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a/go.mod h1:iSvujNDmpZ6eQX+bg/0X3lF7LEmZ8N77g2a/J/+Zt2U=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
|
@ -194,8 +212,24 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
|
||||
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
|
@ -203,15 +237,14 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
|||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
|
||||
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
@ -222,27 +255,26 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
|
|||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
@ -252,15 +284,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
|
@ -268,25 +300,35 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
|
||||
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@ -296,7 +338,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
@ -386,7 +387,12 @@ func (h *Handler) findCache(db *bolthold.Store, keys []string, version string) (
|
|||
|
||||
for _, prefix := range keys[1:] {
|
||||
found := false
|
||||
if err := db.ForEach(bolthold.Where("Key").Ge(prefix).And("Version").Eq(version).SortBy("Key"), func(v *Cache) error {
|
||||
prefixPattern := fmt.Sprintf("^%s", regexp.QuoteMeta(prefix))
|
||||
re, err := regexp.Compile(prefixPattern)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if err := db.ForEach(bolthold.Where("Key").RegExp(re).And("Version").Eq(version).SortBy("CreatedAt").Reverse(), func(v *Cache) error {
|
||||
if !strings.HasPrefix(v.Key, prefix) {
|
||||
return stop
|
||||
}
|
||||
|
|
|
@ -221,10 +221,14 @@ func findGitSlug(url string, githubInstance string) (string, string, error) {
|
|||
|
||||
// NewGitCloneExecutorInput the input for the NewGitCloneExecutor
|
||||
type NewGitCloneExecutorInput struct {
|
||||
URL string
|
||||
Ref string
|
||||
Dir string
|
||||
Token string
|
||||
URL string
|
||||
Ref string
|
||||
Dir string
|
||||
Token string
|
||||
OfflineMode bool
|
||||
|
||||
// For Gitea
|
||||
InsecureSkipTLS bool
|
||||
}
|
||||
|
||||
// CloneIfRequired ...
|
||||
|
@ -246,6 +250,8 @@ func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input
|
|||
cloneOptions := git.CloneOptions{
|
||||
URL: input.URL,
|
||||
Progress: progressWriter,
|
||||
|
||||
InsecureSkipTLS: input.InsecureSkipTLS, // For Gitea
|
||||
}
|
||||
if input.Token != "" {
|
||||
cloneOptions.Auth = &http.BasicAuth{
|
||||
|
@ -302,12 +308,21 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||
return err
|
||||
}
|
||||
|
||||
isOfflineMode := input.OfflineMode
|
||||
|
||||
// fetch latest changes
|
||||
fetchOptions, pullOptions := gitOptions(input.Token)
|
||||
|
||||
err = r.Fetch(&fetchOptions)
|
||||
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
|
||||
return err
|
||||
if input.InsecureSkipTLS { // For Gitea
|
||||
fetchOptions.InsecureSkipTLS = true
|
||||
pullOptions.InsecureSkipTLS = true
|
||||
}
|
||||
|
||||
if !isOfflineMode {
|
||||
err = r.Fetch(&fetchOptions)
|
||||
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var hash *plumbing.Hash
|
||||
|
@ -367,9 +382,10 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
|
||||
logger.Debugf("Unable to pull %s: %v", refName, err)
|
||||
if !isOfflineMode {
|
||||
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
|
||||
logger.Debugf("Unable to pull %s: %v", refName, err)
|
||||
}
|
||||
}
|
||||
logger.Debugf("Cloned %s to %s", input.URL, input.Dir)
|
||||
|
||||
|
|
|
@ -4,34 +4,37 @@ import (
|
|||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/nektos/act/pkg/common"
|
||||
)
|
||||
|
||||
// NewContainerInput the input for the New function
|
||||
type NewContainerInput struct {
|
||||
Image string
|
||||
Username string
|
||||
Password string
|
||||
Entrypoint []string
|
||||
Cmd []string
|
||||
WorkingDir string
|
||||
Env []string
|
||||
Binds []string
|
||||
Mounts map[string]string
|
||||
Name string
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
NetworkMode string
|
||||
Privileged bool
|
||||
UsernsMode string
|
||||
Platform string
|
||||
Options string
|
||||
Image string
|
||||
Username string
|
||||
Password string
|
||||
Entrypoint []string
|
||||
Cmd []string
|
||||
WorkingDir string
|
||||
Env []string
|
||||
Binds []string
|
||||
Mounts map[string]string
|
||||
Name string
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
NetworkMode string
|
||||
Privileged bool
|
||||
UsernsMode string
|
||||
Platform string
|
||||
Options string
|
||||
NetworkAliases []string
|
||||
ExposedPorts nat.PortSet
|
||||
PortBindings nat.PortMap
|
||||
|
||||
// Gitea specific
|
||||
AutoRemove bool
|
||||
|
||||
NetworkAliases []string
|
||||
ValidVolumes []string
|
||||
ValidVolumes []string
|
||||
}
|
||||
|
||||
// FileEntry is a file to copy to a container
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go
|
||||
// appended with license information.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
|
@ -9,17 +9,27 @@ import (
|
|||
"github.com/nektos/act/pkg/common"
|
||||
)
|
||||
|
||||
func NewDockerNetworkCreateExecutor(name string) common.Executor {
|
||||
func NewDockerNetworkCreateExecutor(name string, config *types.NetworkCreate) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
cli, err := GetDockerClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
_, err = cli.NetworkCreate(ctx, name, types.NetworkCreate{
|
||||
Driver: "bridge",
|
||||
Scope: "local",
|
||||
})
|
||||
// Only create the network if it doesn't exist
|
||||
networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, network := range networks {
|
||||
if network.Name == name {
|
||||
common.Logger(ctx).Debugf("Network %v exists", name)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
_, err = cli.NetworkCreate(ctx, name, *config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -34,7 +44,32 @@ func NewDockerNetworkRemoveExecutor(name string) common.Executor {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
return cli.NetworkRemove(ctx, name)
|
||||
// Make shure that all network of the specified name are removed
|
||||
// cli.NetworkRemove refuses to remove a network if there are duplicates
|
||||
networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
common.Logger(ctx).Debugf("%v", networks)
|
||||
for _, network := range networks {
|
||||
if network.Name == name {
|
||||
result, err := cli.NetworkInspect(ctx, network.ID, types.NetworkInspectOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(result.Containers) == 0 {
|
||||
if err = cli.NetworkRemove(ctx, network.ID); err != nil {
|
||||
common.Logger(ctx).Debugf("%v", err)
|
||||
}
|
||||
} else {
|
||||
common.Logger(ctx).Debugf("Refusing to remove network %v because it still has active endpoints", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
|
@ -38,6 +38,7 @@ import (
|
|||
"golang.org/x/term"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/filecollector"
|
||||
)
|
||||
|
||||
// NewContainer creates a reference to a container
|
||||
|
@ -86,7 +87,7 @@ func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) b
|
|||
|
||||
func (cr *containerReference) Create(capAdd []string, capDrop []string) common.Executor {
|
||||
return common.
|
||||
NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
|
||||
NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
|
||||
Then(
|
||||
common.NewPipelineExecutor(
|
||||
cr.connect(),
|
||||
|
@ -98,7 +99,7 @@ func (cr *containerReference) Create(capAdd []string, capDrop []string) common.E
|
|||
|
||||
func (cr *containerReference) Start(attach bool) common.Executor {
|
||||
return common.
|
||||
NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
|
||||
NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
|
||||
Then(
|
||||
common.NewPipelineExecutor(
|
||||
cr.connect(),
|
||||
|
@ -260,8 +261,10 @@ func RunnerArch(ctx context.Context) string {
|
|||
|
||||
archMapper := map[string]string{
|
||||
"x86_64": "X64",
|
||||
"amd64": "X64",
|
||||
"386": "X86",
|
||||
"aarch64": "ARM64",
|
||||
"arm64": "ARM64",
|
||||
}
|
||||
if arch, ok := archMapper[info.Architecture]; ok {
|
||||
return arch
|
||||
|
@ -370,8 +373,8 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
|
|||
// So comment out the following code.
|
||||
|
||||
// if len(copts.netMode.Value()) == 0 {
|
||||
// if err = copts.netMode.Set("host"); err != nil {
|
||||
// return nil, nil, fmt.Errorf("Cannot parse networkmode=host. This is an internal error and should not happen: '%w'", err)
|
||||
// if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
|
||||
// return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
|
||||
// }
|
||||
// }
|
||||
|
||||
|
@ -426,10 +429,11 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
|
|||
input := cr.input
|
||||
|
||||
config := &container.Config{
|
||||
Image: input.Image,
|
||||
WorkingDir: input.WorkingDir,
|
||||
Env: input.Env,
|
||||
Tty: isTerminal,
|
||||
Image: input.Image,
|
||||
WorkingDir: input.WorkingDir,
|
||||
Env: input.Env,
|
||||
ExposedPorts: input.ExposedPorts,
|
||||
Tty: isTerminal,
|
||||
}
|
||||
logger.Debugf("Common container.Config ==> %+v", config)
|
||||
|
||||
|
@ -465,14 +469,15 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
|
|||
}
|
||||
|
||||
hostConfig := &container.HostConfig{
|
||||
CapAdd: capAdd,
|
||||
CapDrop: capDrop,
|
||||
Binds: input.Binds,
|
||||
Mounts: mounts,
|
||||
NetworkMode: container.NetworkMode(input.NetworkMode),
|
||||
Privileged: input.Privileged,
|
||||
UsernsMode: container.UsernsMode(input.UsernsMode),
|
||||
AutoRemove: input.AutoRemove,
|
||||
CapAdd: capAdd,
|
||||
CapDrop: capDrop,
|
||||
Binds: input.Binds,
|
||||
Mounts: mounts,
|
||||
NetworkMode: container.NetworkMode(input.NetworkMode),
|
||||
Privileged: input.Privileged,
|
||||
UsernsMode: container.UsernsMode(input.UsernsMode),
|
||||
PortBindings: input.PortBindings,
|
||||
AutoRemove: input.AutoRemove,
|
||||
}
|
||||
logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)
|
||||
|
||||
|
@ -487,7 +492,10 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
|
|||
// For Gitea
|
||||
// network-scoped alias is supported only for containers in user defined networks
|
||||
var networkingConfig *network.NetworkingConfig
|
||||
if hostConfig.NetworkMode.IsUserDefined() && len(input.NetworkAliases) > 0 {
|
||||
logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
|
||||
n := hostConfig.NetworkMode
|
||||
// IsUserDefined and IsHost are broken on windows
|
||||
if n.IsUserDefined() && n != "host" && len(input.NetworkAliases) > 0 {
|
||||
endpointConfig := &network.EndpointSettings{
|
||||
Aliases: input.NetworkAliases,
|
||||
}
|
||||
|
@ -709,10 +717,28 @@ func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal boo
|
|||
}
|
||||
|
||||
func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
|
||||
err := cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
|
||||
// Mkdir
|
||||
buf := &bytes.Buffer{}
|
||||
tw := tar.NewWriter(buf)
|
||||
_ = tw.WriteHeader(&tar.Header{
|
||||
Name: destPath,
|
||||
Mode: 777,
|
||||
Typeflag: tar.TypeDir,
|
||||
})
|
||||
tw.Close()
|
||||
err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, types.CopyToContainerOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
|
||||
}
|
||||
// Copy Content
|
||||
err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy content to container: %w", err)
|
||||
}
|
||||
// If this fails, then folders have wrong permissions on non root container
|
||||
if cr.UID != 0 || cr.GID != 0 {
|
||||
_ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), destPath}, nil, "0", "")(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -753,12 +779,12 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
|
|||
ignorer = gitignore.NewMatcher(ps)
|
||||
}
|
||||
|
||||
fc := &fileCollector{
|
||||
Fs: &defaultFs{},
|
||||
fc := &filecollector.FileCollector{
|
||||
Fs: &filecollector.DefaultFs{},
|
||||
Ignorer: ignorer,
|
||||
SrcPath: srcPath,
|
||||
SrcPrefix: srcPrefix,
|
||||
Handler: &tarCollector{
|
||||
Handler: &filecollector.TarCollector{
|
||||
TarWriter: tw,
|
||||
UID: cr.UID,
|
||||
GID: cr.GID,
|
||||
|
@ -766,7 +792,7 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
|
|||
},
|
||||
}
|
||||
|
||||
err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
|
||||
err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ func TestDocker(t *testing.T) {
|
|||
ctx := context.Background()
|
||||
client, err := GetDockerClient(ctx)
|
||||
assert.NoError(t, err)
|
||||
defer client.Close()
|
||||
|
||||
dockerBuild := NewDockerBuildExecutor(NewDockerBuildExecutorInput{
|
||||
ContextDir: "testdata",
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build WITHOUT_DOCKER || !(linux || darwin || windows)
|
||||
//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)
|
||||
|
||||
package container
|
||||
|
||||
|
@ -56,7 +56,7 @@ func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor {
|
|||
}
|
||||
}
|
||||
|
||||
func NewDockerNetworkCreateExecutor(name string) common.Executor {
|
||||
func NewDockerNetworkCreateExecutor(name string, config *types.NetworkCreate) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
|
|
|
@ -5,6 +5,9 @@ import "context"
|
|||
type ExecutionsEnvironment interface {
|
||||
Container
|
||||
ToContainerPath(string) string
|
||||
GetName() string
|
||||
GetRoot() string
|
||||
GetLXC() bool
|
||||
GetActPath() string
|
||||
GetPathVariableName() string
|
||||
DefaultPathVariable() string
|
||||
|
|
|
@ -21,20 +21,24 @@ import (
|
|||
"golang.org/x/term"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/filecollector"
|
||||
"github.com/nektos/act/pkg/lookpath"
|
||||
)
|
||||
|
||||
type HostEnvironment struct {
|
||||
Name string
|
||||
Path string
|
||||
TmpDir string
|
||||
ToolCache string
|
||||
Workdir string
|
||||
ActPath string
|
||||
Root string
|
||||
CleanUp func()
|
||||
StdOut io.Writer
|
||||
LXC bool
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) Create(_ []string, _ []string) common.Executor {
|
||||
func (e *HostEnvironment) Create(_, _ []string) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
@ -71,7 +75,7 @@ func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, ta
|
|||
return err
|
||||
}
|
||||
tr := tar.NewReader(tarStream)
|
||||
cp := ©Collector{
|
||||
cp := &filecollector.CopyCollector{
|
||||
DstDir: destPath,
|
||||
}
|
||||
for {
|
||||
|
@ -93,7 +97,7 @@ func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, ta
|
|||
}
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor {
|
||||
func (e *HostEnvironment) CopyDir(destPath, srcPath string, useGitIgnore bool) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
logger := common.Logger(ctx)
|
||||
srcPrefix := filepath.Dir(srcPath)
|
||||
|
@ -110,16 +114,16 @@ func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore
|
|||
|
||||
ignorer = gitignore.NewMatcher(ps)
|
||||
}
|
||||
fc := &fileCollector{
|
||||
Fs: &defaultFs{},
|
||||
fc := &filecollector.FileCollector{
|
||||
Fs: &filecollector.DefaultFs{},
|
||||
Ignorer: ignorer,
|
||||
SrcPath: srcPath,
|
||||
SrcPrefix: srcPrefix,
|
||||
Handler: ©Collector{
|
||||
Handler: &filecollector.CopyCollector{
|
||||
DstDir: destPath,
|
||||
},
|
||||
}
|
||||
return filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
|
||||
return filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -132,21 +136,21 @@ func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath strin
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tc := &tarCollector{
|
||||
tc := &filecollector.TarCollector{
|
||||
TarWriter: tw,
|
||||
}
|
||||
if fi.IsDir() {
|
||||
srcPrefix := filepath.Dir(srcPath)
|
||||
srcPrefix := srcPath
|
||||
if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) {
|
||||
srcPrefix += string(filepath.Separator)
|
||||
}
|
||||
fc := &fileCollector{
|
||||
Fs: &defaultFs{},
|
||||
fc := &filecollector.FileCollector{
|
||||
Fs: &filecollector.DefaultFs{},
|
||||
SrcPath: srcPath,
|
||||
SrcPrefix: srcPrefix,
|
||||
Handler: tc,
|
||||
}
|
||||
err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
|
||||
err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -287,7 +291,7 @@ func getEnvListFromMap(env map[string]string) []string {
|
|||
return envList
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline string, env map[string]string, _, workdir string) error {
|
||||
func (e *HostEnvironment) exec(ctx context.Context, commandparam []string, cmdline string, env map[string]string, user, workdir string) error {
|
||||
envList := getEnvListFromMap(env)
|
||||
var wd string
|
||||
if workdir != "" {
|
||||
|
@ -299,6 +303,23 @@ func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline st
|
|||
} else {
|
||||
wd = e.Path
|
||||
}
|
||||
|
||||
if _, err := os.Stat(wd); err != nil {
|
||||
common.Logger(ctx).Debugf("Failed to stat working directory %s %v\n", wd, err.Error())
|
||||
}
|
||||
|
||||
command := make([]string, len(commandparam))
|
||||
copy(command, commandparam)
|
||||
|
||||
if e.GetLXC() {
|
||||
if user == "root" {
|
||||
command = append([]string{"/usr/bin/sudo"}, command...)
|
||||
} else {
|
||||
common.Logger(ctx).Debugf("lxc-attach --name %v %v", e.Name, command)
|
||||
command = append([]string{"/usr/bin/sudo", "--preserve-env", "--preserve-env=PATH", "/usr/bin/lxc-attach", "--keep-env", "--name", e.Name, "--"}, command...)
|
||||
}
|
||||
}
|
||||
|
||||
f, err := lookupPathHost(command[0], env, e.StdOut)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -341,7 +362,7 @@ func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline st
|
|||
}
|
||||
err = cmd.Run()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("RUN %w", err)
|
||||
}
|
||||
if tty != nil {
|
||||
writer.AutoStop = true
|
||||
|
@ -398,6 +419,18 @@ func (e *HostEnvironment) ToContainerPath(path string) string {
|
|||
return path
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) GetLXC() bool {
|
||||
return e.LXC
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) GetName() string {
|
||||
return e.Name
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) GetRoot() string {
|
||||
return e.Root
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) GetActPath() string {
|
||||
actPath := e.ActPath
|
||||
if runtime.GOOS == "windows" {
|
||||
|
@ -457,7 +490,7 @@ func (e *HostEnvironment) GetRunnerContext(_ context.Context) map[string]interfa
|
|||
}
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, _ io.Writer) (io.Writer, io.Writer) {
|
||||
func (e *HostEnvironment) ReplaceLogWriter(stdout, _ io.Writer) (io.Writer, io.Writer) {
|
||||
org := e.StdOut
|
||||
e.StdOut = stdout
|
||||
return org, org
|
||||
|
|
|
@ -1,4 +1,71 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Type assert HostEnvironment implements ExecutionsEnvironment
|
||||
var _ ExecutionsEnvironment = &HostEnvironment{}
|
||||
|
||||
func TestCopyDir(t *testing.T) {
|
||||
dir, err := os.MkdirTemp("", "test-host-env-*")
|
||||
assert.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
ctx := context.Background()
|
||||
e := &HostEnvironment{
|
||||
Path: filepath.Join(dir, "path"),
|
||||
TmpDir: filepath.Join(dir, "tmp"),
|
||||
ToolCache: filepath.Join(dir, "tool_cache"),
|
||||
ActPath: filepath.Join(dir, "act_path"),
|
||||
StdOut: os.Stdout,
|
||||
Workdir: path.Join("testdata", "scratch"),
|
||||
}
|
||||
_ = os.MkdirAll(e.Path, 0700)
|
||||
_ = os.MkdirAll(e.TmpDir, 0700)
|
||||
_ = os.MkdirAll(e.ToolCache, 0700)
|
||||
_ = os.MkdirAll(e.ActPath, 0700)
|
||||
err = e.CopyDir(e.Workdir, e.Path, true)(ctx)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetContainerArchive(t *testing.T) {
|
||||
dir, err := os.MkdirTemp("", "test-host-env-*")
|
||||
assert.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
ctx := context.Background()
|
||||
e := &HostEnvironment{
|
||||
Path: filepath.Join(dir, "path"),
|
||||
TmpDir: filepath.Join(dir, "tmp"),
|
||||
ToolCache: filepath.Join(dir, "tool_cache"),
|
||||
ActPath: filepath.Join(dir, "act_path"),
|
||||
StdOut: os.Stdout,
|
||||
Workdir: path.Join("testdata", "scratch"),
|
||||
}
|
||||
_ = os.MkdirAll(e.Path, 0700)
|
||||
_ = os.MkdirAll(e.TmpDir, 0700)
|
||||
_ = os.MkdirAll(e.ToolCache, 0700)
|
||||
_ = os.MkdirAll(e.ActPath, 0700)
|
||||
expectedContent := []byte("sdde/7sh")
|
||||
err = os.WriteFile(filepath.Join(e.Path, "action.yml"), expectedContent, 0600)
|
||||
assert.NoError(t, err)
|
||||
archive, err := e.GetContainerArchive(ctx, e.Path)
|
||||
assert.NoError(t, err)
|
||||
defer archive.Close()
|
||||
reader := tar.NewReader(archive)
|
||||
h, err := reader.Next()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "action.yml", h.Name)
|
||||
content, err := io.ReadAll(reader)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedContent, content)
|
||||
_, err = reader.Next()
|
||||
assert.ErrorIs(t, err, io.EOF)
|
||||
}
|
||||
|
|
|
@ -10,8 +10,7 @@ import (
|
|||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type LinuxContainerEnvironmentExtensions struct {
|
||||
}
|
||||
type LinuxContainerEnvironmentExtensions struct{}
|
||||
|
||||
// Resolves the equivalent host path inside the container
|
||||
// This is required for windows and WSL 2 to translate things like C:\Users\Myproject to /mnt/users/Myproject
|
||||
|
@ -47,6 +46,18 @@ func (*LinuxContainerEnvironmentExtensions) ToContainerPath(path string) string
|
|||
return result
|
||||
}
|
||||
|
||||
func (*LinuxContainerEnvironmentExtensions) GetName() string {
|
||||
return "NAME"
|
||||
}
|
||||
|
||||
func (*LinuxContainerEnvironmentExtensions) GetLXC() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (*LinuxContainerEnvironmentExtensions) GetRoot() string {
|
||||
return "/var/run"
|
||||
}
|
||||
|
||||
func (*LinuxContainerEnvironmentExtensions) GetActPath() string {
|
||||
return "/var/run/act"
|
||||
}
|
||||
|
|
1
pkg/container/testdata/scratch/test.txt
vendored
Normal file
1
pkg/container/testdata/scratch/test.txt
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
testfile
|
|
@ -43,6 +43,9 @@ func TestFunctionContains(t *testing.T) {
|
|||
assert.Equal(t, tt.expected, output)
|
||||
})
|
||||
}
|
||||
|
||||
_, err := NewInterpeter(env, Config{}).Evaluate("contains('one')", DefaultStatusCheckNone)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFunctionStartsWith(t *testing.T) {
|
||||
|
@ -72,6 +75,9 @@ func TestFunctionStartsWith(t *testing.T) {
|
|||
assert.Equal(t, tt.expected, output)
|
||||
})
|
||||
}
|
||||
|
||||
_, err := NewInterpeter(env, Config{}).Evaluate("startsWith('one')", DefaultStatusCheckNone)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFunctionEndsWith(t *testing.T) {
|
||||
|
@ -101,6 +107,9 @@ func TestFunctionEndsWith(t *testing.T) {
|
|||
assert.Equal(t, tt.expected, output)
|
||||
})
|
||||
}
|
||||
|
||||
_, err := NewInterpeter(env, Config{}).Evaluate("endsWith('one')", DefaultStatusCheckNone)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFunctionJoin(t *testing.T) {
|
||||
|
@ -128,6 +137,9 @@ func TestFunctionJoin(t *testing.T) {
|
|||
assert.Equal(t, tt.expected, output)
|
||||
})
|
||||
}
|
||||
|
||||
_, err := NewInterpeter(env, Config{}).Evaluate("join()", DefaultStatusCheckNone)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFunctionToJSON(t *testing.T) {
|
||||
|
@ -154,6 +166,9 @@ func TestFunctionToJSON(t *testing.T) {
|
|||
assert.Equal(t, tt.expected, output)
|
||||
})
|
||||
}
|
||||
|
||||
_, err := NewInterpeter(env, Config{}).Evaluate("tojson()", DefaultStatusCheckNone)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFunctionFromJSON(t *testing.T) {
|
||||
|
@ -177,6 +192,9 @@ func TestFunctionFromJSON(t *testing.T) {
|
|||
assert.Equal(t, tt.expected, output)
|
||||
})
|
||||
}
|
||||
|
||||
_, err := NewInterpeter(env, Config{}).Evaluate("fromjson()", DefaultStatusCheckNone)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFunctionHashFiles(t *testing.T) {
|
||||
|
@ -230,6 +248,7 @@ func TestFunctionFormat(t *testing.T) {
|
|||
{"format('{0', '{1}', 'World')", nil, "Unclosed brackets. The following format string is invalid: '{0'", "format-invalid-format-string"},
|
||||
{"format('{2}', '{1}', 'World')", "", "The following format string references more arguments than were supplied: '{2}'", "format-invalid-replacement-reference"},
|
||||
{"format('{2147483648}')", "", "The following format string is invalid: '{2147483648}'", "format-invalid-replacement-reference"},
|
||||
{"format('{0} {1} {2} {3}', 1.0, 1.1, 1234567890.0, 12345678901234567890.0)", "1 1.1 1234567890 1.23456789012346E+19", nil, "format-floats"},
|
||||
}
|
||||
|
||||
env := &EvaluationEnvironment{
|
||||
|
@ -247,4 +266,7 @@ func TestFunctionFormat(t *testing.T) {
|
|||
}
|
||||
})
|
||||
}
|
||||
|
||||
_, err := NewInterpeter(env, Config{}).Evaluate("format()", DefaultStatusCheckNone)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
|
|
@ -157,6 +157,8 @@ func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableN
|
|||
return impl.env.Github, nil
|
||||
case "gitea": // compatible with Gitea
|
||||
return impl.env.Github, nil
|
||||
case "forge":
|
||||
return impl.env.Github, nil
|
||||
case "env":
|
||||
return impl.env.Env, nil
|
||||
case "job":
|
||||
|
@ -449,7 +451,7 @@ func (impl *interperterImpl) coerceToString(value reflect.Value) reflect.Value {
|
|||
} else if math.IsInf(value.Float(), -1) {
|
||||
return reflect.ValueOf("-Infinity")
|
||||
}
|
||||
return reflect.ValueOf(fmt.Sprint(value))
|
||||
return reflect.ValueOf(fmt.Sprintf("%.15G", value.Float()))
|
||||
|
||||
case reflect.Slice:
|
||||
return reflect.ValueOf("Array")
|
||||
|
@ -591,23 +593,58 @@ func (impl *interperterImpl) evaluateFuncCall(funcCallNode *actionlint.FuncCallN
|
|||
args = append(args, reflect.ValueOf(value))
|
||||
}
|
||||
|
||||
argCountCheck := func(argCount int) error {
|
||||
if len(args) != argCount {
|
||||
return fmt.Errorf("'%s' expected %d arguments but got %d instead", funcCallNode.Callee, argCount, len(args))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
argAtLeastCheck := func(atLeast int) error {
|
||||
if len(args) < atLeast {
|
||||
return fmt.Errorf("'%s' expected at least %d arguments but got %d instead", funcCallNode.Callee, atLeast, len(args))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
switch strings.ToLower(funcCallNode.Callee) {
|
||||
case "contains":
|
||||
if err := argCountCheck(2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return impl.contains(args[0], args[1])
|
||||
case "startswith":
|
||||
if err := argCountCheck(2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return impl.startsWith(args[0], args[1])
|
||||
case "endswith":
|
||||
if err := argCountCheck(2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return impl.endsWith(args[0], args[1])
|
||||
case "format":
|
||||
if err := argAtLeastCheck(1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return impl.format(args[0], args[1:]...)
|
||||
case "join":
|
||||
if err := argAtLeastCheck(1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(args) == 1 {
|
||||
return impl.join(args[0], reflect.ValueOf(","))
|
||||
}
|
||||
return impl.join(args[0], args[1])
|
||||
case "tojson":
|
||||
if err := argCountCheck(1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return impl.toJSON(args[0])
|
||||
case "fromjson":
|
||||
if err := argCountCheck(1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return impl.fromJSON(args[0])
|
||||
case "hashfiles":
|
||||
if impl.env.HashFiles != nil {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
package container
|
||||
package filecollector
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
|
@ -17,18 +17,18 @@ import (
|
|||
"github.com/go-git/go-git/v5/plumbing/format/index"
|
||||
)
|
||||
|
||||
type fileCollectorHandler interface {
|
||||
type Handler interface {
|
||||
WriteFile(path string, fi fs.FileInfo, linkName string, f io.Reader) error
|
||||
}
|
||||
|
||||
type tarCollector struct {
|
||||
type TarCollector struct {
|
||||
TarWriter *tar.Writer
|
||||
UID int
|
||||
GID int
|
||||
DstDir string
|
||||
}
|
||||
|
||||
func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
|
||||
func (tc TarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
|
||||
// create a new dir/file header
|
||||
header, err := tar.FileInfoHeader(fi, linkName)
|
||||
if err != nil {
|
||||
|
@ -59,11 +59,11 @@ func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string,
|
|||
return nil
|
||||
}
|
||||
|
||||
type copyCollector struct {
|
||||
type CopyCollector struct {
|
||||
DstDir string
|
||||
}
|
||||
|
||||
func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
|
||||
func (cc *CopyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
|
||||
fdestpath := filepath.Join(cc.DstDir, fpath)
|
||||
if err := os.MkdirAll(filepath.Dir(fdestpath), 0o777); err != nil {
|
||||
return err
|
||||
|
@ -82,29 +82,29 @@ func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string
|
|||
return nil
|
||||
}
|
||||
|
||||
type fileCollector struct {
|
||||
type FileCollector struct {
|
||||
Ignorer gitignore.Matcher
|
||||
SrcPath string
|
||||
SrcPrefix string
|
||||
Fs fileCollectorFs
|
||||
Handler fileCollectorHandler
|
||||
Fs Fs
|
||||
Handler Handler
|
||||
}
|
||||
|
||||
type fileCollectorFs interface {
|
||||
type Fs interface {
|
||||
Walk(root string, fn filepath.WalkFunc) error
|
||||
OpenGitIndex(path string) (*index.Index, error)
|
||||
Open(path string) (io.ReadCloser, error)
|
||||
Readlink(path string) (string, error)
|
||||
}
|
||||
|
||||
type defaultFs struct {
|
||||
type DefaultFs struct {
|
||||
}
|
||||
|
||||
func (*defaultFs) Walk(root string, fn filepath.WalkFunc) error {
|
||||
func (*DefaultFs) Walk(root string, fn filepath.WalkFunc) error {
|
||||
return filepath.Walk(root, fn)
|
||||
}
|
||||
|
||||
func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
|
||||
func (*DefaultFs) OpenGitIndex(path string) (*index.Index, error) {
|
||||
r, err := git.PlainOpen(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -116,16 +116,16 @@ func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (*defaultFs) Open(path string) (io.ReadCloser, error) {
|
||||
func (*DefaultFs) Open(path string) (io.ReadCloser, error) {
|
||||
return os.Open(path)
|
||||
}
|
||||
|
||||
func (*defaultFs) Readlink(path string) (string, error) {
|
||||
func (*DefaultFs) Readlink(path string) (string, error) {
|
||||
return os.Readlink(path)
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
|
||||
func (fc *FileCollector) CollectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
|
||||
i, _ := fc.Fs.OpenGitIndex(path.Join(fc.SrcPath, path.Join(submodulePath...)))
|
||||
return func(file string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
|
@ -166,7 +166,7 @@ func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []strin
|
|||
}
|
||||
}
|
||||
if err == nil && entry.Mode == filemode.Submodule {
|
||||
err = fc.Fs.Walk(file, fc.collectFiles(ctx, split))
|
||||
err = fc.Fs.Walk(file, fc.CollectFiles(ctx, split))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
|
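The collector types above are now exported from a dedicated filecollector package, so code outside the package can pack a working tree into a tar stream. A minimal sketch of that usage, assuming the package is importable as github.com/nektos/act/pkg/filecollector; the paths are illustrative and error handling is elided:

package main

import (
	"archive/tar"
	"context"
	"os"

	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
	"github.com/nektos/act/pkg/filecollector"
)

func main() {
	out, _ := os.Create("/tmp/checkout.tar") // illustrative destination
	defer out.Close()
	tw := tar.NewWriter(out)
	defer tw.Close()

	fc := &filecollector.FileCollector{
		Fs:        &filecollector.DefaultFs{},
		Ignorer:   gitignore.NewMatcher(nil), // no ignore patterns in this sketch
		SrcPath:   "/path/to/checkout",
		SrcPrefix: "/path/to/checkout/",
		Handler:   filecollector.TarCollector{TarWriter: tw},
	}
	// Walk the tree and stream every non-ignored file into the archive.
	_ = fc.Fs.Walk("/path/to/checkout", fc.CollectFiles(context.Background(), []string{}))
}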
@ -1,4 +1,4 @@
|
|||
package container
|
||||
package filecollector
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
|
@ -95,16 +95,16 @@ func TestIgnoredTrackedfile(t *testing.T) {
|
|||
tw := tar.NewWriter(tmpTar)
|
||||
ps, _ := gitignore.ReadPatterns(worktree, []string{})
|
||||
ignorer := gitignore.NewMatcher(ps)
|
||||
fc := &fileCollector{
|
||||
fc := &FileCollector{
|
||||
Fs: &memoryFs{Filesystem: fs},
|
||||
Ignorer: ignorer,
|
||||
SrcPath: "mygitrepo",
|
||||
SrcPrefix: "mygitrepo" + string(filepath.Separator),
|
||||
Handler: &tarCollector{
|
||||
Handler: &TarCollector{
|
||||
TarWriter: tw,
|
||||
},
|
||||
}
|
||||
err := fc.Fs.Walk("mygitrepo", fc.collectFiles(context.Background(), []string{}))
|
||||
err := fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
|
||||
assert.NoError(t, err, "successfully collect files")
|
||||
tw.Close()
|
||||
_, _ = tmpTar.Seek(0, io.SeekStart)
|
||||
|
@ -115,3 +115,58 @@ func TestIgnoredTrackedfile(t *testing.T) {
|
|||
_, err = tr.Next()
|
||||
assert.ErrorIs(t, err, io.EOF, "tar must only contain one element")
|
||||
}
|
||||
|
||||
func TestSymlinks(t *testing.T) {
|
||||
fs := memfs.New()
|
||||
_ = fs.MkdirAll("mygitrepo/.git", 0o777)
|
||||
dotgit, _ := fs.Chroot("mygitrepo/.git")
|
||||
worktree, _ := fs.Chroot("mygitrepo")
|
||||
repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree)
|
||||
// This file shouldn't be in the tar
|
||||
f, err := worktree.Create(".env")
|
||||
assert.NoError(t, err)
|
||||
_, err = f.Write([]byte("test=val1\n"))
|
||||
assert.NoError(t, err)
|
||||
f.Close()
|
||||
err = worktree.Symlink(".env", "test.env")
|
||||
assert.NoError(t, err)
|
||||
|
||||
w, err := repo.Worktree()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// .gitignore is in the tar after adding it to the index
|
||||
_, err = w.Add(".env")
|
||||
assert.NoError(t, err)
|
||||
_, err = w.Add("test.env")
|
||||
assert.NoError(t, err)
|
||||
|
||||
tmpTar, _ := fs.Create("temp.tar")
|
||||
tw := tar.NewWriter(tmpTar)
|
||||
ps, _ := gitignore.ReadPatterns(worktree, []string{})
|
||||
ignorer := gitignore.NewMatcher(ps)
|
||||
fc := &FileCollector{
|
||||
Fs: &memoryFs{Filesystem: fs},
|
||||
Ignorer: ignorer,
|
||||
SrcPath: "mygitrepo",
|
||||
SrcPrefix: "mygitrepo" + string(filepath.Separator),
|
||||
Handler: &TarCollector{
|
||||
TarWriter: tw,
|
||||
},
|
||||
}
|
||||
err = fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
|
||||
assert.NoError(t, err, "successfully collect files")
|
||||
tw.Close()
|
||||
_, _ = tmpTar.Seek(0, io.SeekStart)
|
||||
tr := tar.NewReader(tmpTar)
|
||||
h, err := tr.Next()
|
||||
files := map[string]tar.Header{}
|
||||
for err == nil {
|
||||
files[h.Name] = *h
|
||||
h, err = tr.Next()
|
||||
}
|
||||
|
||||
assert.Equal(t, ".env", files[".env"].Name)
|
||||
assert.Equal(t, "test.env", files["test.env"].Name)
|
||||
assert.Equal(t, ".env", files["test.env"].Linkname)
|
||||
assert.ErrorIs(t, err, io.EOF, "tar must be read cleanly to EOF")
|
||||
}
|
|
@ -15,6 +15,7 @@ func NewInterpeter(
|
|||
matrix map[string]interface{},
|
||||
gitCtx *model.GithubContext,
|
||||
results map[string]*JobResult,
|
||||
vars map[string]string,
|
||||
) exprparser.Interpreter {
|
||||
strategy := make(map[string]interface{})
|
||||
if job.Strategy != nil {
|
||||
|
@ -62,6 +63,7 @@ func NewInterpeter(
|
|||
Matrix: matrix,
|
||||
Needs: using,
|
||||
Inputs: nil, // not supported yet
|
||||
Vars: vars,
|
||||
}
|
||||
|
||||
config := exprparser.Config{
|
||||
|
|
|
@ -48,12 +48,15 @@ func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
|
|||
}
|
||||
for _, matrix := range matricxes {
|
||||
job := job.Clone()
|
||||
evaluator := NewExpressionEvaluator(NewInterpeter(id, origin.GetJob(id), matrix, pc.gitContext, results, pc.vars))
|
||||
if job.Name == "" {
|
||||
job.Name = id
|
||||
job.Name = nameWithMatrix(id, matrix)
|
||||
} else {
|
||||
job.Name = evaluator.Interpolate(job.Name)
|
||||
}
|
||||
job.Name = nameWithMatrix(job.Name, matrix)
|
||||
|
||||
job.Strategy.RawMatrix = encodeMatrix(matrix)
|
||||
evaluator := NewExpressionEvaluator(NewInterpeter(id, origin.GetJob(id), matrix, pc.gitContext, results))
|
||||
|
||||
runsOn := origin.GetJob(id).RunsOn()
|
||||
for i, v := range runsOn {
|
||||
runsOn[i] = evaluator.Interpolate(v)
|
||||
|
@ -86,9 +89,16 @@ func WithGitContext(context *model.GithubContext) ParseOption {
|
|||
}
|
||||
}
|
||||
|
||||
func WithVars(vars map[string]string) ParseOption {
|
||||
return func(c *parseContext) {
|
||||
c.vars = vars
|
||||
}
|
||||
}
|
||||
|
||||
type parseContext struct {
|
||||
jobResults map[string]string
|
||||
gitContext *model.GithubContext
|
||||
vars map[string]string
|
||||
}
|
||||
|
||||
type ParseOption func(c *parseContext)
|
||||
|
|
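With the WithVars option introduced above, callers can pass repository variables into the job parser so matrix expansion and name interpolation can evaluate them. A minimal sketch, assuming the package is importable as github.com/nektos/act/pkg/jobparser; the workflow path and variable name are illustrative:

package main

import (
	"log"
	"os"

	"github.com/nektos/act/pkg/jobparser"
)

func main() {
	content, err := os.ReadFile(".forgejo/workflows/build.yml") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	workflows, err := jobparser.Parse(content,
		jobparser.WithVars(map[string]string{"DEPLOY_TARGET": "staging"}), // hypothetical variable
	)
	if err != nil {
		log.Fatal(err)
	}
	// Each returned SingleWorkflow describes one job/matrix combination.
	log.Printf("expanded into %d single-job workflows", len(workflows))
}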
|
@ -17,6 +17,11 @@ func TestParse(t *testing.T) {
|
|||
options []ParseOption
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "multiple_named_matrix",
|
||||
options: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple_jobs",
|
||||
options: nil,
|
||||
|
|
|
@ -259,6 +259,11 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
|
|||
return nil, fmt.Errorf("unknown on type: %#v", branches)
|
||||
}
|
||||
}
|
||||
case map[string]interface{}:
|
||||
if k != "workflow_dispatch" || act != "inputs" {
|
||||
return nil, fmt.Errorf("unknown on type: %#v", v)
|
||||
}
|
||||
acts = nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown on type: %#v", branches)
|
||||
}
|
||||
|
|
|
@ -186,6 +186,14 @@ func TestParseRawOn(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n workflow_dispatch:\n inputs:\n test:\n type: string",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "workflow_dispatch",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, kase := range kases {
|
||||
t.Run(kase.input, func(t *testing.T) {
|
||||
|
|
14
pkg/jobparser/testdata/multiple_named_matrix.in.yaml
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04, ubuntu-20.04]
|
||||
version: [1.17, 1.18, 1.19]
|
||||
runs-on: ${{ matrix.os }}
|
||||
name: On ${{ matrix.os }} with go v${{ matrix.version }}
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
101
pkg/jobparser/testdata/multiple_named_matrix.out.yaml
vendored
Normal file
|
@ -0,0 +1,101 @@
|
|||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: On ubuntu-20.04 with go v1.17
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.17
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: On ubuntu-20.04 with go v1.18
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.18
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: On ubuntu-20.04 with go v1.19
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.19
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: On ubuntu-22.04 with go v1.17
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.17
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: On ubuntu-22.04 with go v1.18
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.18
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: On ubuntu-22.04 with go v1.19
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.19
|
|
@ -168,7 +168,7 @@ func (ghc *GithubContext) SetRepositoryAndOwner(ctx context.Context, githubInsta
|
|||
if ghc.Repository == "" {
|
||||
repo, err := git.FindGithubRepo(ctx, repoPath, githubInstance, remoteName)
|
||||
if err != nil {
|
||||
common.Logger(ctx).Warningf("unable to get git repo: %v", err)
|
||||
common.Logger(ctx).Warningf("unable to get git repo (githubInstance: %v; remoteName: %v, repoPath: %v): %v", githubInstance, remoteName, repoPath, err)
|
||||
return
|
||||
}
|
||||
ghc.Repository = repo
|
||||
|
|
|
@ -148,12 +148,10 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
|
|||
workflow.Name = wf.workflowDirEntry.Name()
|
||||
}
|
||||
|
||||
jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
|
||||
for k := range workflow.Jobs {
|
||||
if ok := jobNameRegex.MatchString(k); !ok {
|
||||
_ = f.Close()
|
||||
return nil, fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
|
||||
}
|
||||
err = validateJobName(workflow)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wp.workflows = append(wp.workflows, workflow)
|
||||
|
@ -171,6 +169,42 @@ func CombineWorkflowPlanner(workflows ...*Workflow) WorkflowPlanner {
|
|||
}
|
||||
}
|
||||
|
||||
func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) {
|
||||
wp := new(workflowPlanner)
|
||||
|
||||
log.Debugf("Reading workflow %s", name)
|
||||
workflow, err := ReadWorkflow(f)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err)
|
||||
}
|
||||
return nil, fmt.Errorf("workflow is not valid. '%s': %w", name, err)
|
||||
}
|
||||
workflow.File = name
|
||||
if workflow.Name == "" {
|
||||
workflow.Name = name
|
||||
}
|
||||
|
||||
err = validateJobName(workflow)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wp.workflows = append(wp.workflows, workflow)
|
||||
|
||||
return wp, nil
|
||||
}
|
||||
|
||||
func validateJobName(workflow *Workflow) error {
|
||||
jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
|
||||
for k := range workflow.Jobs {
|
||||
if ok := jobNameRegex.MatchString(k); !ok {
|
||||
return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type workflowPlanner struct {
|
||||
workflows []*Workflow
|
||||
}
|
||||
|
|
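NewSingleWorkflowPlanner, added above, plans a single in-memory workflow without scanning a workflows directory; the reusable-workflow executor later in this diff relies on it. A minimal sketch of the call sequence, with an illustrative workflow body:

package main

import (
	"log"
	"strings"

	"github.com/nektos/act/pkg/model"
)

const workflowYAML = `
name: ci
on: push
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - run: echo hello
`

func main() {
	planner, err := model.NewSingleWorkflowPlanner("ci.yml", strings.NewReader(workflowYAML))
	if err != nil {
		log.Fatal(err)
	}
	plan, err := planner.PlanEvent("push")
	if err != nil {
		log.Fatal(err)
	}
	// The plan can then be handed to a runner, e.g. runner.NewPlanExecutor(plan)(ctx).
	log.Printf("planned %d stage(s)", len(plan.Stages))
}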
|
@ -103,22 +103,40 @@ type WorkflowDispatch struct {
|
|||
}
|
||||
|
||||
func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch {
|
||||
if w.RawOn.Kind != yaml.MappingNode {
|
||||
switch w.RawOn.Kind {
|
||||
case yaml.ScalarNode:
|
||||
var val string
|
||||
if !decodeNode(w.RawOn, &val) {
|
||||
return nil
|
||||
}
|
||||
if val == "workflow_dispatch" {
|
||||
return &WorkflowDispatch{}
|
||||
}
|
||||
case yaml.SequenceNode:
|
||||
var val []string
|
||||
if !decodeNode(w.RawOn, &val) {
|
||||
return nil
|
||||
}
|
||||
for _, v := range val {
|
||||
if v == "workflow_dispatch" {
|
||||
return &WorkflowDispatch{}
|
||||
}
|
||||
}
|
||||
case yaml.MappingNode:
|
||||
var val map[string]yaml.Node
|
||||
if !decodeNode(w.RawOn, &val) {
|
||||
return nil
|
||||
}
|
||||
|
||||
n, found := val["workflow_dispatch"]
|
||||
var workflowDispatch WorkflowDispatch
|
||||
if found && decodeNode(n, &workflowDispatch) {
|
||||
return &workflowDispatch
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
var val map[string]yaml.Node
|
||||
if !decodeNode(w.RawOn, &val) {
|
||||
return nil
|
||||
}
|
||||
|
||||
var config WorkflowDispatch
|
||||
node := val["workflow_dispatch"]
|
||||
if !decodeNode(node, &config) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &config
|
||||
return nil
|
||||
}
|
||||
|
||||
type WorkflowCallInput struct {
|
||||
|
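The rewritten WorkflowDispatchConfig above now recognizes a workflow_dispatch trigger in every shape the "on" key can take: a scalar, a sequence, or a mapping with inputs (the tests later in this diff exercise the same cases). A small sketch of the resulting behaviour, with illustrative workflow snippets:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/nektos/act/pkg/model"
)

func main() {
	for _, on := range []string{
		"on: workflow_dispatch",
		"on: [push, workflow_dispatch]",
		"on:\n  workflow_dispatch:\n    inputs:\n      level:\n        type: string",
	} {
		wf, err := model.ReadWorkflow(strings.NewReader("name: demo\n" + on + "\n"))
		if err != nil {
			log.Fatal(err)
		}
		// Non-nil for all three forms; nil when workflow_dispatch is absent.
		fmt.Println(wf.WorkflowDispatchConfig() != nil)
	}
}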
@ -299,15 +317,39 @@ func (j *Job) Needs() []string {
|
|||
// RunsOn list for Job
|
||||
func (j *Job) RunsOn() []string {
|
||||
switch j.RawRunsOn.Kind {
|
||||
case yaml.MappingNode:
|
||||
var val struct {
|
||||
Group string
|
||||
Labels yaml.Node
|
||||
}
|
||||
|
||||
if !decodeNode(j.RawRunsOn, &val) {
|
||||
return nil
|
||||
}
|
||||
|
||||
labels := nodeAsStringSlice(val.Labels)
|
||||
|
||||
if val.Group != "" {
|
||||
labels = append(labels, val.Group)
|
||||
}
|
||||
|
||||
return labels
|
||||
default:
|
||||
return nodeAsStringSlice(j.RawRunsOn)
|
||||
}
|
||||
}
|
||||
|
||||
func nodeAsStringSlice(node yaml.Node) []string {
|
||||
switch node.Kind {
|
||||
case yaml.ScalarNode:
|
||||
var val string
|
||||
if !decodeNode(j.RawRunsOn, &val) {
|
||||
if !decodeNode(node, &val) {
|
||||
return nil
|
||||
}
|
||||
return []string{val}
|
||||
case yaml.SequenceNode:
|
||||
var val []string
|
||||
if !decodeNode(j.RawRunsOn, &val) {
|
||||
if !decodeNode(node, &val) {
|
||||
return nil
|
||||
}
|
||||
return val
|
||||
|
@ -707,7 +749,7 @@ func (w *Workflow) GetJobIDs() []string {
|
|||
}
|
||||
|
||||
var OnDecodeNodeError = func(node yaml.Node, out interface{}, err error) {
|
||||
log.Fatalf("Failed to decode node %v into %T: %v", node, out, err)
|
||||
log.Errorf("Failed to decode node %v into %T: %v", node, out, err)
|
||||
}
|
||||
|
||||
func decodeNode(node yaml.Node, out interface{}) bool {
|
||||
|
|
|
@ -153,6 +153,60 @@ jobs:
|
|||
assert.Contains(t, workflow.On(), "pull_request")
|
||||
}
|
||||
|
||||
func TestReadWorkflow_DecodeNodeError(t *testing.T) {
|
||||
yaml := `
|
||||
on:
|
||||
push:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- run: echo
|
||||
env:
|
||||
foo: {{ a }}
|
||||
`
|
||||
|
||||
workflow, err := ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
assert.Nil(t, workflow.GetJob("test").Steps[0].GetEnv())
|
||||
}
|
||||
|
||||
func TestReadWorkflow_RunsOnLabels(t *testing.T) {
|
||||
yaml := `
|
||||
name: local-action-docker-url
|
||||
|
||||
jobs:
|
||||
test:
|
||||
container: nginx:latest
|
||||
runs-on:
|
||||
labels: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url`
|
||||
|
||||
workflow, err := ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest"})
|
||||
}
|
||||
|
||||
func TestReadWorkflow_RunsOnLabelsWithGroup(t *testing.T) {
|
||||
yaml := `
|
||||
name: local-action-docker-url
|
||||
|
||||
jobs:
|
||||
test:
|
||||
container: nginx:latest
|
||||
runs-on:
|
||||
labels: [ubuntu-latest]
|
||||
group: linux
|
||||
steps:
|
||||
- uses: ./actions/docker-url`
|
||||
|
||||
workflow, err := ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest", "linux"})
|
||||
}
|
||||
|
||||
func TestReadWorkflow_StringContainer(t *testing.T) {
|
||||
yaml := `
|
||||
name: local-action-docker-url
|
||||
|
@ -464,3 +518,107 @@ func TestStep_ShellCommand(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadWorkflow_WorkflowDispatchConfig(t *testing.T) {
|
||||
yaml := `
|
||||
name: local-action-docker-url
|
||||
`
|
||||
workflow, err := ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
workflowDispatch := workflow.WorkflowDispatchConfig()
|
||||
assert.Nil(t, workflowDispatch)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on: push
|
||||
`
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
workflowDispatch = workflow.WorkflowDispatchConfig()
|
||||
assert.Nil(t, workflowDispatch)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on: workflow_dispatch
|
||||
`
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
workflowDispatch = workflow.WorkflowDispatchConfig()
|
||||
assert.NotNil(t, workflowDispatch)
|
||||
assert.Nil(t, workflowDispatch.Inputs)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on: [push, pull_request]
|
||||
`
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
workflowDispatch = workflow.WorkflowDispatchConfig()
|
||||
assert.Nil(t, workflowDispatch)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on: [push, workflow_dispatch]
|
||||
`
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
workflowDispatch = workflow.WorkflowDispatchConfig()
|
||||
assert.NotNil(t, workflowDispatch)
|
||||
assert.Nil(t, workflowDispatch.Inputs)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
- push
|
||||
- workflow_dispatch
|
||||
`
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
workflowDispatch = workflow.WorkflowDispatchConfig()
|
||||
assert.NotNil(t, workflowDispatch)
|
||||
assert.Nil(t, workflowDispatch.Inputs)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
`
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
workflowDispatch = workflow.WorkflowDispatchConfig()
|
||||
assert.Nil(t, workflowDispatch)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
logLevel:
|
||||
description: 'Log level'
|
||||
required: true
|
||||
default: 'warning'
|
||||
type: choice
|
||||
options:
|
||||
- info
|
||||
- warning
|
||||
- debug
|
||||
`
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
workflowDispatch = workflow.WorkflowDispatchConfig()
|
||||
assert.NotNil(t, workflowDispatch)
|
||||
assert.Equal(t, WorkflowDispatchInput{
|
||||
Default: "warning",
|
||||
Description: "Log level",
|
||||
Options: []string{
|
||||
"info",
|
||||
"warning",
|
||||
"debug",
|
||||
},
|
||||
Required: true,
|
||||
Type: "choice",
|
||||
}, workflowDispatch.Inputs["logLevel"])
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package runner
|
|||
import (
|
||||
"context"
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
|
@ -41,11 +42,24 @@ var trampoline embed.FS
|
|||
|
||||
func readActionImpl(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) {
|
||||
logger := common.Logger(ctx)
|
||||
allErrors := []error{}
|
||||
addError := func(fileName string, err error) {
|
||||
if err != nil {
|
||||
allErrors = append(allErrors, fmt.Errorf("failed to read '%s' from action '%s' with path '%s' of step %w", fileName, step.String(), actionPath, err))
|
||||
} else {
|
||||
// One successful read, clear error state
|
||||
allErrors = nil
|
||||
}
|
||||
}
|
||||
reader, closer, err := readFile("action.yml")
|
||||
addError("action.yml", err)
|
||||
if os.IsNotExist(err) {
|
||||
reader, closer, err = readFile("action.yaml")
|
||||
if err != nil {
|
||||
if _, closer, err2 := readFile("Dockerfile"); err2 == nil {
|
||||
addError("action.yaml", err)
|
||||
if os.IsNotExist(err) {
|
||||
_, closer, err := readFile("Dockerfile")
|
||||
addError("Dockerfile", err)
|
||||
if err == nil {
|
||||
closer.Close()
|
||||
action := &model.Action{
|
||||
Name: "(Synthetic)",
|
||||
|
@ -90,10 +104,10 @@ func readActionImpl(ctx context.Context, step *model.Step, actionDir string, act
|
|||
return action, nil
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if allErrors != nil {
|
||||
return nil, errors.Join(allErrors...)
|
||||
}
|
||||
defer closer.Close()
|
||||
|
||||
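The lookup above now records each failed read and only surfaces the joined errors when action.yml, action.yaml and the Dockerfile are all missing; one successful read clears the accumulated state. A tiny self-contained illustration of that accumulate-then-join pattern (not taken from the diff):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"log"
)

func main() {
	var allErrors []error
	allErrors = append(allErrors, fmt.Errorf("read action.yml: %w", fs.ErrNotExist))
	allErrors = nil // a later read succeeded, so earlier failures are discarded
	if err := errors.Join(allErrors...); err != nil {
		log.Fatal(err) // only reached while at least one error is still recorded
	}
	fmt.Println("no error reported")
}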
|
@ -110,9 +124,6 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
|
|||
if stepModel.Type() != model.StepTypeUsesActionRemote {
|
||||
return nil
|
||||
}
|
||||
if err := removeGitIgnore(ctx, actionDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var containerActionDirCopy string
|
||||
containerActionDirCopy = strings.TrimSuffix(containerActionDir, actionPath)
|
||||
|
@ -121,6 +132,21 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
|
|||
if !strings.HasSuffix(containerActionDirCopy, `/`) {
|
||||
containerActionDirCopy += `/`
|
||||
}
|
||||
|
||||
if rc.Config != nil && rc.Config.ActionCache != nil {
|
||||
raction := step.(*stepActionRemote)
|
||||
ta, err := rc.Config.ActionCache.GetTarArchive(ctx, raction.cacheDir, raction.resolvedSha, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ta.Close()
|
||||
return rc.JobContainer.CopyTarStream(ctx, containerActionDirCopy, ta)
|
||||
}
|
||||
|
||||
if err := removeGitIgnore(ctx, actionDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return rc.JobContainer.CopyDir(containerActionDirCopy, actionDir+"/", rc.Config.UseGitIgnore)(ctx)
|
||||
}
|
||||
|
||||
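Both this hunk and the Docker build path in the next hunk read action content through the configured ActionCache instead of a local checkout. A fragment sketching that interface usage; cache stands for an ActionCache implementation such as GoGitActionCache, and the cache key, URL and ref are illustrative assumptions:

	// Fetch resolves the ref into the local cache once and returns the commit SHA.
	sha, err := cache.Fetch(ctx, "actions/checkout", "https://github.com/actions/checkout", "v4", "")
	if err != nil {
		return err
	}
	// GetTarArchive then streams the cached tree (optionally a sub-path) as a tar archive.
	archive, err := cache.GetTarArchive(ctx, "actions/checkout", sha, "")
	if err != nil {
		return err
	}
	defer archive.Close()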
|
@ -281,6 +307,13 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
|
|||
return err
|
||||
}
|
||||
defer buildContext.Close()
|
||||
} else if rc.Config.ActionCache != nil {
|
||||
rstep := step.(*stepActionRemote)
|
||||
buildContext, err = rc.Config.ActionCache.GetTarArchive(ctx, rstep.cacheDir, rstep.resolvedSha, contextDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer buildContext.Close()
|
||||
}
|
||||
prepImage = container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
|
||||
ContextDir: contextDir,
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"path"
|
||||
|
@ -86,6 +87,9 @@ func (c GoGitActionCache) Fetch(ctx context.Context, cacheDir, url, ref, token s
|
|||
Auth: auth,
|
||||
Force: true,
|
||||
}); err != nil {
|
||||
if tagOrSha && errors.Is(err, git.NoErrAlreadyUpToDate) {
|
||||
return "", fmt.Errorf("couldn't find remote ref \"%s\"", ref)
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
if tagOrSha {
|
||||
|
|
|
@ -20,6 +20,7 @@ type jobInfo interface {
|
|||
result(result string)
|
||||
}
|
||||
|
||||
//nolint:contextcheck,gocyclo
|
||||
func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executor {
|
||||
steps := make([]common.Executor, 0)
|
||||
preSteps := make([]common.Executor, 0)
|
||||
|
@ -101,7 +102,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
|||
|
||||
postExec := useStepLogger(rc, stepModel, stepStagePost, step.post())
|
||||
if postExecutor != nil {
|
||||
// run the post exector in reverse order
|
||||
// run the post executor in reverse order
|
||||
postExecutor = postExec.Finally(postExecutor)
|
||||
} else {
|
||||
postExecutor = postExec
|
||||
|
@ -117,22 +118,19 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
|||
defer cancel()
|
||||
|
||||
logger := common.Logger(ctx)
|
||||
logger.Infof("Cleaning up services for job %s", rc.JobName)
|
||||
if err := rc.stopServiceContainers()(ctx); err != nil {
|
||||
logger.Errorf("Error while cleaning services: %v", err)
|
||||
}
|
||||
|
||||
logger.Infof("Cleaning up container for job %s", rc.JobName)
|
||||
if err = info.stopContainer()(ctx); err != nil {
|
||||
logger.Errorf("Error while stop job container: %v", err)
|
||||
}
|
||||
|
||||
if !rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == "" {
|
||||
// clean network in docker mode only
|
||||
// if the value of `ContainerNetworkMode` is empty string,
|
||||
// it means that the network to which containers are connecting is created by `act_runner`,
|
||||
// so, we should remove the network at last.
|
||||
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, rc.networkName())
|
||||
if err := container.NewDockerNetworkRemoveExecutor(rc.networkName())(ctx); err != nil {
|
||||
networkName, _ := rc.networkName()
|
||||
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
|
||||
if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
|
||||
logger.Errorf("Error while cleaning network: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
434
pkg/runner/lxc-helpers-lib.sh
Executable file
|
@ -0,0 +1,434 @@
|
|||
#!/bin/bash
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
LXC_SELF_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
LXC_BIN=/usr/local/bin
|
||||
LXC_CONTAINER_CONFIG_ALL="unprivileged lxc libvirt docker k8s"
|
||||
LXC_CONTAINER_CONFIG_DEFAULT="lxc libvirt docker"
|
||||
LXC_IPV6_PREFIX_DEFAULT="fc15"
|
||||
LXC_DOCKER_PREFIX_DEFAULT="172.17"
|
||||
LXC_IPV6_DOCKER_PREFIX_DEFAULT="fd00:d0ca"
|
||||
|
||||
: ${LXC_SUDO:=}
|
||||
: ${LXC_CONTAINER_RELEASE:=bookworm}
|
||||
: ${LXC_CONTAINER_CONFIG:=$LXC_CONTAINER_CONFIG_DEFAULT}
|
||||
: ${LXC_HOME:=/home}
|
||||
: ${LXC_VERBOSE:=false}
|
||||
|
||||
source /etc/os-release
|
||||
|
||||
function lxc_release() {
|
||||
echo $VERSION_CODENAME
|
||||
}
|
||||
|
||||
function lxc_template_release() {
|
||||
echo lxc-helpers-$LXC_CONTAINER_RELEASE
|
||||
}
|
||||
|
||||
function lxc_root() {
|
||||
local name="$1"
|
||||
|
||||
echo /var/lib/lxc/$name/rootfs
|
||||
}
|
||||
|
||||
function lxc_config() {
|
||||
local name="$1"
|
||||
|
||||
echo /var/lib/lxc/$name/config
|
||||
}
|
||||
|
||||
function lxc_container_run() {
|
||||
local name="$1"
|
||||
shift
|
||||
|
||||
$LXC_SUDO lxc-attach --clear-env --name $name -- "$@"
|
||||
}
|
||||
|
||||
function lxc_container_run_script_as() {
|
||||
local name="$1"
|
||||
local user="$2"
|
||||
local script="$3"
|
||||
|
||||
$LXC_SUDO chmod +x $(lxc_root $name)$script
|
||||
$LXC_SUDO lxc-attach --name $name -- sudo --user $user $script
|
||||
}
|
||||
|
||||
function lxc_container_run_script() {
|
||||
local name="$1"
|
||||
local script="$2"
|
||||
|
||||
$LXC_SUDO chmod +x $(lxc_root $name)$script
|
||||
lxc_container_run $name $script
|
||||
}
|
||||
|
||||
function lxc_container_inside() {
|
||||
local name="$1"
|
||||
shift
|
||||
|
||||
lxc_container_run $name $LXC_BIN/lxc-helpers.sh "$@"
|
||||
}
|
||||
|
||||
function lxc_container_user_install() {
|
||||
local name="$1"
|
||||
local user_id="$2"
|
||||
local user="$3"
|
||||
|
||||
if test "$user" = root ; then
|
||||
return
|
||||
fi
|
||||
|
||||
local root=$(lxc_root $name)
|
||||
|
||||
if ! $LXC_SUDO grep --quiet "^$user " $root/etc/sudoers ; then
|
||||
$LXC_SUDO tee $root/usr/local/bin/lxc-helpers-create-user.sh > /dev/null <<EOF
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
mkdir -p $LXC_HOME
|
||||
useradd --base-dir $LXC_HOME --create-home --shell /bin/bash --uid $user_id $user
|
||||
for group in docker kvm libvirt ; do
|
||||
if grep --quiet \$group /etc/group ; then adduser $user \$group ; fi
|
||||
done
|
||||
echo "$user ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
|
||||
sudo --user $user ssh-keygen -b 2048 -N '' -f $LXC_HOME/$user/.ssh/id_rsa
|
||||
EOF
|
||||
lxc_container_run_script $name /usr/local/bin/lxc-helpers-create-user.sh
|
||||
fi
|
||||
}
|
||||
|
||||
function lxc_maybe_sudo() {
|
||||
if test $(id -u) != 0 ; then
|
||||
LXC_SUDO=sudo
|
||||
fi
|
||||
}
|
||||
|
||||
function lxc_prepare_environment() {
|
||||
lxc_maybe_sudo
|
||||
if ! $(which lxc-create > /dev/null) ; then
|
||||
$LXC_SUDO apt-get install -y -qq make git libvirt0 libpam-cgfs bridge-utils uidmap dnsmasq-base dnsmasq dnsmasq-utils qemu-user-static
|
||||
fi
|
||||
}
|
||||
|
||||
function lxc_container_config_nesting() {
|
||||
echo 'security.nesting = true'
|
||||
}
|
||||
|
||||
function lxc_container_config_cap() {
|
||||
echo 'lxc.cap.drop ='
|
||||
}
|
||||
|
||||
function lxc_container_config_net() {
|
||||
cat <<EOF
|
||||
#
|
||||
# /dev/net
|
||||
#
|
||||
lxc.cgroup2.devices.allow = c 10:200 rwm
|
||||
lxc.mount.entry = /dev/net dev/net none bind,create=dir 0 0
|
||||
EOF
|
||||
}
|
||||
|
||||
function lxc_container_config_kvm() {
|
||||
cat <<EOF
|
||||
#
|
||||
# /dev/kvm
|
||||
#
|
||||
lxc.cgroup2.devices.allow = c 10:232 rwm
|
||||
lxc.mount.entry = /dev/kvm dev/kvm none bind,create=file 0 0
|
||||
EOF
|
||||
}
|
||||
|
||||
function lxc_container_config_loop() {
|
||||
cat <<EOF
|
||||
#
|
||||
# /dev/loop
|
||||
#
|
||||
lxc.cgroup2.devices.allow = c 10:237 rwm
|
||||
lxc.cgroup2.devices.allow = b 7:* rwm
|
||||
lxc.mount.entry = /dev/loop-control dev/loop-control none bind,create=file 0 0
|
||||
EOF
|
||||
}
|
||||
|
||||
function lxc_container_config_mapper() {
|
||||
cat <<EOF
|
||||
#
|
||||
# /dev/mapper
|
||||
#
|
||||
lxc.cgroup2.devices.allow = c 10:236 rwm
|
||||
lxc.mount.entry = /dev/mapper dev/mapper none bind,create=dir 0 0
|
||||
EOF
|
||||
}
|
||||
|
||||
function lxc_container_config_fuse() {
|
||||
cat <<EOF
|
||||
#
|
||||
# /dev/fuse
|
||||
#
|
||||
lxc.cgroup2.devices.allow = b 10:229 rwm
|
||||
lxc.mount.entry = /dev/fuse dev/fuse none bind,create=file 0 0
|
||||
EOF
|
||||
}
|
||||
|
||||
function lxc_container_config_kmsg() {
|
||||
cat <<EOF
|
||||
#
|
||||
# kmsg
|
||||
#
|
||||
lxc.cgroup2.devices.allow = c 1:11 rwm
|
||||
lxc.mount.entry = /dev/kmsg dev/kmsg none bind,create=file 0 0
|
||||
EOF
|
||||
}
|
||||
|
||||
function lxc_container_config_proc() {
|
||||
cat <<EOF
|
||||
#
|
||||
# /proc
|
||||
#
|
||||
#
|
||||
# Only because k8s tries to write /proc/sys/vm/overcommit_memory
|
||||
# is there a way to only allow that? Would it be enough for k8s?
|
||||
#
|
||||
lxc.mount.auto = proc:rw
|
||||
EOF
|
||||
}
|
||||
|
||||
function lxc_container_config() {
|
||||
for config in "$@" ; do
|
||||
case $config in
|
||||
unprivileged)
|
||||
;;
|
||||
lxc)
|
||||
echo nesting
|
||||
echo cap
|
||||
;;
|
||||
docker)
|
||||
echo net
|
||||
;;
|
||||
libvirt)
|
||||
echo cap
|
||||
echo kvm
|
||||
echo loop
|
||||
echo mapper
|
||||
echo fuse
|
||||
;;
|
||||
k8s)
|
||||
echo cap
|
||||
echo loop
|
||||
echo mapper
|
||||
echo fuse
|
||||
echo kmsg
|
||||
echo proc
|
||||
;;
|
||||
*)
|
||||
echo "$config unknown ($LXC_CONTAINER_CONFIG_ALL)"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
done | sort -u | while read config ; do
|
||||
echo "#"
|
||||
echo "# include $config config snippet"
|
||||
echo "#"
|
||||
lxc_container_config_$config
|
||||
done
|
||||
}
|
||||
|
||||
function lxc_container_configure() {
|
||||
local name="$1"
|
||||
|
||||
lxc_container_config $LXC_CONTAINER_CONFIG | $LXC_SUDO tee -a $(lxc_config $name)
|
||||
}
|
||||
|
||||
function lxc_container_install_lxc_helpers() {
|
||||
local name="$1"
|
||||
|
||||
$LXC_SUDO cp -a $LXC_SELF_DIR/lxc-helpers*.sh $root/$LXC_BIN
|
||||
#
|
||||
# Wait for the network to come up
|
||||
#
|
||||
local wait_networking=$(lxc_root $name)/usr/local/bin/lxc-helpers-wait-networking.sh
|
||||
$LXC_SUDO tee $wait_networking > /dev/null <<'EOF'
|
||||
#!/bin/sh -e
|
||||
for d in $(seq 60); do
|
||||
getent hosts wikipedia.org > /dev/null && break
|
||||
sleep 1
|
||||
done
|
||||
getent hosts wikipedia.org > /dev/null || getent hosts wikipedia.org
|
||||
EOF
|
||||
$LXC_SUDO chmod +x $wait_networking
|
||||
}
|
||||
|
||||
function lxc_container_create() {
|
||||
local name="$1"
|
||||
|
||||
lxc_prepare_environment
|
||||
lxc_build_template $(lxc_template_release) "$name"
|
||||
}
|
||||
|
||||
function lxc_container_mount() {
|
||||
local name="$1"
|
||||
local dir="$2"
|
||||
|
||||
local config=$(lxc_config $name)
|
||||
|
||||
if ! $LXC_SUDO grep --quiet "lxc.mount.entry = $dir" $config ; then
|
||||
local relative_dir=${dir##/}
|
||||
$LXC_SUDO tee -a $config > /dev/null <<< "lxc.mount.entry = $dir $relative_dir none bind,create=dir 0 0"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
function lxc_container_start() {
|
||||
local name="$1"
|
||||
|
||||
if lxc_running $name ; then
|
||||
return
|
||||
fi
|
||||
|
||||
local logs
|
||||
if $LXC_VERBOSE; then
|
||||
logs="--logfile=/dev/tty"
|
||||
fi
|
||||
|
||||
$LXC_SUDO lxc-start $logs $name
|
||||
$LXC_SUDO lxc-wait --name $name --state RUNNING
|
||||
lxc_container_run $name /usr/local/bin/lxc-helpers-wait-networking.sh
|
||||
}
|
||||
|
||||
function lxc_container_stop() {
|
||||
local name="$1"
|
||||
|
||||
$LXC_SUDO lxc-ls -1 --running --filter="^$name" | while read container ; do
|
||||
$LXC_SUDO lxc-stop --kill --name="$container"
|
||||
done
|
||||
}
|
||||
|
||||
function lxc_container_destroy() {
|
||||
local name="$1"
|
||||
local root="$2"
|
||||
|
||||
if lxc_exists "$name" ; then
|
||||
lxc_container_stop $name $root
|
||||
$LXC_SUDO lxc-destroy --force --name="$name"
|
||||
fi
|
||||
}
|
||||
|
||||
function lxc_exists() {
|
||||
local name="$1"
|
||||
|
||||
test "$($LXC_SUDO lxc-ls --filter=^$name\$)"
|
||||
}
|
||||
|
||||
function lxc_running() {
|
||||
local name="$1"
|
||||
|
||||
test "$($LXC_SUDO lxc-ls --running --filter=^$name\$)"
|
||||
}
|
||||
|
||||
function lxc_build_template_release() {
|
||||
local name="$(lxc_template_release)"
|
||||
|
||||
if lxc_exists $name ; then
|
||||
return
|
||||
fi
|
||||
|
||||
local root=$(lxc_root $name)
|
||||
$LXC_SUDO lxc-create --name $name --template debian -- --release=$LXC_CONTAINER_RELEASE
|
||||
echo 'lxc.apparmor.profile = unconfined' | $LXC_SUDO tee -a $(lxc_config $name)
|
||||
lxc_container_install_lxc_helpers $name
|
||||
lxc_container_start $name
|
||||
lxc_container_run $name apt-get update -qq
|
||||
lxc_apt_install $name sudo git python3
|
||||
lxc_container_stop $name
|
||||
}
|
||||
|
||||
function lxc_build_template() {
|
||||
local name="$1"
|
||||
local newname="$2"
|
||||
|
||||
if lxc_exists $newname ; then
|
||||
return
|
||||
fi
|
||||
|
||||
if test "$name" = "$(lxc_template_release)" ; then
|
||||
lxc_build_template_release
|
||||
fi
|
||||
|
||||
if ! $LXC_SUDO lxc-copy --name=$name --newname=$newname ; then
|
||||
echo lxc-copy --name=$name --newname=$newname failed
|
||||
return 1
|
||||
fi
|
||||
lxc_container_configure $newname
|
||||
}
|
||||
|
||||
function lxc_apt_install() {
|
||||
local name="$1"
|
||||
shift
|
||||
|
||||
lxc_container_inside $name lxc_apt_install_inside "$@"
|
||||
}
|
||||
|
||||
function lxc_apt_install_inside() {
|
||||
apt-get install -y -qq "$@"
|
||||
}
|
||||
|
||||
function lxc_install_lxc() {
|
||||
local name="$1"
|
||||
local prefix="$2"
|
||||
local prefixv6="$3"
|
||||
|
||||
lxc_container_inside $name lxc_install_lxc_inside $prefix $prefixv6
|
||||
}
|
||||
|
||||
function lxc_install_lxc_inside() {
|
||||
local prefix="$1"
|
||||
local prefixv6="${2:-$LXC_IPV6_PREFIX_DEFAULT}"
|
||||
|
||||
local packages="make git libvirt0 libpam-cgfs bridge-utils uidmap dnsmasq-base dnsmasq dnsmasq-utils qemu-user-static lxc-templates debootstrap"
|
||||
if test "$(lxc_release)" = bookworm ; then
|
||||
packages="$packages distro-info"
|
||||
fi
|
||||
|
||||
lxc_apt_install_inside $packages
|
||||
|
||||
if ! grep --quiet LXC_ADDR=.$prefix.1. /etc/default/lxc-net ; then
|
||||
systemctl disable --now dnsmasq
|
||||
apt-get install -y -qq lxc
|
||||
systemctl stop lxc-net
|
||||
sed -i -e '/ConditionVirtualization/d' /usr/lib/systemd/system/lxc-net.service
|
||||
systemctl daemon-reload
|
||||
cat >> /etc/default/lxc-net <<EOF
|
||||
LXC_ADDR="$prefix.1"
|
||||
LXC_NETMASK="255.255.255.0"
|
||||
LXC_NETWORK="$prefix.0/24"
|
||||
LXC_DHCP_RANGE="$prefix.2,$prefix.254"
|
||||
LXC_DHCP_MAX="253"
|
||||
LXC_IPV6_ADDR="$prefixv6::216:3eff:fe00:1"
|
||||
LXC_IPV6_MASK="64"
|
||||
LXC_IPV6_NETWORK="$prefixv6::/64"
|
||||
LXC_IPV6_NAT="true"
|
||||
EOF
|
||||
systemctl start lxc-net
|
||||
fi
|
||||
}
|
||||
|
||||
function lxc_install_docker() {
|
||||
local name="$1"
|
||||
|
||||
lxc_container_inside $name lxc_install_docker_inside
|
||||
}
|
||||
|
||||
function lxc_install_docker_inside() {
|
||||
mkdir /etc/docker
|
||||
cat > /etc/docker/daemon.json <<EOF
|
||||
{
|
||||
"ipv6": true,
|
||||
"fixed-cidr-v6": "$LXC_IPV6_DOCKER_PREFIX_DEFAULT:1::/64",
|
||||
"default-address-pools": [
|
||||
{"base": "$LXC_DOCKER_PREFIX_DEFAULT.0.0/16", "size": 24},
|
||||
{"base": "$LXC_IPV6_DOCKER_PREFIX_DEFAULT:2::/104", "size": 112}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
lxc_apt_install_inside docker.io docker-compose
|
||||
}
|
165
pkg/runner/lxc-helpers.sh
Executable file
|
@ -0,0 +1,165 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
set -e
|
||||
|
||||
source $(dirname $0)/lxc-helpers-lib.sh
|
||||
|
||||
function verbose() {
|
||||
set -x
|
||||
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
|
||||
LXC_VERBOSE=true
|
||||
}
|
||||
|
||||
function help() {
|
||||
cat <<'EOF'
|
||||
lxc-helpers.sh - LXC container management helpers
|
||||
|
||||
SYNOPSIS
|
||||
|
||||
lxc-helpers.sh [-v|--verbose] [-h|--help]
|
||||
[-o|--os {bookworm|bullseye} (default bookworm)]
|
||||
command [arguments]
|
||||
|
||||
lxc-helpers.sh [-v|--verbose] [-h|--help]
|
||||
[-o|--os {bookworm|bullseye} (default bookworm)]
|
||||
[-c|--config {unprivileged lxc libvirt docker k8s} (default "lxc libvirt docker")]
|
||||
lxc_container_create [arguments]
|
||||
|
||||
DESCRIPTION
|
||||
|
||||
A thin shell based layer on top of LXC to create, populate, run and
|
||||
destroy LXC containers. A container is created from a copy of an
|
||||
existing container.
|
||||
|
||||
The LXC network is configured to provide a NAT'ed IP address (IPv4
|
||||
and IPv6) to each container, in a configurable private range.
|
||||
|
||||
CREATE AND DESTROY
|
||||
|
||||
lxc_prepare_environment
|
||||
|
||||
Install LXC dependencies.
|
||||
|
||||
lxc_container_create `name`
|
||||
|
||||
Create the `name` container.
|
||||
|
||||
lxc_container_mount `name` `path`
|
||||
|
||||
Configure `name` container to bind mount `path` so that it is
|
||||
also accessible at `path` from within the container.
|
||||
|
||||
lxc_container_start `name`
|
||||
|
||||
Start the `name` container.
|
||||
|
||||
lxc_container_stop `name`
|
||||
|
||||
Stop the `name` container.
|
||||
|
||||
lxc_container_destroy `name`
|
||||
|
||||
Call lxc_container_stop `name` and destroy the container.
|
||||
|
||||
lxc_template_release
|
||||
|
||||
Echo the name of the container for the Operating System
|
||||
specified with `--os`.
|
||||
|
||||
lxc_build_template `existing_container` `new_container`
|
||||
|
||||
Copy `existing_container` into `new_container`. If
|
||||
`existing_container` is equal to $(lxc-helpers.sh lxc_template_release) it
|
||||
will be created on demand.
|
||||
|
||||
CONFIGURATION
|
||||
|
||||
The `--config` option provides preset configurations appended to the `/var/lib/lxc/name/config`
|
||||
file when the container is created with the `lxc_container_create` command. They are required
|
||||
to run the corresponding subsystem:
|
||||
|
||||
* `docker` https://www.docker.com/
|
||||
* `lxc` https://linuxcontainers.org/lxc/
|
||||
* `libvirt` https://libvirt.org/
|
||||
* `k8s` https://kubernetes.io/
|
||||
* `unprivileged` none of the above
|
||||
|
||||
Example: lxc-helpers.sh --config "docker libvirt" lxc_container_create mycontainer
|
||||
|
||||
The `unprivileged` configuration does not add anything.
|
||||
|
||||
ACTIONS IN THE CONTAINER
|
||||
|
||||
For some commands lxc_something `name` that can be called from outside the container,
|
||||
there is an equivalent function lxc_something_inside that can be called from inside
|
||||
the container.
|
||||
|
||||
lxc_install_lxc `name` `prefix` [`prefixv6`]
|
||||
lxc_install_lxc_inside `prefix` [`prefixv6`]
|
||||
|
||||
Install LXC in the `name` container to allow the creation of
|
||||
named containers. `prefix` is a class C IP prefix from which
|
||||
containers will obtain their IP (for instance 10.40.50). `prefixv6`
|
||||
is an optional IPv6 private address prefix that defaults to fc15.
|
||||
|
||||
lxc_container_run `name` command [options...]
|
||||
|
||||
Run the `command` within the `name` container.
|
||||
|
||||
lxc_container_run_script `name` `path`
|
||||
lxc_container_run_script_as `name` `user` `path`
|
||||
|
||||
Run the script found at `path` within the `name` container. The
|
||||
environment is cleared before running the script. The first form
|
||||
will run as root, the second form will impersonate `user`.
|
||||
|
||||
lxc_container_user_install `name` `user_id` `user` [`homedir` default `/home`]
|
||||
|
||||
Create the `user` with `user_id` in the `name` container with a
|
||||
HOME at `/homedir/user`. Passwordless sudo permissions are
|
||||
granted to `user`. It is made a member of the groups docker, kvm
|
||||
and libvirt if they exist already. A SSH key is created.
|
||||
|
||||
Example: lxc_container_user_install mycontainer $(id -u) $USER
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
function main() {
|
||||
local options=$(getopt -o hvoc --long help,verbose,os:,config: -- "$@")
|
||||
[ $? -eq 0 ] || {
|
||||
echo "Incorrect options provided"
|
||||
exit 1
|
||||
}
|
||||
eval set -- "$options"
|
||||
while true; do
|
||||
case "$1" in
|
||||
-v | --verbose)
|
||||
verbose
|
||||
;;
|
||||
-h | --help)
|
||||
help
|
||||
;;
|
||||
-o | --os)
|
||||
LXC_CONTAINER_RELEASE=$2
|
||||
shift
|
||||
;;
|
||||
-c | --config)
|
||||
LXC_CONTAINER_CONFIG="$2"
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
lxc_maybe_sudo
|
||||
|
||||
"$@"
|
||||
}
|
||||
|
||||
main "$@"
|
|
@ -1,6 +1,7 @@
|
|||
package runner
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -67,12 +68,51 @@ func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
|
|||
// FIXME: if the reusable workflow is from a private repository, we need to provide a token to access the repository.
|
||||
token := ""
|
||||
|
||||
if rc.Config.ActionCache != nil {
|
||||
return newActionCacheReusableWorkflowExecutor(rc, filename, remoteReusableWorkflow)
|
||||
}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir, token)),
|
||||
newReusableWorkflowExecutor(rc, workflowDir, remoteReusableWorkflow.FilePath()),
|
||||
)
|
||||
}
|
||||
|
||||
func newActionCacheReusableWorkflowExecutor(rc *RunContext, filename string, remoteReusableWorkflow *remoteReusableWorkflow) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
ghctx := rc.getGithubContext(ctx)
|
||||
remoteReusableWorkflow.URL = ghctx.ServerURL
|
||||
sha, err := rc.Config.ActionCache.Fetch(ctx, filename, remoteReusableWorkflow.CloneURL(), remoteReusableWorkflow.Ref, ghctx.Token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
archive, err := rc.Config.ActionCache.GetTarArchive(ctx, filename, sha, fmt.Sprintf(".github/workflows/%s", remoteReusableWorkflow.Filename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer archive.Close()
|
||||
treader := tar.NewReader(archive)
|
||||
if _, err = treader.Next(); err != nil {
|
||||
return err
|
||||
}
|
||||
planner, err := model.NewSingleWorkflowPlanner(remoteReusableWorkflow.Filename, treader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
plan, err := planner.PlanEvent("workflow_call")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
runner, err := NewReusableWorkflowRunner(rc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return runner.NewPlanExecutor(plan)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
executorLock sync.Mutex
|
||||
)
|
||||
|
@ -99,10 +139,11 @@ func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkfl
|
|||
// 2. Gitea has already full URL with rc.Config.GitHubInstance when calling newRemoteReusableWorkflowWithPlat
|
||||
// remoteReusableWorkflow.URL = rc.getGithubContext(ctx).ServerURL
|
||||
return git.NewGitCloneExecutor(git.NewGitCloneExecutorInput{
|
||||
URL: remoteReusableWorkflow.CloneURL(),
|
||||
Ref: remoteReusableWorkflow.Ref,
|
||||
Dir: targetDirectory,
|
||||
Token: token,
|
||||
URL: remoteReusableWorkflow.CloneURL(),
|
||||
Ref: remoteReusableWorkflow.Ref,
|
||||
Dir: targetDirectory,
|
||||
Token: token,
|
||||
OfflineMode: rc.Config.ActionOfflineMode,
|
||||
})(ctx)
|
||||
},
|
||||
nil,
|
||||
|
|
|
@ -3,9 +3,11 @@ package runner
|
|||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
_ "embed"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
@ -16,14 +18,15 @@ import (
|
|||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"text/template"
|
||||
|
||||
docker "github.com/docker/docker/api/types"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/container"
|
||||
"github.com/nektos/act/pkg/exprparser"
|
||||
"github.com/nektos/act/pkg/model"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
)
|
||||
|
||||
// RunContext contains info about current job
|
||||
|
@ -65,7 +68,7 @@ func (rc *RunContext) String() string {
|
|||
if rc.caller != nil {
|
||||
// prefix the reusable workflow with the caller job
|
||||
// this is required to create unique container names
|
||||
name = fmt.Sprintf("%s/%s", rc.caller.runContext.Run.JobID, name)
|
||||
name = fmt.Sprintf("%s/%s", rc.caller.runContext.Name, name)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
@ -95,9 +98,11 @@ func (rc *RunContext) jobContainerName() string {
|
|||
}
|
||||
|
||||
// networkName return the name of the network which will be created by `act` automatically for job,
|
||||
// only create network if `rc.Config.ContainerNetworkMode` is empty string.
|
||||
func (rc *RunContext) networkName() string {
|
||||
return fmt.Sprintf("%s-network", rc.jobContainerName())
|
||||
func (rc *RunContext) networkName() (string, bool) {
|
||||
if len(rc.Run.Job().Services) > 0 || rc.Config.ContainerNetworkMode == "" {
|
||||
return fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID), true
|
||||
}
|
||||
return string(rc.Config.ContainerNetworkMode), false
|
||||
}
|
||||
|
||||
func getDockerDaemonSocketMountPath(daemonPath string) string {
|
||||
|
@ -135,7 +140,7 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
|
|||
ext := container.LinuxContainerEnvironmentExtensions{}
|
||||
|
||||
mounts := map[string]string{
|
||||
"act-toolcache": "/toolcache",
|
||||
"act-toolcache": "/opt/hostedtoolcache",
|
||||
name + "-env": ext.GetActPath(),
|
||||
}
|
||||
|
||||
|
@ -178,6 +183,96 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
|
|||
return binds, mounts
|
||||
}
|
||||
|
||||
//go:embed lxc-helpers-lib.sh
|
||||
var lxcHelpersLib string
|
||||
|
||||
//go:embed lxc-helpers.sh
|
||||
var lxcHelpers string
|
||||
|
||||
var startTemplate = template.Must(template.New("start").Parse(`#!/bin/bash -e
|
||||
|
||||
exec 5<>/tmp/forgejo-runner-lxc.lock ; flock --timeout 21600 5
|
||||
|
||||
LXC_CONTAINER_CONFIG="{{.Config}}"
|
||||
LXC_CONTAINER_RELEASE="{{.Release}}"
|
||||
|
||||
source $(dirname $0)/lxc-helpers-lib.sh
|
||||
|
||||
function template_act() {
|
||||
echo $(lxc_template_release)-act
|
||||
}
|
||||
|
||||
function install_nodejs() {
|
||||
local name="$1"
|
||||
|
||||
local script=/usr/local/bin/lxc-helpers-install-node.sh
|
||||
|
||||
cat > $(lxc_root $name)/$script <<'EOF'
|
||||
#!/bin/sh -e
|
||||
# https://github.com/nodesource/distributions#debinstall
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get install -qq -y ca-certificates curl gnupg git
|
||||
mkdir -p /etc/apt/keyrings
|
||||
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
|
||||
NODE_MAJOR=20
|
||||
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list
|
||||
apt-get update -qq
|
||||
apt-get install -qq -y nodejs
|
||||
EOF
|
||||
lxc_container_run_script $name $script
|
||||
}
|
||||
|
||||
function build_template_act() {
|
||||
local name="$(template_act)"
|
||||
|
||||
if lxc_exists $name ; then
|
||||
return
|
||||
fi
|
||||
|
||||
lxc_build_template $(lxc_template_release) $name
|
||||
lxc_container_start $name
|
||||
install_nodejs $name
|
||||
lxc_container_stop $name
|
||||
}
|
||||
|
||||
lxc_prepare_environment
|
||||
LXC_CONTAINER_CONFIG="" build_template_act
|
||||
lxc_build_template $(template_act) "{{.Name}}"
|
||||
lxc_container_mount "{{.Name}}" "{{ .Root }}"
|
||||
lxc_container_start "{{.Name}}"
|
||||
`))
|
||||
|
||||
var stopTemplate = template.Must(template.New("stop").Parse(`#!/bin/bash
|
||||
source $(dirname $0)/lxc-helpers-lib.sh
|
||||
|
||||
lxc_container_destroy "{{.Name}}"
|
||||
`))
|
||||
|
||||
func (rc *RunContext) stopHostEnvironment(ctx context.Context) error {
|
||||
logger := common.Logger(ctx)
|
||||
logger.Debugf("stopHostEnvironment")
|
||||
|
||||
var stopScript bytes.Buffer
|
||||
if err := stopTemplate.Execute(&stopScript, struct {
|
||||
Name string
|
||||
Root string
|
||||
}{
|
||||
Name: rc.JobContainer.GetName(),
|
||||
Root: rc.JobContainer.GetRoot(),
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
|
||||
Name: "workflow/stop-lxc.sh",
|
||||
Mode: 0755,
|
||||
Body: stopScript.String(),
|
||||
}),
|
||||
rc.JobContainer.Exec([]string{rc.JobContainer.GetActPath() + "/workflow/stop-lxc.sh"}, map[string]string{}, "root", "/tmp"),
|
||||
)(ctx)
|
||||
}
|
||||
|
||||
func (rc *RunContext) startHostEnvironment() common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
logger := common.Logger(ctx)
|
||||
|
@ -193,7 +288,8 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
|
|||
cacheDir := rc.ActionCacheDir()
|
||||
randBytes := make([]byte, 8)
|
||||
_, _ = rand.Read(randBytes)
|
||||
miscpath := filepath.Join(cacheDir, hex.EncodeToString(randBytes))
|
||||
randName := hex.EncodeToString(randBytes)
|
||||
miscpath := filepath.Join(cacheDir, randName)
|
||||
actPath := filepath.Join(miscpath, "act")
|
||||
if err := os.MkdirAll(actPath, 0o777); err != nil {
|
||||
return err
|
||||
|
@ -208,6 +304,8 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
|
|||
}
|
||||
toolCache := filepath.Join(cacheDir, "tool_cache")
|
||||
rc.JobContainer = &container.HostEnvironment{
|
||||
Name: randName,
|
||||
Root: miscpath,
|
||||
Path: path,
|
||||
TmpDir: runnerTmp,
|
||||
ToolCache: toolCache,
|
||||
|
@ -217,6 +315,7 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
|
|||
os.RemoveAll(miscpath)
|
||||
},
|
||||
StdOut: logWriter,
|
||||
LXC: rc.IsLXCHostEnv(ctx),
|
||||
}
|
||||
rc.cleanUpJobContainer = rc.JobContainer.Remove()
|
||||
for k, v := range rc.JobContainer.GetRunnerContext(ctx) {
|
||||
|
@ -233,20 +332,69 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
|
|||
}
|
||||
}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
|
||||
Name: "workflow/event.json",
|
||||
Mode: 0o644,
|
||||
Body: rc.EventJSON,
|
||||
}, &container.FileEntry{
|
||||
Name: "workflow/envs.txt",
|
||||
Mode: 0o666,
|
||||
Body: "",
|
||||
}),
|
||||
)(ctx)
|
||||
executors := make([]common.Executor, 0, 10)
|
||||
|
||||
isLXCHost, LXCTemplate, LXCRelease, LXCConfig := rc.GetLXCInfo(ctx)
|
||||
|
||||
if isLXCHost {
|
||||
var startScript bytes.Buffer
|
||||
if err := startTemplate.Execute(&startScript, struct {
|
||||
Name string
|
||||
Template string
|
||||
Release string
|
||||
Config string
|
||||
Repo string
|
||||
Root string
|
||||
TmpDir string
|
||||
Script string
|
||||
}{
|
||||
Name: rc.JobContainer.GetName(),
|
||||
Template: LXCTemplate,
|
||||
Release: LXCRelease,
|
||||
Config: LXCConfig,
|
||||
Repo: "", // step.Environment["CI_REPO"],
|
||||
Root: rc.JobContainer.GetRoot(),
|
||||
TmpDir: runnerTmp,
|
||||
Script: "", // "commands-" + step.Name,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
executors = append(executors,
|
||||
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
|
||||
Name: "workflow/lxc-helpers-lib.sh",
|
||||
Mode: 0755,
|
||||
Body: lxcHelpersLib,
|
||||
}),
|
||||
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
|
||||
Name: "workflow/lxc-helpers.sh",
|
||||
Mode: 0755,
|
||||
Body: lxcHelpers,
|
||||
}),
|
||||
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
|
||||
Name: "workflow/start-lxc.sh",
|
||||
Mode: 0755,
|
||||
Body: startScript.String(),
|
||||
}),
|
||||
rc.JobContainer.Exec([]string{rc.JobContainer.GetActPath() + "/workflow/start-lxc.sh"}, map[string]string{}, "root", "/tmp"),
|
||||
)
|
||||
}

		executors = append(executors, rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
			Name: "workflow/event.json",
			Mode: 0o644,
			Body: rc.EventJSON,
		}, &container.FileEntry{
			Name: "workflow/envs.txt",
			Mode: 0o666,
			Body: "",
		}))

		return common.NewPipelineExecutor(executors...)(ctx)
	}
}

//nolint:gocyclo
func (rc *RunContext) startJobContainer() common.Executor {
	return func(ctx context.Context) error {
		logger := common.Logger(ctx)

@@ -285,14 +433,15 @@ func (rc *RunContext) startJobContainer() common.Executor {

		// specify the network to which the container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>)
		networkName := string(rc.Config.ContainerNetworkMode)
		var createAndDeleteNetwork bool
		if networkName == "" {
			// if networkName is empty string, will create a new network for the containers.
			// and it will be removed after at last.
			networkName = rc.networkName()
			networkName, createAndDeleteNetwork = rc.networkName()
		}

		// add service containers
		for serviceId, spec := range rc.Run.Job().Services {
		for serviceID, spec := range rc.Run.Job().Services {
			// interpolate env
			interpolatedEnvs := make(map[string]string, len(spec.Env))
			for k, v := range spec.Env {

@@ -302,21 +451,36 @@ func (rc *RunContext) startJobContainer() common.Executor {
			for k, v := range interpolatedEnvs {
				envs = append(envs, fmt.Sprintf("%s=%s", k, v))
			}
			// interpolate cmd
			interpolatedCmd := make([]string, 0, len(spec.Cmd))
			for _, v := range spec.Cmd {
				interpolatedCmd = append(interpolatedCmd, rc.ExprEval.Interpolate(ctx, v))
			}
			username, password, err := rc.handleServiceCredentials(ctx, spec.Credentials)

			username, password, err = rc.handleServiceCredentials(ctx, spec.Credentials)
			if err != nil {
				return fmt.Errorf("failed to handle service %s credentials: %w", serviceId, err)
				return fmt.Errorf("failed to handle service %s credentials: %w", serviceID, err)
			}
			serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(spec.Volumes)
			serviceContainerName := createSimpleContainerName(rc.jobContainerName(), serviceId)

			interpolatedVolumes := make([]string, 0, len(spec.Volumes))
			for _, volume := range spec.Volumes {
				interpolatedVolumes = append(interpolatedVolumes, rc.ExprEval.Interpolate(ctx, volume))
			}
			serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(interpolatedVolumes)

			interpolatedPorts := make([]string, 0, len(spec.Ports))
			for _, port := range spec.Ports {
				interpolatedPorts = append(interpolatedPorts, rc.ExprEval.Interpolate(ctx, port))
			}
			exposedPorts, portBindings, err := nat.ParsePortSpecs(interpolatedPorts)
			if err != nil {
				return fmt.Errorf("failed to parse service %s ports: %w", serviceID, err)
			}

			serviceContainerName := createContainerName(rc.jobContainerName(), serviceID)
			c := container.NewContainer(&container.NewContainerInput{
				Name:       serviceContainerName,
				WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
				Image:      spec.Image,
				Image:      rc.ExprEval.Interpolate(ctx, spec.Image),
				Username:   username,
				Password:   password,
				Cmd:        interpolatedCmd,

@@ -329,26 +493,51 @@ func (rc *RunContext) startJobContainer() common.Executor {
				UsernsMode: rc.Config.UsernsMode,
				Platform:   rc.Config.ContainerArchitecture,
				AutoRemove: rc.Config.AutoRemove,
				Options:    spec.Options,
				Options:    rc.ExprEval.Interpolate(ctx, spec.Options),
				NetworkMode:    networkName,
				NetworkAliases: []string{serviceId},
				NetworkAliases: []string{serviceID},
				ExposedPorts:   exposedPorts,
				PortBindings:   portBindings,
				ValidVolumes:   rc.Config.ValidVolumes,
			})
			rc.ServiceContainers = append(rc.ServiceContainers, c)
		}

		rc.cleanUpJobContainer = func(ctx context.Context) error {
			if rc.JobContainer != nil && !rc.Config.ReuseContainers {
				return rc.JobContainer.Remove().
					Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).
					Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false))(ctx)
			reuseJobContainer := func(ctx context.Context) bool {
				return rc.Config.ReuseContainers
			}

			if rc.JobContainer != nil {
				return rc.JobContainer.Remove().IfNot(reuseJobContainer).
					Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).IfNot(reuseJobContainer).
					Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false)).IfNot(reuseJobContainer).
					Then(func(ctx context.Context) error {
						if len(rc.ServiceContainers) > 0 {
							logger.Infof("Cleaning up services for job %s", rc.JobName)
							if err := rc.stopServiceContainers()(ctx); err != nil {
								logger.Errorf("Error while cleaning services: %v", err)
							}
							if createAndDeleteNetwork {
								// clean network if it has been created by act
								// if using service containers
								// it means that the network to which containers are connecting is created by `act_runner`,
								// so, we should remove the network at last.
								logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
								if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
									logger.Errorf("Error while cleaning network: %v", err)
								}
							}
						}
						return nil
					})(ctx)
			}
			return nil
		}

		rc.JobContainer = container.NewContainer(&container.NewContainerInput{
			Cmd:        nil,
			Entrypoint: []string{"/bin/sleep", fmt.Sprint(rc.Config.ContainerMaxLifetime.Round(time.Second).Seconds())},
			Entrypoint: []string{"tail", "-f", "/dev/null"},
			WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
			Image:      image,
			Username:   username,

@@ -372,10 +561,16 @@ func (rc *RunContext) startJobContainer() common.Executor {
			return errors.New("Failed to create job container")
		}

		networkConfig := docker.NetworkCreate{
			Driver:     "bridge",
			Scope:      "local",
			EnableIPv6: rc.Config.ContainerNetworkEnableIPv6,
		}
		return common.NewPipelineExecutor(
			rc.pullServicesImages(rc.Config.ForcePull),
			rc.JobContainer.Pull(rc.Config.ForcePull),
			container.NewDockerNetworkCreateExecutor(networkName).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers.
			rc.stopJobContainer(),
			container.NewDockerNetworkCreateExecutor(networkName, &networkConfig).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers.
			rc.startServiceContainers(networkName),
			rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
			rc.JobContainer.Start(false),

@@ -452,10 +647,10 @@ func (rc *RunContext) UpdateExtraPath(ctx context.Context, githubEnvPath string)
	return nil
}

// stopJobContainer removes the job container (if it exists) and its volume (if it exists) if !rc.Config.ReuseContainers
// stopJobContainer removes the job container (if it exists) and its volume (if it exists)
func (rc *RunContext) stopJobContainer() common.Executor {
	return func(ctx context.Context) error {
		if rc.cleanUpJobContainer != nil && !rc.Config.ReuseContainers {
		if rc.cleanUpJobContainer != nil {
			return rc.cleanUpJobContainer(ctx)
		}
		return nil

@@ -472,7 +667,7 @@ func (rc *RunContext) pullServicesImages(forcePull bool) common.Executor {
	}
}

func (rc *RunContext) startServiceContainers(networkName string) common.Executor {
func (rc *RunContext) startServiceContainers(_ string) common.Executor {
	return func(ctx context.Context) error {
		execs := []common.Executor{}
		for _, c := range rc.ServiceContainers {

@@ -490,7 +685,7 @@ func (rc *RunContext) stopServiceContainers() common.Executor {
	return func(ctx context.Context) error {
		execs := []common.Executor{}
		for _, c := range rc.ServiceContainers {
			execs = append(execs, c.Remove())
			execs = append(execs, c.Remove().Finally(c.Close()))
		}
		return common.NewParallelExecutor(len(execs), execs...)(ctx)
	}
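The cleanup code above composes common.Executor values with Then, IfNot and Finally so individual steps can be skipped or always run without nested conditionals. The following is a minimal, self-contained illustration of how such combinators can compose; it is a sketch of the pattern, not the pkg/common implementation.

// Minimal illustration of executor chaining; not the pkg/common implementation.
package main

import (
	"context"
	"fmt"
)

// Executor mirrors the shape used throughout the diff: a func(ctx) error.
type Executor func(ctx context.Context) error

// Then runs e, then next, stopping at the first error.
func (e Executor) Then(next Executor) Executor {
	return func(ctx context.Context) error {
		if err := e(ctx); err != nil {
			return err
		}
		return next(ctx)
	}
}

// IfNot skips e when cond(ctx) reports true (e.g. "reuse containers" above).
func (e Executor) IfNot(cond func(ctx context.Context) bool) Executor {
	return func(ctx context.Context) error {
		if cond(ctx) {
			return nil
		}
		return e(ctx)
	}
}

// Finally always runs post, even if e failed, and keeps e's error.
func (e Executor) Finally(post Executor) Executor {
	return func(ctx context.Context) error {
		err := e(ctx)
		if perr := post(ctx); err == nil {
			err = perr
		}
		return err
	}
}

func main() {
	reuse := func(ctx context.Context) bool { return false }
	remove := Executor(func(ctx context.Context) error { fmt.Println("remove"); return nil })
	closeC := Executor(func(ctx context.Context) error { fmt.Println("close"); return nil })

	// Roughly the shape used for cleanup: remove unless reusing, always close.
	_ = remove.IfNot(reuse).Finally(closeC)(context.Background())
}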

@@ -539,19 +734,55 @@ func (rc *RunContext) startContainer() common.Executor {
	}
}

func (rc *RunContext) IsHostEnv(ctx context.Context) bool {
func (rc *RunContext) IsBareHostEnv(ctx context.Context) bool {
	platform := rc.runsOnImage(ctx)
	image := rc.containerImage(ctx)
	return image == "" && strings.EqualFold(platform, "-self-hosted")
}

const lxcPrefix = "lxc:"

func (rc *RunContext) IsLXCHostEnv(ctx context.Context) bool {
	platform := rc.runsOnImage(ctx)
	return strings.HasPrefix(platform, lxcPrefix)
}

func (rc *RunContext) GetLXCInfo(ctx context.Context) (isLXC bool, template, release, config string) {
	platform := rc.runsOnImage(ctx)
	if !strings.HasPrefix(platform, lxcPrefix) {
		return
	}
	isLXC = true
	s := strings.SplitN(strings.TrimPrefix(platform, lxcPrefix), ":", 3)
	template = s[0]
	if len(s) > 1 {
		release = s[1]
	}
	if len(s) > 2 {
		config = s[2]
	}
	return
}
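GetLXCInfo above recognises runs-on labels of the form lxc:<template>[:<release>[:<config>]]. A standalone sketch of that parsing follows, using an assumed example label that is not taken from this diff.

// Standalone sketch of the lxc: label parsing shown in GetLXCInfo above.
package main

import (
	"fmt"
	"strings"
)

const lxcPrefix = "lxc:"

func parseLXCLabel(platform string) (isLXC bool, template, release, config string) {
	if !strings.HasPrefix(platform, lxcPrefix) {
		return
	}
	isLXC = true
	s := strings.SplitN(strings.TrimPrefix(platform, lxcPrefix), ":", 3)
	template = s[0]
	if len(s) > 1 {
		release = s[1]
	}
	if len(s) > 2 {
		config = s[2]
	}
	return
}

func main() {
	// "lxc:debian:bookworm" is an assumed example label.
	ok, template, release, config := parseLXCLabel("lxc:debian:bookworm")
	fmt.Println(ok, template, release, config) // true debian bookworm ""
}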

func (rc *RunContext) IsHostEnv(ctx context.Context) bool {
	return rc.IsBareHostEnv(ctx) || rc.IsLXCHostEnv(ctx)
}

func (rc *RunContext) stopContainer() common.Executor {
	return rc.stopJobContainer()
	return func(ctx context.Context) error {
		if rc.IsLXCHostEnv(ctx) {
			return rc.stopHostEnvironment(ctx)
		}
		return rc.stopJobContainer()(ctx)
	}
}

func (rc *RunContext) closeContainer() common.Executor {
	return func(ctx context.Context) error {
		if rc.JobContainer != nil {
			if rc.IsLXCHostEnv(ctx) {
				return rc.stopHostEnvironment(ctx)
			}
			return rc.JobContainer.Close()(ctx)
		}
		return nil

@@ -573,7 +804,7 @@ func (rc *RunContext) steps() []*model.Step {
// Executor returns a pipeline executor for all the steps in the job
func (rc *RunContext) Executor() (common.Executor, error) {
	var executor common.Executor
	var jobType, err = rc.Run.Job().Type()
	jobType, err := rc.Run.Job().Type()

	switch jobType {
	case model.JobTypeDefault:

@@ -610,13 +841,11 @@ func (rc *RunContext) containerImage(ctx context.Context) string {
}

func (rc *RunContext) runsOnImage(ctx context.Context) string {
	job := rc.Run.Job()

	if job.RunsOn() == nil {
	if rc.Run.Job().RunsOn() == nil {
		common.Logger(ctx).Errorf("'runs-on' key not defined in %s", rc.String())
	}

	runsOn := job.RunsOn()
	runsOn := rc.Run.Job().RunsOn()
	for i, v := range runsOn {
		runsOn[i] = rc.ExprEval.Interpolate(ctx, v)
	}

@@ -627,8 +856,8 @@ func (rc *RunContext) runsOnImage(ctx context.Context) string {
		}
	}

	for _, runnerLabel := range runsOn {
		image := rc.Config.Platforms[strings.ToLower(runnerLabel)]
	for _, platformName := range rc.runsOnPlatformNames(ctx) {
		image := rc.Config.Platforms[strings.ToLower(platformName)]
		if image != "" {
			return image
		}

@@ -637,6 +866,21 @@ func (rc *RunContext) runsOnImage(ctx context.Context) string {
	return ""
}

func (rc *RunContext) runsOnPlatformNames(ctx context.Context) []string {
	job := rc.Run.Job()

	if job.RunsOn() == nil {
		return []string{}
	}

	if err := rc.ExprEval.EvaluateYamlNode(ctx, &job.RawRunsOn); err != nil {
		common.Logger(ctx).Errorf("Error while evaluating runs-on: %v", err)
		return []string{}
	}

	return job.RunsOn()
}

func (rc *RunContext) platformImage(ctx context.Context) string {
	if containerImage := rc.containerImage(ctx); containerImage != "" {
		return containerImage

@@ -649,7 +893,7 @@ func (rc *RunContext) options(ctx context.Context) string {
	job := rc.Run.Job()
	c := job.Container()
	if c != nil {
		return rc.ExprEval.Interpolate(ctx, c.Options)
		return rc.Config.ContainerOptions + " " + rc.ExprEval.Interpolate(ctx, c.Options)
	}

	return rc.Config.ContainerOptions

@@ -667,8 +911,6 @@ func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {

	if jobType == model.JobTypeInvalid {
		return false, jobTypeErr
	} else if jobType != model.JobTypeDefault {
		return true, nil
	}

	if !runJob {

@@ -676,14 +918,13 @@ func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {
		return false, nil
	}

	if jobType != model.JobTypeDefault {
		return true, nil
	}

	img := rc.platformImage(ctx)
	if img == "" {
		if job.RunsOn() == nil {
			l.Errorf("'runs-on' key not defined in %s", rc.String())
		}

		for _, runnerLabel := range job.RunsOn() {
			platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
		for _, platformName := range rc.runsOnPlatformNames(ctx) {
			l.Infof("\U0001F6A7 Skipping unsupported platform -- Try running with `-P %+v=...`", platformName)
		}
		return false, nil

@@ -987,9 +1228,7 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
		setActionRuntimeVars(rc, env)
	}

	job := rc.Run.Job()
	for _, runnerLabel := range job.RunsOn() {
		platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
	for _, platformName := range rc.runsOnPlatformNames(ctx) {
		if platformName != "" {
			if platformName == "ubuntu-latest" {
				// hardcode current ubuntu-latest since we have no way to check that 'on the fly'

@@ -470,6 +470,53 @@ func createJob(t *testing.T, input string, result string) *model.Job {
	return job
}

func TestRunContextRunsOnPlatformNames(t *testing.T) {
	log.SetLevel(log.DebugLevel)
	assertObject := assert.New(t)

	rc := createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `runs-on: ubuntu-latest`, ""),
	})
	assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))

	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `runs-on: ${{ 'ubuntu-latest' }}`, ""),
	})
	assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))

	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `runs-on: [self-hosted, my-runner]`, ""),
	})
	assertObject.Equal([]string{"self-hosted", "my-runner"}, rc.runsOnPlatformNames(context.Background()))

	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `runs-on: [self-hosted, "${{ 'my-runner' }}"]`, ""),
	})
	assertObject.Equal([]string{"self-hosted", "my-runner"}, rc.runsOnPlatformNames(context.Background()))

	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `runs-on: ${{ fromJSON('["ubuntu-latest"]') }}`, ""),
	})
	assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))

	// test missing / invalid runs-on
	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `name: something`, ""),
	})
	assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))

	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `runs-on:
  mapping: value`, ""),
	})
	assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))

	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `runs-on: ${{ invalid expression }}`, ""),
	})
	assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))
}

func TestRunContextIsEnabled(t *testing.T) {
	log.SetLevel(log.DebugLevel)
	assertObject := assert.New(t)

@@ -572,6 +619,17 @@ if: always()`, ""),
	})
	rc.Run.JobID = "job2"
	assertObject.True(rc.isEnabled(context.Background()))

	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `uses: ./.github/workflows/reusable.yml`, ""),
	})
	assertObject.True(rc.isEnabled(context.Background()))

	rc = createIfTestRunContext(map[string]*model.Job{
		"job1": createJob(t, `uses: ./.github/workflows/reusable.yml
if: false`, ""),
	})
	assertObject.False(rc.isEnabled(context.Background()))
}

func TestRunContextGetEnv(t *testing.T) {

@@ -22,54 +22,59 @@ type Runner interface {

// Config contains the config for a new runner
type Config struct {
	Actor string // the user that triggered the event
	Workdir string // path to working directory
	ActionCacheDir string // path used for caching action contents
	BindWorkdir bool // bind the workdir to the job container
	EventName string // name of event to run
	EventPath string // path to JSON file to use for event.json in containers
	DefaultBranch string // name of the main branch for this repository
	ReuseContainers bool // reuse containers to maintain state
	ForcePull bool // force pulling of the image, even if already present
	ForceRebuild bool // force rebuilding local docker image action
	LogOutput bool // log the output from docker run
	JSONLogger bool // use json or text logger
	LogPrefixJobID bool // switches from the full job name to the job id
	Env map[string]string // env for containers
	Inputs map[string]string // manually passed action inputs
	Secrets map[string]string // list of secrets
	Vars map[string]string // list of vars
	Token string // GitHub token
	InsecureSecrets bool // switch hiding output when printing to terminal
	Platforms map[string]string // list of platforms
	Privileged bool // use privileged mode
	UsernsMode string // user namespace to use
	ContainerArchitecture string // Desired OS/architecture platform for running containers
	ContainerDaemonSocket string // Path to Docker daemon socket
	ContainerOptions string // Options for the job container
	UseGitIgnore bool // controls if paths in .gitignore should not be copied into container, default true
	GitHubInstance string // GitHub instance to use, default "github.com"
	ContainerCapAdd []string // list of kernel capabilities to add to the containers
	ContainerCapDrop []string // list of kernel capabilities to remove from the containers
	AutoRemove bool // controls if the container is automatically removed upon workflow completion
	ArtifactServerPath string // the path where the artifact server stores uploads
	ArtifactServerAddr string // the address the artifact server binds to
	ArtifactServerPort string // the port the artifact server binds to
	NoSkipCheckout bool // do not skip actions/checkout
	RemoteName string // remote name in local git repo config
	ReplaceGheActionWithGithubCom []string // Use actions from GitHub Enterprise instance to GitHub
	ReplaceGheActionTokenWithGithubCom string // Token of private action repo on GitHub.
	Matrix map[string]map[string]bool // Matrix config to run
	Actor string // the user that triggered the event
	Workdir string // path to working directory
	ActionCacheDir string // path used for caching action contents
	ActionOfflineMode bool // when offline, use caching action contents
	BindWorkdir bool // bind the workdir to the job container
	EventName string // name of event to run
	EventPath string // path to JSON file to use for event.json in containers
	DefaultBranch string // name of the main branch for this repository
	ReuseContainers bool // reuse containers to maintain state
	ForcePull bool // force pulling of the image, even if already present
	ForceRebuild bool // force rebuilding local docker image action
	LogOutput bool // log the output from docker run
	JSONLogger bool // use json or text logger
	LogPrefixJobID bool // switches from the full job name to the job id
	Env map[string]string // env for containers
	Inputs map[string]string // manually passed action inputs
	Secrets map[string]string // list of secrets
	Vars map[string]string // list of vars
	Token string // GitHub token
	InsecureSecrets bool // switch hiding output when printing to terminal
	Platforms map[string]string // list of platforms
	Privileged bool // use privileged mode
	UsernsMode string // user namespace to use
	ContainerArchitecture string // Desired OS/architecture platform for running containers
	ContainerDaemonSocket string // Path to Docker daemon socket
	ContainerOptions string // Options for the job container
	UseGitIgnore bool // controls if paths in .gitignore should not be copied into container, default true
	GitHubInstance string // GitHub instance to use, default "github.com"
	ContainerCapAdd []string // list of kernel capabilities to add to the containers
	ContainerCapDrop []string // list of kernel capabilities to remove from the containers
	AutoRemove bool // controls if the container is automatically removed upon workflow completion
	ArtifactServerPath string // the path where the artifact server stores uploads
	ArtifactServerAddr string // the address the artifact server binds to
	ArtifactServerPort string // the port the artifact server binds to
	NoSkipCheckout bool // do not skip actions/checkout
	RemoteName string // remote name in local git repo config
	ReplaceGheActionWithGithubCom []string // Use actions from GitHub Enterprise instance to GitHub
	ReplaceGheActionTokenWithGithubCom string // Token of private action repo on GitHub.
	Matrix map[string]map[string]bool // Matrix config to run
	ContainerNetworkMode docker_container.NetworkMode // the network mode of job containers (the value of --network)
	ActionCache ActionCache // Use a custom ActionCache Implementation

	PresetGitHubContext *model.GithubContext // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
	EventJSON string // the content of JSON file to use for event.json in containers, overrides EventPath
	ContainerNamePrefix string // the prefix of container name
	ContainerMaxLifetime time.Duration // the max lifetime of job containers
	ContainerNetworkMode docker_container.NetworkMode // the network mode of job containers (the value of --network)
	DefaultActionInstance string // the default actions web site
	PlatformPicker func(labels []string) string // platform picker, it will take precedence over Platforms if isn't nil
	JobLoggerLevel *log.Level // the level of job logger
	ValidVolumes []string // only volumes (and bind mounts) in this slice can be mounted on the job container or service containers
	InsecureSkipTLS bool // whether to skip verifying TLS certificate of the Gitea instance

	ContainerNetworkEnableIPv6 bool // create the network with IPv6 support enabled
}

// GetToken: Adapt to Gitea

@@ -206,7 +211,6 @@ func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
		stageExecutor = append(stageExecutor, func(ctx context.Context) error {
			jobName := fmt.Sprintf("%-*s", maxJobNameLen, rc.String())
			executor, err := rc.Executor()

			if err != nil {
				return err
			}

@@ -302,6 +302,11 @@ func TestRunEvent(t *testing.T) {
		{workdir, "set-env-step-env-override", "push", "", platforms, secrets},
		{workdir, "set-env-new-env-file-per-step", "push", "", platforms, secrets},
		{workdir, "no-panic-on-invalid-composite-action", "push", "jobs failed due to invalid action", platforms, secrets},

		// services
		{workdir, "services", "push", "", platforms, secrets},
		{workdir, "services-host-network", "push", "", platforms, secrets},
		{workdir, "services-with-container", "push", "", platforms, secrets},
	}

	for _, table := range tables {

@@ -34,6 +34,9 @@ const (
	stepStagePost
)

// Controls how many symlinks are resolved for local and remote Actions
const maxSymlinkDepth = 10

func (s stepStage) String() string {
	switch s {
	case stepStagePre:

@@ -307,3 +310,13 @@ func mergeIntoMapCaseInsensitive(target map[string]string, maps ...map[string]st
		}
	}
}

func symlinkJoin(filename, sym, parent string) (string, error) {
	dir := path.Dir(filename)
	dest := path.Join(dir, sym)
	prefix := path.Clean(parent) + "/"
	if strings.HasPrefix(dest, prefix) || prefix == "./" {
		return dest, nil
	}
	return "", fmt.Errorf("symlink tries to access file '%s' outside of '%s'", strings.ReplaceAll(dest, "'", "''"), strings.ReplaceAll(parent, "'", "''"))
}
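symlinkJoin above rejects any symlink target that resolves outside the parent directory it is given. The standalone sketch below reuses the same containment check with assumed example paths that are not taken from the test suite.

// Standalone sketch of the containment check performed by symlinkJoin above.
package main

import (
	"fmt"
	"path"
	"strings"
)

func symlinkJoin(filename, sym, parent string) (string, error) {
	dir := path.Dir(filename)
	dest := path.Join(dir, sym)
	prefix := path.Clean(parent) + "/"
	if strings.HasPrefix(dest, prefix) || prefix == "./" {
		return dest, nil
	}
	return "", fmt.Errorf("symlink tries to access file '%s' outside of '%s'", dest, parent)
}

func main() {
	// Accepted: the target stays inside the parent directory.
	ok, _ := symlinkJoin("/actions/demo/action.yml", "real-action.yml", "/actions/demo")
	fmt.Println(ok) // /actions/demo/real-action.yml

	// Rejected: the target escapes the parent directory.
	_, err := symlinkJoin("/actions/demo/action.yml", "../../etc/passwd", "/actions/demo")
	fmt.Println(err)
}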

@@ -3,7 +3,10 @@ package runner
import (
	"archive/tar"
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path"
	"path/filepath"

@@ -42,15 +45,31 @@ func (sal *stepActionLocal) main() common.Executor {
	localReader := func(ctx context.Context) actionYamlReader {
		_, cpath := getContainerActionPaths(sal.Step, path.Join(actionDir, ""), sal.RunContext)
		return func(filename string) (io.Reader, io.Closer, error) {
			tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, path.Join(cpath, filename))
			if err != nil {
				return nil, nil, os.ErrNotExist
			spath := path.Join(cpath, filename)
			for i := 0; i < maxSymlinkDepth; i++ {
				tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, spath)
				if errors.Is(err, fs.ErrNotExist) {
					return nil, nil, err
				} else if err != nil {
					return nil, nil, fs.ErrNotExist
				}
				treader := tar.NewReader(tars)
				header, err := treader.Next()
				if errors.Is(err, io.EOF) {
					return nil, nil, os.ErrNotExist
				} else if err != nil {
					return nil, nil, err
				}
				if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
					spath, err = symlinkJoin(spath, header.Linkname, cpath)
					if err != nil {
						return nil, nil, err
					}
				} else {
					return treader, tars, nil
				}
			}
			treader := tar.NewReader(tars)
			if _, err := treader.Next(); err != nil {
				return nil, nil, os.ErrNotExist
			}
			return treader, tars, nil
			return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
		}
	}
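GetContainerArchive hands back the requested file wrapped in a tar stream, which is why the reader above inspects the first tar header before deciding whether it found a symlink or the file body. Below is a minimal, self-contained sketch of that read pattern using an in-memory archive instead of a container; the file name and contents are assumptions for illustration.

// Minimal sketch: read the first entry of a tar stream, the same way the
// localReader above consumes the archive returned by GetContainerArchive.
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	// Build a one-file tar archive in memory; in the runner this stream
	// comes from the job container instead.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("name: demo\nruns:\n  using: composite\n")
	_ = tw.WriteHeader(&tar.Header{Name: "action.yml", Mode: 0o644, Size: int64(len(body))})
	_, _ = tw.Write(body)
	_ = tw.Close()

	tr := tar.NewReader(&buf)
	hdr, err := tr.Next() // first (and only) entry
	if err != nil {
		panic(err)
	}
	// A symlink entry would be detected via hdr.FileInfo().Mode()&os.ModeSymlink.
	fmt.Println(hdr.Name, hdr.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink)
	_, _ = io.Copy(os.Stdout, tr) // the file body follows the header
}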

@@ -1,6 +1,7 @@
package runner

import (
	"archive/tar"
	"context"
	"errors"
	"fmt"

@@ -28,6 +29,8 @@ type stepActionRemote struct {
	action       *model.Action
	env          map[string]string
	remoteAction *remoteAction
	cacheDir     string
	resolvedSha  string
}

var stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor

@@ -62,6 +65,48 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
				github.Token = sar.RunContext.Config.ReplaceGheActionTokenWithGithubCom
			}
		}
		if sar.RunContext.Config.ActionCache != nil {
			cache := sar.RunContext.Config.ActionCache

			var err error
			sar.cacheDir = fmt.Sprintf("%s/%s", sar.remoteAction.Org, sar.remoteAction.Repo)
			repoURL := sar.remoteAction.URL + "/" + sar.cacheDir
			repoRef := sar.remoteAction.Ref
			sar.resolvedSha, err = cache.Fetch(ctx, sar.cacheDir, repoURL, repoRef, github.Token)
			if err != nil {
				return fmt.Errorf("failed to fetch \"%s\" version \"%s\": %w", repoURL, repoRef, err)
			}

			remoteReader := func(ctx context.Context) actionYamlReader {
				return func(filename string) (io.Reader, io.Closer, error) {
					spath := path.Join(sar.remoteAction.Path, filename)
					for i := 0; i < maxSymlinkDepth; i++ {
						tars, err := cache.GetTarArchive(ctx, sar.cacheDir, sar.resolvedSha, spath)
						if err != nil {
							return nil, nil, os.ErrNotExist
						}
						treader := tar.NewReader(tars)
						header, err := treader.Next()
						if err != nil {
							return nil, nil, os.ErrNotExist
						}
						if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
							spath, err = symlinkJoin(spath, header.Linkname, ".")
							if err != nil {
								return nil, nil, err
							}
						} else {
							return treader, tars, nil
						}
					}
					return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
				}
			}

			actionModel, err := sar.readAction(ctx, sar.Step, sar.resolvedSha, sar.remoteAction.Path, remoteReader(ctx), os.WriteFile)
			sar.action = actionModel
			return err
		}

		actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
		gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{

@@ -75,6 +120,9 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
			For GitHub, they are the same, always github.com.
			But for Gitea, tasks triggered by a.com can clone actions from b.com.
			*/
			OfflineMode: sar.RunContext.Config.ActionOfflineMode,

			InsecureSkipTLS: sar.cloneSkipTLS(), // For Gitea
		})
		var ntErr common.Executor
		if err := gitClone(ctx); err != nil {

@@ -212,6 +260,22 @@ func (sar *stepActionRemote) getCompositeSteps() *compositeSteps {
	return sar.compositeSteps
}

// For Gitea
// cloneSkipTLS returns true if the runner can clone an action from the Gitea instance
func (sar *stepActionRemote) cloneSkipTLS() bool {
	if !sar.RunContext.Config.InsecureSkipTLS {
		// Return false if the Gitea instance is not an insecure instance
		return false
	}
	if sar.remoteAction.URL == "" {
		// Empty URL means the default action instance should be used
		// Return true if the URL of the Gitea instance is the same as the URL of the default action instance
		return sar.RunContext.Config.DefaultActionInstance == sar.RunContext.Config.GitHubInstance
	}
	// Return true if the URL of the remote action is the same as the URL of the Gitea instance
	return sar.remoteAction.URL == sar.RunContext.Config.GitHubInstance
}

type remoteAction struct {
	URL string
	Org string

pkg/runner/testdata/services-host-network/push.yml (new file, 14 lines, vendored)
@@ -0,0 +1,14 @@
name: services-host-network
on: push
jobs:
  services-host-network:
    runs-on: ubuntu-latest
    services:
      nginx:
        image: "nginx:latest"
        ports:
          - "8080:80"
    steps:
      - run: apt-get -qq update && apt-get -yqq install --no-install-recommends curl net-tools
      - run: netstat -tlpen
      - run: curl -v http://localhost:8080

pkg/runner/testdata/services-with-container/push.yml (new file, 16 lines, vendored)
@@ -0,0 +1,16 @@
name: services-with-containers
on: push
jobs:
  services-with-containers:
    runs-on: ubuntu-latest
    # https://docs.github.com/en/actions/using-containerized-services/about-service-containers#running-jobs-in-a-container
    container:
      image: "ubuntu:latest"
    services:
      nginx:
        image: "nginx:latest"
        ports:
          - "8080:80"
    steps:
      - run: apt-get -qq update && apt-get -yqq install --no-install-recommends curl
      - run: curl -v http://nginx:80

renovate.json (new file, 21 lines)
@@ -0,0 +1,21 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": ["local>forgejo/renovate-config"],
  "packageRules": [
    {
      "description": "Disable runner test data",
      "matchFileNames": ["pkg/runner/testdata/**"],
      "enabled": false
    },
    {
      "description": "Require approval for all dependencies",
      "matchDepNames": ["/.+/"],
      "dependencyDashboardApproval": true
    },
    {
      "description": "Separate minor and patch for some packages",
      "matchDepNames": ["github.com/rhysd/actionlint"],
      "separateMinorPatch": true
    }
  ]
}