# Example configuration file; it's safe to copy this as the default config file without any modification.

# You don't have to copy this file to your instance;
# just run `forgejo-runner generate-config > config.yaml` to generate a config file.
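# A generated config can then be passed to the runner when it starts, for example
# (assuming the `--config` flag of your forgejo-runner version; see `forgejo-runner --help`):
# forgejo-runner daemon --config config.yaml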

log:
  # The level of logging, can be trace, debug, info, warn, error, fatal
  level: info
  # The level of logging for jobs, can be trace, debug, info, warn, error, fatal
  job_level: info

runner:
  # Where to store the registration result.
  file: .runner
  # How many tasks to execute concurrently.
  capacity: 1
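  # For example, to run up to four jobs in parallel:
  # capacity: 4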
  # Extra environment variables to run jobs.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables for jobs, loaded from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  env_file: .env
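  # A dotenv-style file with one KEY=VALUE pair per line, for example
  # (names and values are illustrative):
  # SOME_TOKEN=some_value
  # ANOTHER_VAR=another_value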
  # The timeout for a job to be finished.
  # Please note that the Forgejo instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
  timeout: 3h
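  # For example, a shorter per-job timeout (durations use the same style as the other values
  # in this file, e.g. 30m or 1h30m):
  # timeout: 30m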
  # The timeout for the runner to wait for running jobs to finish when
  # shutting down because a TERM or INT signal has been received. Any
  # running jobs that haven't finished after this timeout will be
  # cancelled.
  # If unset or zero the jobs will be cancelled immediately.
  shutdown_timeout: 3h
  # Whether to skip verifying the TLS certificate of the instance.
  insecure: false
  # The timeout for fetching the job from the Forgejo instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Forgejo instance.
  fetch_interval: 2s
  # The interval for reporting the job status and logs to the Forgejo instance.
  report_interval: 1s
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:20-bookworm", "ubuntu-22.04:docker://node:20-bookworm"]
  # If it's empty when registering, you will be asked to input labels.
  # If it's empty when executing the `daemon`, it will use labels in the `.runner` file.
  labels: []
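  # For example, the block-style equivalent of the list shown above:
  # labels:
  #   - "macos-arm64:host"
  #   - "ubuntu-latest:docker://node:20-bookworm"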

cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not the address to listen on, but the address that job containers use to connect.
  # So 0.0.0.0 is a bad choice; leave it empty to detect it automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0
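  # For example, to pin the cache server to an address reachable from job containers
  # (values are illustrative):
  # host: "192.168.1.10"
  # port: 8088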
  # The external cache server URL. Valid only when enabled is true.
  # If it's specified, it will be used to set the ACTIONS_CACHE_URL environment variable. The URL should generally end with "/".
  # Otherwise it will be set to the URL of the internal cache server.
  external_server: ""
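  # For example, pointing jobs at a separately hosted cache service (URL is illustrative):
  # external_server: "https://cache.example.com/"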

container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, the runner will create a network automatically.
  network: ""
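  # For example, to attach job containers to a pre-existing custom network (the runner does not
  # create it; it must already exist, e.g. via `docker network create`; the name is illustrative):
  # network: "my-runner-net"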
  # Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
  # Only takes effect if "network" is set to "".
  enable_ipv6: false
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # Other options to be used when the container is started (e.g., --add-host=my.forgejo.url:host-gateway).
  options:
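  # For example, passing a host mapping and a read-only bind mount (flags are illustrative;
  # mounts may also be restricted by valid_volumes below):
  # options: --add-host=my.forgejo.url:host-gateway --volume /etc/ssl/certs:/etc/ssl/certs:ro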
  # The parent directory of a job's working directory.
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, to allow containers to mount only the `data` volume and all the json files in `/src`, change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # Overrides the docker client host with the specified one.
  # If "-", an available docker host will automatically be found.
  # If empty, an available docker host will automatically be found and mounted in the job container (e.g. /var/run/docker.sock).
  # Otherwise the specified docker host will be used and an error will be returned if it doesn't work.
  docker_host: "-"
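  # For example, to use a specific remote Docker daemon instead of auto-detection
  # (address is illustrative):
  # docker_host: "tcp://docker-host.example.com:2376"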
  # Pull docker image(s) even if already present
  force_pull: false

host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent: