fix: update artifact server to address GHSL-2023-004 (#1565)

parent efb12b7f12
commit 63ae215071

4 changed files with 208 additions and 79 deletions
.vscode/settings.json (vendored, 1 addition)

@@ -1,6 +1,7 @@
 {
   "go.lintTool": "golangci-lint",
   "go.lintFlags": ["--fix"],
+  "go.testTimeout": "300s",
   "[json]": {
     "editor.defaultFormatter": "esbenp.prettier-vscode"
   },
pkg/artifacts/server.go

@@ -9,7 +9,6 @@ import (
 	"io/fs"
 	"net/http"
 	"os"
-	"path"
 	"path/filepath"
 	"strings"
 	"time"
@@ -46,28 +45,34 @@ type ResponseMessage struct {
 	Message string `json:"message"`
 }
 
-type MkdirFS interface {
-	fs.FS
-	MkdirAll(path string, perm fs.FileMode) error
-	Open(name string) (fs.File, error)
-	OpenAtEnd(name string) (fs.File, error)
+type WritableFile interface {
+	io.WriteCloser
 }
 
-type MkdirFsImpl struct {
-	dir string
-	fs.FS
+type WriteFS interface {
+	OpenWritable(name string) (WritableFile, error)
+	OpenAppendable(name string) (WritableFile, error)
 }
 
-func (fsys MkdirFsImpl) MkdirAll(path string, perm fs.FileMode) error {
-	return os.MkdirAll(fsys.dir+"/"+path, perm)
+type readWriteFSImpl struct {
 }
 
-func (fsys MkdirFsImpl) Open(name string) (fs.File, error) {
-	return os.OpenFile(fsys.dir+"/"+name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
+func (fwfs readWriteFSImpl) Open(name string) (fs.File, error) {
+	return os.Open(name)
 }
 
-func (fsys MkdirFsImpl) OpenAtEnd(name string) (fs.File, error) {
-	file, err := os.OpenFile(fsys.dir+"/"+name, os.O_CREATE|os.O_RDWR, 0644)
+func (fwfs readWriteFSImpl) OpenWritable(name string) (WritableFile, error) {
+	if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
+		return nil, err
+	}
+	return os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
+}
+
+func (fwfs readWriteFSImpl) OpenAppendable(name string) (WritableFile, error) {
+	if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
+		return nil, err
+	}
+	file, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644)
 
 	if err != nil {
 		return nil, err
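The hunk above splits reading (plain fs.FS) from writing (the new WriteFS/WritableFile pair), and readWriteFSImpl now operates on full paths rather than paths rooted in a DirFS. A minimal sketch of the write-then-read flow, assuming it lives in the same package as the types above; the temp-dir path and file name are illustrative only:

func sketchWriteThenRead() error {
	fsys := readWriteFSImpl{}
	name := filepath.Join(os.TempDir(), "act-artifacts-sketch", "1", "hello.txt") // illustrative path

	f, err := fsys.OpenWritable(name) // creates parent directories, truncates any existing file
	if err != nil {
		return err
	}
	if _, err := f.Write([]byte("hello")); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}

	data, err := os.ReadFile(name) // read back through the plain os API
	if err != nil {
		return err
	}
	fmt.Println(string(data)) // "hello"
	return nil
}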
@@ -77,13 +82,16 @@ func (fsys MkdirFsImpl) OpenAtEnd(name string) (fs.File, error) {
 	if err != nil {
 		return nil, err
 	}
 
 	return file, nil
 }
 
 var gzipExtension = ".gz__"
 
-func uploads(router *httprouter.Router, fsys MkdirFS) {
+func safeResolve(baseDir string, relPath string) string {
+	return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
+}
+
+func uploads(router *httprouter.Router, baseDir string, fsys WriteFS) {
 	router.POST("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
 		runID := params.ByName("runId")
 
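safeResolve is the heart of the fix: the client-controlled part is first joined onto the path separator and cleaned, so any ".." segments are swallowed before the result is anchored under baseDir. A standalone sketch that reproduces the same expression; the base directory is illustrative, and the outputs shown assume a Unix-like path separator:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Reproduced from the hunk above so the example is self-contained.
func safeResolve(baseDir string, relPath string) string {
	return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
}

func main() {
	fmt.Println(safeResolve("/srv/artifacts", "1/some/file"))      // /srv/artifacts/1/some/file
	fmt.Println(safeResolve("/srv/artifacts", "../../etc/passwd")) // /srv/artifacts/etc/passwd
	fmt.Println(safeResolve("/srv/artifacts", "/etc/passwd"))      // /srv/artifacts/etc/passwd
}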
@@ -108,19 +116,15 @@ func uploads(router *httprouter.Router, fsys MkdirFS) {
 			itemPath += gzipExtension
 		}
 
-		filePath := fmt.Sprintf("%s/%s", runID, itemPath)
+		safeRunPath := safeResolve(baseDir, runID)
+		safePath := safeResolve(safeRunPath, itemPath)
 
-		err := fsys.MkdirAll(path.Dir(filePath), os.ModePerm)
-		if err != nil {
-			panic(err)
-		}
-
-		file, err := func() (fs.File, error) {
+		file, err := func() (WritableFile, error) {
 			contentRange := req.Header.Get("Content-Range")
 			if contentRange != "" && !strings.HasPrefix(contentRange, "bytes 0-") {
-				return fsys.OpenAtEnd(filePath)
+				return fsys.OpenAppendable(safePath)
 			}
-			return fsys.Open(filePath)
+			return fsys.OpenWritable(safePath)
 		}()
 
 		if err != nil {
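The upload handler keeps its chunking behaviour: only a Content-Range that does not start at byte 0 reopens the file for appending; everything else truncates. An illustrative helper (not part of the server) mirroring that branch:

func isFirstChunk(contentRange string) bool {
	return contentRange == "" || strings.HasPrefix(contentRange, "bytes 0-")
}

func sketchChunking() {
	fmt.Println(isFirstChunk("bytes 0-99/200"))    // true  -> fsys.OpenWritable (truncate)
	fmt.Println(isFirstChunk("bytes 100-199/200")) // false -> fsys.OpenAppendable (append)
	fmt.Println(isFirstChunk(""))                  // true  -> fsys.OpenWritable
}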
@@ -170,11 +174,13 @@ func uploads(router *httprouter.Router, fsys MkdirFS) {
 	})
 }
 
-func downloads(router *httprouter.Router, fsys fs.FS) {
+func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) {
 	router.GET("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
 		runID := params.ByName("runId")
 
-		entries, err := fs.ReadDir(fsys, runID)
+		safePath := safeResolve(baseDir, runID)
+
+		entries, err := fs.ReadDir(fsys, safePath)
 		if err != nil {
 			panic(err)
 		}

@@ -204,12 +210,12 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
 	router.GET("/download/:container", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
 		container := params.ByName("container")
 		itemPath := req.URL.Query().Get("itemPath")
-		dirPath := fmt.Sprintf("%s/%s", container, itemPath)
+		safePath := safeResolve(baseDir, filepath.Join(container, itemPath))
 
 		var files []ContainerItem
-		err := fs.WalkDir(fsys, dirPath, func(path string, entry fs.DirEntry, err error) error {
+		err := fs.WalkDir(fsys, safePath, func(path string, entry fs.DirEntry, err error) error {
 			if !entry.IsDir() {
-				rel, err := filepath.Rel(dirPath, path)
+				rel, err := filepath.Rel(safePath, path)
 				if err != nil {
 					panic(err)
 				}

@@ -218,7 +224,7 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
 				rel = strings.TrimSuffix(rel, gzipExtension)
 
 				files = append(files, ContainerItem{
-					Path:            fmt.Sprintf("%s/%s", itemPath, rel),
+					Path:            filepath.Join(itemPath, rel),
 					ItemType:        "file",
 					ContentLocation: fmt.Sprintf("http://%s/artifact/%s/%s/%s", req.Host, container, itemPath, rel),
 				})

@@ -245,10 +251,12 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
 	router.GET("/artifact/*path", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
 		path := params.ByName("path")[1:]
 
-		file, err := fsys.Open(path)
+		safePath := safeResolve(baseDir, path)
+
+		file, err := fsys.Open(safePath)
 		if err != nil {
 			// try gzip file
-			file, err = fsys.Open(path + gzipExtension)
+			file, err = fsys.Open(safePath + gzipExtension)
 			if err != nil {
 				panic(err)
 			}
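Every download route now resolves the requested name through safeResolve before touching the filesystem, and the single-file route still falls back to the ".gz__" variant that uploads writes for gzip-compressed content. An illustrative helper (same package, not part of the diff) with the same lookup order:

func openArtifact(fsys fs.FS, safePath string) (fs.File, error) {
	file, err := fsys.Open(safePath)
	if err != nil {
		// try the gzip variant written by the upload handler
		return fsys.Open(safePath + gzipExtension)
	}
	return file, nil
}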
@@ -273,9 +281,9 @@ func Serve(ctx context.Context, artifactPath string, addr string, port string) c
 	router := httprouter.New()
 
 	logger.Debugf("Artifacts base path '%s'", artifactPath)
-	fs := os.DirFS(artifactPath)
-	uploads(router, MkdirFsImpl{artifactPath, fs})
-	downloads(router, fs)
+	fsys := readWriteFSImpl{}
+	uploads(router, artifactPath, fsys)
+	downloads(router, artifactPath, fsys)
 
 	server := &http.Server{
 		Addr: fmt.Sprintf("%s:%s", addr, port),
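Serve now hands the handlers the artifact base path plus a plain readWriteFSImpl instead of an os.DirFS. A hedged usage sketch; the import path, the return type (truncated in the hunk header above), and the directory, address, and port are all assumptions:

package main

import (
	"context"

	"github.com/nektos/act/pkg/artifacts" // assumed import path
)

func main() {
	// Assumed to return a cancel function, as suggested by the truncated signature above.
	cancel := artifacts.Serve(context.Background(), "/tmp/act-artifacts", "0.0.0.0", "34567")
	defer cancel()

	// ... run jobs that upload/download artifacts, then let cancel() stop the server.
}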
pkg/artifacts/server_test.go

@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/fs"
 	"net/http"
 	"net/http/httptest"
 	"os"
@@ -21,44 +20,43 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-type MapFsImpl struct {
-	fstest.MapFS
+type writableMapFile struct {
+	fstest.MapFile
 }
 
-func (fsys MapFsImpl) MkdirAll(path string, perm fs.FileMode) error {
-	// mocked no-op
-	return nil
-}
-
-type WritableFile struct {
-	fs.File
-	fsys fstest.MapFS
-	path string
-}
-
-func (file WritableFile) Write(data []byte) (int, error) {
-	file.fsys[file.path].Data = data
+func (f *writableMapFile) Write(data []byte) (int, error) {
+	f.Data = data
 	return len(data), nil
 }
 
-func (fsys MapFsImpl) Open(path string) (fs.File, error) {
-	var file = fstest.MapFile{
-		Data: []byte("content2"),
-	}
-	fsys.MapFS[path] = &file
-
-	result, err := fsys.MapFS.Open(path)
-	return WritableFile{result, fsys.MapFS, path}, err
+func (f *writableMapFile) Close() error {
+	return nil
 }
 
-func (fsys MapFsImpl) OpenAtEnd(path string) (fs.File, error) {
-	var file = fstest.MapFile{
-		Data: []byte("content2"),
-	}
-	fsys.MapFS[path] = &file
-
-	result, err := fsys.MapFS.Open(path)
-	return WritableFile{result, fsys.MapFS, path}, err
+type writeMapFS struct {
+	fstest.MapFS
+}
+
+func (fsys writeMapFS) OpenWritable(name string) (WritableFile, error) {
+	var file = &writableMapFile{
+		MapFile: fstest.MapFile{
+			Data: []byte("content2"),
+		},
+	}
+	fsys.MapFS[name] = &file.MapFile
+
+	return file, nil
+}
+
+func (fsys writeMapFS) OpenAppendable(name string) (WritableFile, error) {
+	var file = &writableMapFile{
+		MapFile: fstest.MapFile{
+			Data: []byte("content2"),
+		},
+	}
+	fsys.MapFS[name] = &file.MapFile
+
+	return file, nil
 }
 
 func TestNewArtifactUploadPrepare(t *testing.T) {
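The test mock backs WriteFS with an in-memory fstest.MapFS: each OpenWritable/OpenAppendable call installs a *fstest.MapFile, and Write overwrites its Data, so assertions can inspect the map directly. A small sketch, assuming it sits in the same package as the helpers above; the map key is illustrative:

func sketchWriteMapFS() {
	memfs := fstest.MapFS{}
	fsys := writeMapFS{memfs}

	f, _ := fsys.OpenWritable("artifact/server/path/1/hello.txt")
	_, _ = f.Write([]byte("hi")) // replaces the MapFile's Data
	_ = f.Close()

	fmt.Println(string(memfs["artifact/server/path/1/hello.txt"].Data)) // "hi"
}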
@@ -67,7 +65,7 @@ func TestNewArtifactUploadPrepare(t *testing.T) {
 	var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
 
 	router := httprouter.New()
-	uploads(router, MapFsImpl{memfs})
+	uploads(router, "artifact/server/path", writeMapFS{memfs})
 
 	req, _ := http.NewRequest("POST", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
 	rr := httptest.NewRecorder()

@@ -93,7 +91,7 @@ func TestArtifactUploadBlob(t *testing.T) {
 	var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
 
 	router := httprouter.New()
-	uploads(router, MapFsImpl{memfs})
+	uploads(router, "artifact/server/path", writeMapFS{memfs})
 
 	req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=some/file", strings.NewReader("content"))
 	rr := httptest.NewRecorder()

@@ -111,7 +109,7 @@ func TestArtifactUploadBlob(t *testing.T) {
 	}
 
 	assert.Equal("success", response.Message)
-	assert.Equal("content", string(memfs["1/some/file"].Data))
+	assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
 }
 
 func TestFinalizeArtifactUpload(t *testing.T) {

@@ -120,7 +118,7 @@ func TestFinalizeArtifactUpload(t *testing.T) {
 	var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
 
 	router := httprouter.New()
-	uploads(router, MapFsImpl{memfs})
+	uploads(router, "artifact/server/path", writeMapFS{memfs})
 
 	req, _ := http.NewRequest("PATCH", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
 	rr := httptest.NewRecorder()

@@ -144,13 +142,13 @@ func TestListArtifacts(t *testing.T) {
 	assert := assert.New(t)
 
 	var memfs = fstest.MapFS(map[string]*fstest.MapFile{
-		"1/file.txt": {
+		"artifact/server/path/1/file.txt": {
 			Data: []byte(""),
 		},
 	})
 
 	router := httprouter.New()
-	downloads(router, memfs)
+	downloads(router, "artifact/server/path", memfs)
 
 	req, _ := http.NewRequest("GET", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
 	rr := httptest.NewRecorder()

@@ -176,13 +174,13 @@ func TestListArtifactContainer(t *testing.T) {
 	assert := assert.New(t)
 
 	var memfs = fstest.MapFS(map[string]*fstest.MapFile{
-		"1/some/file": {
+		"artifact/server/path/1/some/file": {
 			Data: []byte(""),
 		},
 	})
 
 	router := httprouter.New()
-	downloads(router, memfs)
+	downloads(router, "artifact/server/path", memfs)
 
 	req, _ := http.NewRequest("GET", "http://localhost/download/1?itemPath=some/file", nil)
 	rr := httptest.NewRecorder()

@@ -200,7 +198,7 @@ func TestListArtifactContainer(t *testing.T) {
 	}
 
 	assert.Equal(1, len(response.Value))
-	assert.Equal("some/file/.", response.Value[0].Path)
+	assert.Equal("some/file", response.Value[0].Path)
 	assert.Equal("file", response.Value[0].ItemType)
 	assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation)
 }

@@ -209,13 +207,13 @@ func TestDownloadArtifactFile(t *testing.T) {
 	assert := assert.New(t)
 
 	var memfs = fstest.MapFS(map[string]*fstest.MapFile{
-		"1/some/file": {
+		"artifact/server/path/1/some/file": {
 			Data: []byte("content"),
 		},
 	})
 
 	router := httprouter.New()
-	downloads(router, memfs)
+	downloads(router, "artifact/server/path", memfs)
 
 	req, _ := http.NewRequest("GET", "http://localhost/artifact/1/some/file", nil)
 	rr := httptest.NewRecorder()

@@ -260,6 +258,7 @@ func TestArtifactFlow(t *testing.T) {
 
 	tables := []TestJobFileInfo{
 		{"testdata", "upload-and-download", "push", "", platforms, ""},
+		{"testdata", "GHSL-2023-004", "push", "", platforms, ""},
 	}
 	log.SetLevel(log.DebugLevel)
 
@@ -310,3 +309,81 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
 		fmt.Println("::endgroup::")
 	})
 }
+
+func TestMkdirFsImplSafeResolve(t *testing.T) {
+	assert := assert.New(t)
+
+	baseDir := "/foo/bar"
+
+	tests := map[string]struct {
+		input string
+		want  string
+	}{
+		"simple":         {input: "baz", want: "/foo/bar/baz"},
+		"nested":         {input: "baz/blue", want: "/foo/bar/baz/blue"},
+		"dots in middle": {input: "baz/../../blue", want: "/foo/bar/blue"},
+		"leading dots":   {input: "../../parent", want: "/foo/bar/parent"},
+		"root path":      {input: "/root", want: "/foo/bar/root"},
+		"root":           {input: "/", want: "/foo/bar"},
+		"empty":          {input: "", want: "/foo/bar"},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			assert.Equal(tc.want, safeResolve(baseDir, tc.input))
+		})
+	}
+}
+
+func TestDownloadArtifactFileUnsafePath(t *testing.T) {
+	assert := assert.New(t)
+
+	var memfs = fstest.MapFS(map[string]*fstest.MapFile{
+		"artifact/server/path/some/file": {
+			Data: []byte("content"),
+		},
+	})
+
+	router := httprouter.New()
+	downloads(router, "artifact/server/path", memfs)
+
+	req, _ := http.NewRequest("GET", "http://localhost/artifact/2/../../some/file", nil)
+	rr := httptest.NewRecorder()
+
+	router.ServeHTTP(rr, req)
+
+	if status := rr.Code; status != http.StatusOK {
+		assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
+	}
+
+	data := rr.Body.Bytes()
+
+	assert.Equal("content", string(data))
+}
+
+func TestArtifactUploadBlobUnsafePath(t *testing.T) {
+	assert := assert.New(t)
+
+	var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
+
+	router := httprouter.New()
+	uploads(router, "artifact/server/path", writeMapFS{memfs})
+
+	req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=../../some/file", strings.NewReader("content"))
+	rr := httptest.NewRecorder()
+
+	router.ServeHTTP(rr, req)
+
+	if status := rr.Code; status != http.StatusOK {
+		assert.Fail("Wrong status")
+	}
+
+	response := ResponseMessage{}
+	err := json.Unmarshal(rr.Body.Bytes(), &response)
+	if err != nil {
+		panic(err)
+	}
+
+	assert.Equal("success", response.Message)
+	assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
+}
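TestArtifactUploadBlobUnsafePath expects the write for runId "1" and itemPath "../../some/file" to land at "artifact/server/path/1/some/file". A standalone walk-through of the two safeResolve calls the upload handler makes for that request, with safeResolve reproduced from the server hunk above:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func safeResolve(baseDir string, relPath string) string {
	return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
}

func main() {
	safeRunPath := safeResolve("artifact/server/path", "1")
	safePath := safeResolve(safeRunPath, "../../some/file")

	fmt.Println(safeRunPath) // artifact/server/path/1
	fmt.Println(safePath)    // artifact/server/path/1/some/file
}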
pkg/artifacts/testdata/GHSL-2023-004/artifacts.yml (vendored, new file, 43 additions)

@@ -0,0 +1,43 @@
+name: "GHSL-2023-0004"
+on: push
+
+jobs:
+  test-artifacts:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo "hello world" > test.txt
+      - name: curl upload
+        uses: wei/curl@v1
+        with:
+          args: -s --fail ${ACTIONS_RUNTIME_URL}upload/1?itemPath=../../my-artifact/secret.txt --upload-file test.txt
+      - uses: actions/download-artifact@v2
+        with:
+          name: my-artifact
+          path: test-artifacts
+      - name: 'Verify Artifact #1'
+        run: |
+          file="test-artifacts/secret.txt"
+          if [ ! -f $file ] ; then
+            echo "Expected file does not exist"
+            exit 1
+          fi
+          if [ "$(cat $file)" != "hello world" ] ; then
+            echo "File contents of downloaded artifact are incorrect"
+            exit 1
+          fi
+      - name: Verify download should work by clean extra dots
+        uses: wei/curl@v1
+        with:
+          args: --path-as-is -s -o out.txt --fail ${ACTIONS_RUNTIME_URL}artifact/1/../../../1/my-artifact/secret.txt
+      - name: 'Verify download content'
+        run: |
+          file="out.txt"
+          if [ ! -f $file ] ; then
+            echo "Expected file does not exist"
+            exit 1
+          fi
+          if [ "$(cat $file)" != "hello world" ] ; then
+            echo "File contents of downloaded artifact are incorrect"
+            exit 1
+          fi