Compare commits

..

3 commits

Author SHA1 Message Date
09ef963ca3
wip: needs tests 2024-10-20 14:55:16 +02:00
3d43ff4758
feat: add cache helpers 2024-07-22 21:13:54 +02:00
db684e73f8
feat: add initial helper methods 2024-07-21 12:15:55 +02:00
17 changed files with 732 additions and 1429 deletions

View file

@ -1,17 +0,0 @@
name: default
on:
push:
branches: [main]
jobs:
default:
name: ensure tests work
runs-on: debian-latest
container: docker.io/golang:1.24-alpine
steps:
- name: checkout
uses: https://code.geekeey.de/actions/checkout@1
- name: go run
run: |
go test

9
cache.go Normal file
View file

@ -0,0 +1,9 @@
package sdk
import "git.geekeey.de/actions/sdk/cache"
// Cache returns a cache client configured from the runner-provided
// environment: the runtime token and the cache service base URL.
func (c *Action) Cache() *cache.Client {
	return cache.New(c.env("ACTIONS_RUNTIME_TOKEN"), c.env("ACTIONS_CACHE_URL"))
}

56
cache/blob.go vendored Normal file
View file

@ -0,0 +1,56 @@
package cache
import (
"bytes"
"io"
"os"
)
type Blob interface {
io.ReaderAt
io.Closer
Size() int64
}
type byteBlob struct {
buf *bytes.Reader
}
func NewByteBlob(b []byte) Blob {
return &byteBlob{buf: bytes.NewReader(b)}
}
func (blob *byteBlob) ReadAt(p []byte, off int64) (n int, err error) {
return blob.buf.ReadAt(p, off)
}
func (blob *byteBlob) Size() int64 {
return blob.buf.Size()
}
func (blob *byteBlob) Close() error {
return nil
}
type fileBlob struct {
buf *os.File
}
func NewFileBlob(f *os.File) Blob {
return &fileBlob{buf: f}
}
func (blob *fileBlob) ReadAt(p []byte, off int64) (n int, err error) {
return blob.buf.ReadAt(p, off)
}
func (blob *fileBlob) Size() int64 {
if i, err := blob.buf.Stat(); err != nil {
return i.Size()
}
return 0
}
func (blob *fileBlob) Close() error {
return nil
}

329
cache/cache.go vendored Normal file
View file

@ -0,0 +1,329 @@
package cache
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"strings"
"sync"
"golang.org/x/sync/errgroup"
)
// UploadConcurrency is the number of goroutines used to upload chunks.
var UploadConcurrency = 4

// UploadChunkSize is the size in bytes of each uploaded chunk (32 MiB).
var UploadChunkSize = 32 * 1024 * 1024

// Client talks to the Actions artifact-cache HTTP API.
type Client struct {
	// base is the cache service base URL (ACTIONS_CACHE_URL).
	base string
	// http carries the auth+retry transport chain built by New.
	http *http.Client
}

// auth is an http.RoundTripper that attaches a bearer token to requests.
type auth struct {
	transport http.RoundTripper
	token     string
}
// RoundTrip implements http.RoundTripper by attaching the bearer token.
// The request is cloned first: the RoundTripper contract forbids mutating
// the caller's request, and Set (not Add) avoids stacking duplicate
// Authorization headers if the same request is ever resent.
func (t *auth) RoundTrip(req *http.Request) (*http.Response, error) {
	req = req.Clone(req.Context())
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", t.token))
	return t.transport.RoundTrip(req)
}
// New creates a cache Client authenticated with token against the cache
// service at url.
func New(token, url string) *Client {
	// Bug fix: the retry transport was previously constructed with its zero
	// value, so its attempt counter was 0 and every request failed
	// immediately with "too many retries".
	t := &auth{transport: &retry{transport: &http.Transport{}, retry: 3}, token: token}
	return &Client{
		base: url,
		http: &http.Client{Transport: t},
	}
}
// url builds the absolute API endpoint for path p under the service base.
// path.Join must not be used here: it would collapse the "//" in "https://"
// and mangle the scheme.
func (c *Client) url(p string) string {
	return strings.TrimSuffix(c.base, "/") + "/_apis/artifactcache/" + p
}
// version derives the cache version for key k: a salted SHA-256 so entries
// written by this library never collide with other toolchains on the same
// cache service.
func (c *Client) version(k string) string {
	h := sha256.New()
	// Bug fix: the key itself was previously never hashed, so every key
	// mapped to one identical version string.
	h.Write([]byte(k))
	h.Write([]byte("|go-actionscache-1.0"))
	return hex.EncodeToString(h.Sum(nil))
}
type ApiError struct {
Message string `json:"message"`
TypeName string `json:"typeName"`
TypeKey string `json:"typeKey"`
ErrorCode int `json:"errorCode"`
}
func (e ApiError) Error() string {
return e.Message
}
func (e ApiError) Is(err error) bool {
if err == os.ErrExist {
if strings.Contains(e.TypeKey, "AlreadyExists") {
return true
}
}
return false
}
// checkApiError returns nil for 2xx responses; otherwise it decodes the
// service's JSON error payload, falling back to the HTTP status line.
func checkApiError(res *http.Response) error {
	if res.StatusCode >= 200 && res.StatusCode < 300 {
		return nil
	}
	dec := json.NewDecoder(io.LimitReader(res.Body, 32*1024))
	var details ApiError
	// Bug fix: a decode failure (empty or non-JSON error body is common)
	// previously surfaced as a bare "EOF", hiding the HTTP status.
	if err := dec.Decode(&details); err != nil || details.Message == "" {
		return fmt.Errorf("unknown error %s", res.Status)
	}
	return details
}
// Load queries the cache for an entry matching any of keys and returns it.
// The version query parameter is derived from the first key.
func (c *Client) Load(ctx context.Context, keys ...string) (*Entry, error) {
	// Robustness fix: keys[0] below panicked when called with no keys.
	if len(keys) == 0 {
		return nil, fmt.Errorf("at least one cache key is required")
	}
	u, err := url.Parse(c.url("cache"))
	if err != nil {
		return nil, err
	}
	q := u.Query()
	q.Set("keys", strings.Join(keys, ","))
	q.Set("version", c.version(keys[0]))
	u.RawQuery = q.Encode()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Accept", "application/json;api-version=6.0-preview.1")
	res, err := c.http.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if err := checkApiError(res); err != nil {
		return nil, err
	}
	var ce Entry
	if err := json.NewDecoder(io.LimitReader(res.Body, 32*1024)).Decode(&ce); err != nil {
		return nil, err
	}
	// Reuse the authenticated client for the subsequent archive download.
	ce.http = c.http
	return &ce, nil
}
// Save stores blob b under key: it reserves an entry, uploads the data in
// chunks, and commits the final size.
func (c *Client) Save(ctx context.Context, key string, b Blob) error {
	id, err := c.reserve(ctx, key)
	if err != nil {
		return err
	}
	if err := c.upload(ctx, id, b); err != nil {
		return err
	}
	return c.commit(ctx, id, b.Size())
}
// ReserveCacheReq is the JSON body for reserving a cache entry.
type ReserveCacheReq struct {
	Key     string `json:"key"`
	Version string `json:"version"`
}

// ReserveCacheRes is the JSON reply carrying the reserved upload id.
type ReserveCacheRes struct {
	CacheID int `json:"cacheID"`
}

// reserve asks the service to allocate an upload slot for key and returns
// the cache id to upload against.
func (c *Client) reserve(ctx context.Context, key string) (int, error) {
	var body bytes.Buffer
	if err := json.NewEncoder(&body).Encode(ReserveCacheReq{Key: key, Version: c.version(key)}); err != nil {
		return 0, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url("caches"), &body)
	if err != nil {
		return 0, err
	}
	req.Header.Add("Content-Type", "application/json")
	res, err := c.http.Do(req)
	if err != nil {
		return 0, err
	}
	defer res.Body.Close()
	if err := checkApiError(res); err != nil {
		return 0, err
	}
	var reply ReserveCacheRes
	if err := json.NewDecoder(io.LimitReader(res.Body, 32*1024)).Decode(&reply); err != nil {
		return 0, err
	}
	if reply.CacheID == 0 {
		return 0, fmt.Errorf("invalid response (cache id is 0)")
	}
	return reply.CacheID, nil
}
// CommitCacheReq is the JSON body that finalizes an upload with its size.
type CommitCacheReq struct {
	Size int64 `json:"size"`
}

// commit finalizes the reserved cache entry id once all chunks are uploaded.
func (c *Client) commit(ctx context.Context, id int, size int64) error {
	var body bytes.Buffer
	if err := json.NewEncoder(&body).Encode(CommitCacheReq{Size: size}); err != nil {
		return err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url(fmt.Sprintf("caches/%d", id)), &body)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/json")
	res, err := c.http.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkApiError(res)
}
// upload sends the blob as UploadChunkSize chunks using UploadConcurrency
// workers. Workers claim the next unclaimed byte range under a mutex.
func (c *Client) upload(ctx context.Context, id int, b Blob) error {
	size := b.Size()
	var (
		mu   sync.Mutex
		next int64
	)
	// claim hands out the next [start, start+n) range, or ok=false when done.
	claim := func() (start, n int64, ok bool) {
		mu.Lock()
		defer mu.Unlock()
		if next >= size {
			return 0, 0, false
		}
		start = next
		end := start + int64(UploadChunkSize)
		if end > size {
			end = size
		}
		next = end
		return start, end - start, true
	}
	grp, ctx := errgroup.WithContext(ctx)
	for i := 0; i < UploadConcurrency; i++ {
		grp.Go(func() error {
			for {
				off, n, ok := claim()
				if !ok {
					return nil
				}
				if err := c.create(ctx, id, b, off, n); err != nil {
					return err
				}
			}
		})
	}
	return grp.Wait()
}
// create PATCHes one chunk covering bytes [off, off+n) of the blob to the
// reserved cache entry id.
func (c *Client) create(ctx context.Context, id int, ra io.ReaderAt, off, n int64) error {
	chunk := io.NewSectionReader(ra, off, n)
	req, err := http.NewRequestWithContext(ctx, http.MethodPatch, c.url(fmt.Sprintf("caches/%d", id)), chunk)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/octet-stream")
	req.Header.Add("Content-Range", fmt.Sprintf("bytes %d-%d/*", off, off+n-1))
	res, err := c.http.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkApiError(res)
}
// Entry describes a cache hit returned by Client.Load.
type Entry struct {
	Key   string `json:"cacheKey"`
	Scope string `json:"scope"`
	URL   string `json:"archiveLocation"`

	http *http.Client
}

// Download returns a ReaderAtCloser for pulling the data. Concurrent reads are not allowed
func (ce *Entry) Download(ctx context.Context) ReaderAtCloser {
	return NewReaderAtCloser(func(offset int64) (io.ReadCloser, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", ce.URL, nil)
		if err != nil {
			return nil, err
		}
		if offset != 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
		}
		client := ce.http
		if client == nil {
			client = http.DefaultClient
		}
		res, err := client.Do(req)
		if err != nil {
			return nil, err
		}
		if res.StatusCode < 200 || res.StatusCode >= 300 {
			// Bug fix: close the body on the error-status path too, or the
			// underlying connection is leaked.
			res.Body.Close()
			if res.StatusCode == http.StatusRequestedRangeNotSatisfiable {
				return nil, fmt.Errorf("invalid status response %v for %s, range: %v", res.Status, ce.URL, req.Header.Get("Range"))
			}
			return nil, fmt.Errorf("invalid status response %v for %s", res.Status, ce.URL)
		}
		if offset != 0 {
			cr := res.Header.Get("content-range")
			if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
				res.Body.Close()
				return nil, fmt.Errorf("unhandled content range in response: %v", cr)
			}
		}
		return res.Body, nil
	})
}
// WriteTo streams the whole cached archive into w via a sequential reader
// adapter over the random-access download.
func (ce *Entry) WriteTo(ctx context.Context, w io.Writer) error {
	rac := ce.Download(ctx)
	if _, err := io.Copy(w, &rc{ReaderAt: rac}); err != nil {
		// Bug fix: close the downloader on the error path as well so the
		// HTTP response body is not leaked.
		rac.Close()
		return err
	}
	return rac.Close()
}

89
cache/reader.go vendored Normal file
View file

@ -0,0 +1,89 @@
package cache
import (
"io"
)
type ReaderAtCloser interface {
io.ReaderAt
io.Closer
}
type readerAtCloser struct {
offset int64
rc io.ReadCloser
ra io.ReaderAt
open func(offset int64) (io.ReadCloser, error)
closed bool
}
func NewReaderAtCloser(open func(offset int64) (io.ReadCloser, error)) ReaderAtCloser {
return &readerAtCloser{
open: open,
}
}
func (hrs *readerAtCloser) ReadAt(p []byte, off int64) (n int, err error) {
if hrs.closed {
return 0, io.EOF
}
if hrs.ra != nil {
return hrs.ra.ReadAt(p, off)
}
if hrs.rc == nil || off != hrs.offset {
if hrs.rc != nil {
hrs.rc.Close()
hrs.rc = nil
}
rc, err := hrs.open(off)
if err != nil {
return 0, err
}
hrs.rc = rc
}
if ra, ok := hrs.rc.(io.ReaderAt); ok {
hrs.ra = ra
n, err = ra.ReadAt(p, off)
} else {
for {
var nn int
nn, err = hrs.rc.Read(p)
n += nn
p = p[nn:]
if nn == len(p) || err != nil {
break
}
}
}
hrs.offset += int64(n)
return
}
func (hrs *readerAtCloser) Close() error {
if hrs.closed {
return nil
}
hrs.closed = true
if hrs.rc != nil {
return hrs.rc.Close()
}
return nil
}
type rc struct {
io.ReaderAt
offset int
}
func (r *rc) Read(b []byte) (int, error) {
n, err := r.ReadAt(b, int64(r.offset))
r.offset += n
if n > 0 && err == io.EOF {
err = nil
}
return n, err
}

42
cache/retry.go vendored Normal file
View file

@ -0,0 +1,42 @@
package cache
import (
"bytes"
"fmt"
"io"
"net/http"
)
type retry struct {
transport http.RoundTripper
retry int
}
func (t *retry) RoundTrip(req *http.Request) (*http.Response, error) {
var body []byte
if req.Body != nil {
body, _ = io.ReadAll(req.Body)
}
for count := 0; count < t.retry; count++ {
req.Body = io.NopCloser(bytes.NewBuffer(body))
res, err := t.transport.RoundTrip(req)
if err != nil {
return nil, err
}
if t.check(res) {
if res.Body != nil {
io.Copy(io.Discard, res.Body)
res.Body.Close()
}
continue
}
return res, err
}
return nil, fmt.Errorf("too many retries")
}
func (t *retry) check(res *http.Response) bool {
return res.StatusCode > 399
}

115
cache/tar.go vendored Normal file
View file

@ -0,0 +1,115 @@
package cache
import (
"archive/tar"
"compress/gzip"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
// Tar takes a source and variable writers and walks 'source' writing each file
// found to the tar writer; the purpose for accepting multiple writers is to allow
// for multiple outputs (for example a file, or md5 hash)
func Tar(src string, writers ...io.Writer) error {
if _, err := os.Stat(src); err != nil {
return fmt.Errorf("unable to tar files - %v", err.Error())
}
mw := io.MultiWriter(writers...)
gzw := gzip.NewWriter(mw)
defer gzw.Close()
tw := tar.NewWriter(gzw)
defer tw.Close()
// walk path
return filepath.Walk(src, func(file string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if !fi.Mode().IsRegular() {
return nil
}
header, err := tar.FileInfoHeader(fi, fi.Name())
if err != nil {
return err
}
// update the name to correctly reflect the desired destination when untaring
header.Name = strings.TrimPrefix(strings.Replace(file, src, "", -1), string(filepath.Separator))
if err := tw.WriteHeader(header); err != nil {
return err
}
f, err := os.Open(file)
if err != nil {
return err
}
if _, err := io.Copy(tw, f); err != nil {
f.Close()
return err
}
f.Close()
return nil
})
}
// Untar takes a destination path and a reader; a tar reader loops over the tarfile
// creating the file structure at 'dst' along the way, and writing any files
func Untar(dst string, r io.Reader) error {
gzr, err := gzip.NewReader(r)
if err != nil {
return err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
if errors.Is(err, io.EOF) || header == nil {
break
}
if err != nil {
return err
}
target := filepath.Join(dst, header.Name)
switch header.Typeflag {
// if its a dir and it doesn't exist create it
case tar.TypeDir:
if _, err := os.Stat(target); err != nil {
if err := os.MkdirAll(target, 0755); err != nil {
return err
}
}
// if it's a file create it
case tar.TypeReg:
f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
if err != nil {
return err
}
if _, err := io.Copy(f, tr); err != nil {
return err
}
f.Close()
}
}
return nil
}

View file

@ -1,33 +0,0 @@
package sdk
import (
"fmt"
"net/http"
"net/url"
)
// Client returns an HTTP client pre-configured with the workflow's API base
// URL and a bearer token taken from the action context.
func (a *Action) Client() *Client {
	c := &Client{Client: &http.Client{}}
	context := a.Context()
	c.base = context.APIURL
	c.token = fmt.Sprintf("Bearer %s", context.Token)
	return c
}

// Client is an *http.Client that resolves relative request URLs against the
// configured API base and injects the Authorization header on every request.
type Client struct {
	*http.Client
	base  string // API base URL taken from the GitHub context
	token string // pre-formatted "Bearer ..." header value
}

// Do sets the Authorization header and, for relative URLs, prefixes the
// configured API base before delegating to the embedded client.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
	req.Header.Set("Authorization", c.token)
	if !req.URL.IsAbs() {
		// NOTE(review): plain string concatenation — assumes base has no
		// trailing slash and the request URL starts with one; verify callers.
		u, err := url.Parse(fmt.Sprintf("%s%s", c.base, req.URL))
		if err != nil {
			return nil, err
		}
		req.URL = u
	}
	return c.Client.Do(req)
}

29
cmd/main.go Normal file
View file

@ -0,0 +1,29 @@
package main
import (
"context"
"os"
"git.geekeey.de/actions/sdk"
"git.geekeey.de/actions/sdk/cache"
)
// main is a scratch driver exercising the SDK cache API end to end.
// NOTE(review): work-in-progress (per the "wip: needs tests" commit) —
// several errors are ignored and this is not production code.
func main() {
	a := sdk.New()
	a.AddMask("hello")
	a.WithFieldsSlice("foo=bar", "biz=baz").Debugf("hello world")
	blob, err := a.Cache().Load(context.Background(), "example")
	if err != nil {
		panic(err)
	}
	cache.Tar("./foo") // NOTE(review): error return ignored
	f, err := os.Open("")
	if err != nil {
		// os.Open("") always fails, so this panic is unconditional as written
		panic(err)
	}
	a.Cache().Save(context.Background(), "", cache.NewFileBlob(f)) // NOTE(review): error ignored
	entry := blob.Download(context.Background())
	if entry == nil {
		return
	}
}

View file

@ -2,8 +2,10 @@ package sdk
import (
"encoding/json"
"errors"
"fmt"
"os"
"strconv"
)
// GitHubContext of current workflow.
@ -12,7 +14,7 @@ type GitHubContext struct {
Action string `env:"GITHUB_ACTION"`
ActionPath string `env:"GITHUB_ACTION_PATH"`
ActionRepository string `env:"GITHUB_ACTION_REPOSITORY"`
Actions string `env:"GITHUB_ACTIONS"`
Actions bool `env:"GITHUB_ACTIONS"`
Actor string `env:"GITHUB_ACTOR"`
APIURL string `env:"GITHUB_API_URL,default=https://api.github.com"`
BaseRef string `env:"GITHUB_BASE_REF"`
@ -25,36 +27,34 @@ type GitHubContext struct {
Path string `env:"GITHUB_PATH"`
Ref string `env:"GITHUB_REF"`
RefName string `env:"GITHUB_REF_NAME"`
RefProtected string `env:"GITHUB_REF_PROTECTED"`
RefProtected bool `env:"GITHUB_REF_PROTECTED"`
RefType string `env:"GITHUB_REF_TYPE"`
Repository string `env:"GITHUB_REPOSITORY"`
RepositoryOwner string `env:"GITHUB_REPOSITORY_OWNER"`
RetentionDays string `env:"GITHUB_RETENTION_DAYS"`
RunAttempt string `env:"GITHUB_RUN_ATTEMPT"`
RunID string `env:"GITHUB_RUN_ID"`
RunNumber string `env:"GITHUB_RUN_NUMBER"`
RetentionDays int64 `env:"GITHUB_RETENTION_DAYS"`
RunAttempt int64 `env:"GITHUB_RUN_ATTEMPT"`
RunID int64 `env:"GITHUB_RUN_ID"`
RunNumber int64 `env:"GITHUB_RUN_NUMBER"`
ServerURL string `env:"GITHUB_SERVER_URL,default=https://github.com"`
SHA string `env:"GITHUB_SHA"`
StepSummary string `env:"GITHUB_STEP_SUMMARY"`
Workflow string `env:"GITHUB_WORKFLOW"`
Workspace string `env:"GITHUB_WORKSPACE"`
Token string `env:"GITHUB_TOKEN"`
// Event is populated by parsing the file at EventPath, if it exists.
event map[string]any
Event map[string]any
}
// Context returns the context of current action with the payload object
// that triggered the workflow
func (c *Action) Context() *GitHubContext {
func (c *Action) Context() (*GitHubContext, error) {
var merr error
context := &GitHubContext{
APIURL: "https://api.github.com",
GraphqlURL: "https://api.github.com/graphql",
ServerURL: "https://github.com",
event: map[string]any{},
}
if v := c.env("GITHUB_ACTION"); v != "" {
@ -66,8 +66,10 @@ func (c *Action) Context() *GitHubContext {
if v := c.env("GITHUB_ACTION_REPOSITORY"); v != "" {
context.ActionRepository = v
}
if v := c.env("GITHUB_ACTIONS"); v != "" {
if v, err := parseBool(c.env("GITHUB_ACTIONS")); err == nil {
context.Actions = v
} else {
merr = errors.Join(merr, err)
}
if v := c.env("GITHUB_ACTOR"); v != "" {
context.Actor = v
@ -105,29 +107,41 @@ func (c *Action) Context() *GitHubContext {
if v := c.env("GITHUB_REF_NAME"); v != "" {
context.RefName = v
}
if v := c.env("GITHUB_REF_PROTECTED"); v != "" {
if v, err := parseBool(c.env("GITHUB_REF_PROTECTED")); err == nil {
context.RefProtected = v
} else {
merr = errors.Join(merr, err)
}
if v := c.env("GITHUB_REF_TYPE"); v != "" {
context.RefType = v
}
if v := c.env("GITHUB_REPOSITORY"); v != "" {
context.Repository = v
}
if v := c.env("GITHUB_REPOSITORY_OWNER"); v != "" {
context.RepositoryOwner = v
}
if v := c.env("GITHUB_RETENTION_DAYS"); v != "" {
if v, err := parseInt(c.env("GITHUB_RETENTION_DAYS")); err == nil {
context.RetentionDays = v
} else {
merr = errors.Join(merr, err)
}
if v := c.env("GITHUB_RUN_ATTEMPT"); v != "" {
if v, err := parseInt(c.env("GITHUB_RUN_ATTEMPT")); err == nil {
context.RunAttempt = v
} else {
merr = errors.Join(merr, err)
}
if v := c.env("GITHUB_RUN_ID"); v != "" {
if v, err := parseInt(c.env("GITHUB_RUN_ID")); err == nil {
context.RunID = v
} else {
merr = errors.Join(merr, err)
}
if v := c.env("GITHUB_RUN_NUMBER"); v != "" {
if v, err := parseInt(c.env("GITHUB_RUN_NUMBER")); err == nil {
context.RunNumber = v
} else {
merr = errors.Join(merr, err)
}
if v := c.env("GITHUB_SERVER_URL"); v != "" {
context.ServerURL = v
@ -144,24 +158,32 @@ func (c *Action) Context() *GitHubContext {
if v := c.env("GITHUB_WORKSPACE"); v != "" {
context.Workspace = v
}
if v := c.env("GITHUB_TOKEN"); v != "" {
context.Token = v
}
return context
}
func (c *GitHubContext) Event() (map[string]any, error) {
if c.EventPath != "" {
eventData, err := os.ReadFile(c.EventPath)
if context.EventPath != "" {
eventData, err := os.ReadFile(context.EventPath)
if err != nil && !os.IsNotExist(err) {
return nil, fmt.Errorf("could not read event file: %w", err)
}
if eventData != nil {
if err := json.Unmarshal(eventData, &c.event); err != nil {
if err := json.Unmarshal(eventData, &context.Event); err != nil {
return nil, fmt.Errorf("failed to unmarshal event payload: %w", err)
}
}
}
return c.event, nil
return context, merr
}
// parseBool interprets v as a boolean; an unset (empty) variable is treated
// as false without error.
func parseBool(v string) (bool, error) {
	if len(v) == 0 {
		return false, nil
	}
	return strconv.ParseBool(v)
}
// parseInt interprets v as a base-10 int64; an unset (empty) variable is
// treated as zero without error.
func parseInt(v string) (int64, error) {
	if len(v) == 0 {
		return 0, nil
	}
	return strconv.ParseInt(v, 10, 64)
}

View file

@ -37,7 +37,6 @@ func TestAction_Context(t *testing.T) {
APIURL: "https://api.github.com",
ServerURL: "https://github.com",
GraphqlURL: "https://api.github.com/graphql",
event: map[string]any{},
},
},
{
@ -71,13 +70,12 @@ func TestAction_Context(t *testing.T) {
"GITHUB_STEP_SUMMARY": "/path/to/summary",
"GITHUB_WORKFLOW": "test",
"GITHUB_WORKSPACE": "/path/to/workspace",
"GITHUB_TOKEN": "somerandomtoken",
},
exp: &GitHubContext{
Action: "__repo-owner_name-of-action-repo",
ActionPath: "/path/to/action",
ActionRepository: "repo-owner/name-of-action-repo",
Actions: "true",
Actions: true,
Actor: "sethvargo",
APIURL: "https://foo.com",
BaseRef: "main",
@ -90,21 +88,19 @@ func TestAction_Context(t *testing.T) {
Path: "/path/to/path",
Ref: "refs/tags/v1.0",
RefName: "v1.0",
RefProtected: "true",
RefProtected: true,
RefType: "tag",
Repository: "sethvargo/baz",
RepositoryOwner: "sethvargo",
RetentionDays: "90",
RunAttempt: "6",
RunID: "56",
RunNumber: "34",
RetentionDays: 90,
RunAttempt: 6,
RunID: 56,
RunNumber: 34,
ServerURL: "https://bar.com",
SHA: "abcd1234",
StepSummary: "/path/to/summary",
Workflow: "test",
Workspace: "/path/to/workspace",
Token: "somerandomtoken",
event: map[string]any{},
},
},
{
@ -120,7 +116,7 @@ func TestAction_Context(t *testing.T) {
ServerURL: "https://github.com",
GraphqlURL: "https://api.github.com/graphql",
event: map[string]any{
Event: map[string]any{
"foo": "bar",
},
},
@ -135,8 +131,7 @@ func TestAction_Context(t *testing.T) {
a := New()
a.env = func(s string) string { return tc.env[s] }
got := a.Context()
_, err := got.Event()
got, err := a.Context()
if err != nil {
t.Fatal(err)
}

View file

@ -1,127 +0,0 @@
package glob
import (
"io/fs"
"path/filepath"
"slices"
)
// GlobFS wraps a base fs.FS and exposes only the files that match at least
// one of its glob patterns (directories are visible when they transitively
// contain a match).
type GlobFS struct {
	base     fs.FS
	patterns []Pattern
}

// NewGlobFS creates a new GlobFS that exposes only files matching any of the given glob patterns.
func NewGlobFS(base fs.FS, patterns ...string) (*GlobFS, error) {
	// Style fix: the local was previously named "fs", shadowing the io/fs
	// package for the rest of the function.
	g := &GlobFS{base: base, patterns: make([]Pattern, 0, len(patterns))}
	for _, value := range patterns {
		pattern, err := New(value)
		if err != nil {
			return nil, err
		}
		g.patterns = append(g.patterns, *pattern)
	}
	return g, nil
}
// match reports whether any pattern matches name; with prefix set it tests
// whether name could be a prefix of a future match instead.
func (g *GlobFS) match(name string, prefix bool) bool {
	for _, p := range g.patterns {
		matched := p.Match(name)
		if prefix {
			matched = p.MatchPrefix(name)
		}
		if matched {
			return true
		}
	}
	return false
}
// contains reports whether name itself matches (for files) or, for
// directories, whether any file below it matches.
func (g *GlobFS) contains(name string) (bool, error) {
	stat, err := fs.Stat(g.base, name)
	if err != nil {
		return false, err
	}
	if !stat.IsDir() {
		return g.match(name, false), nil
	}
	found := false
	walkErr := fs.WalkDir(g.base, name, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// Bug fix: prune by the directory actually being visited (path),
		// not the walk root (name) — the old code re-tested the root for
		// every entry and never pruned (or matched) subdirectories properly.
		if d.IsDir() && !g.match(path+string(filepath.Separator), true) {
			return fs.SkipDir
		}
		if g.match(path, false) {
			found = true
			return fs.SkipAll
		}
		return nil
	})
	return found, walkErr
}
// Open opens name from the base FS when the name itself matches a pattern,
// or (for directories) when it transitively contains a matching file.
func (g *GlobFS) Open(name string) (fs.File, error) {
	// Fast path: the root and direct pattern hits skip the expensive walk.
	if name == "." || g.match(name, false) {
		return g.base.Open(name)
	}
	ok, err := g.contains(name)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, fs.ErrNotExist
	}
	return g.base.Open(name)
}
// ReadDir lists name, keeping only entries that match a pattern or contain
// matches deeper down.
func (g *GlobFS) ReadDir(name string) ([]fs.DirEntry, error) {
	isRoot := name == "."
	dir := name + string(filepath.Separator)
	// Fast path: prune directories no pattern could ever reach. The root
	// ('.') must always be readable to seed traversal.
	if !isRoot && !g.match(dir, true) {
		return nil, fs.ErrNotExist
	}
	entries, err := fs.ReadDir(g.base, name)
	if err != nil {
		return nil, err
	}
	// An empty directory is only visible when a pattern names the directory
	// itself exactly; otherwise it cannot be read.
	if !isRoot && len(entries) == 0 && !g.match(dir, false) {
		return nil, fs.ErrNotExist
	}
	children := make([]fs.DirEntry, 0, len(entries))
	for _, entry := range entries {
		ok, err := g.contains(filepath.Join(name, entry.Name()))
		if err != nil {
			return nil, err
		}
		if ok {
			children = append(children, entry)
		}
	}
	return children, nil
}

View file

@ -1,505 +0,0 @@
package glob
import (
"archive/zip"
"bytes"
"io"
"io/fs"
"reflect"
"sort"
"testing"
"testing/fstest"
)
func setupFS() fs.ReadDirFS {
// Create an in-memory FS with a mix of files and directories
return fstest.MapFS{
"main.go": &fstest.MapFile{Data: []byte("package main")},
"main_test.go": &fstest.MapFile{Data: []byte("package main_test")},
"README.md": &fstest.MapFile{Data: []byte("# readme")},
"LICENSE": &fstest.MapFile{Data: []byte("MIT")},
"docs/guide.md": &fstest.MapFile{Data: []byte("Docs")},
"docs/other.txt": &fstest.MapFile{Data: []byte("Other")},
"docs/hidden/.keep": &fstest.MapFile{Data: []byte("")},
"assets/img.png": &fstest.MapFile{Data: []byte("PNG")},
"assets/style.css": &fstest.MapFile{Data: []byte("CSS")},
".gitignore": &fstest.MapFile{Data: []byte("*.log")},
".hiddenfile": &fstest.MapFile{Data: []byte("")},
"emptydir": &fstest.MapFile{Mode: fs.ModeDir | 0o755},
}
}
// helper to get base names for easier comparison
func basenames(entries []fs.DirEntry) []string {
names := []string{}
for _, e := range entries {
names = append(names, e.Name())
}
sort.Strings(names)
return names
}
// TestGlobFS_MultiplePatterns checks ReadDir filtering with several
// simultaneous patterns, including directories that stay hidden because
// nothing inside them matches.
func TestGlobFS_MultiplePatterns(t *testing.T) {
	memfs := setupFS()
	gfs, err := NewGlobFS(memfs, "*.go", "*.md", "assets/*", "docs/guide.md", ".gitignore")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	tests := []struct {
		path    string
		want    []string
		wantErr bool
	}{
		{path: ".", want: []string{"README.md", "assets", "docs", "main.go", "main_test.go", ".gitignore"}},
		{path: "assets", want: []string{"img.png", "style.css"}},
		{path: "docs", want: []string{"guide.md"}},
		{path: "docs/hidden", want: []string{}, wantErr: true},
		{path: "emptydir", want: []string{}, wantErr: true},
	}
	for _, tc := range tests {
		tc := tc // capture range variable
		t.Run(escape(tc.path), func(t *testing.T) {
			entries, err := fs.ReadDir(gfs, tc.path)
			if tc.wantErr && err == nil {
				t.Errorf("expected error, got nil")
				return
			}
			if !tc.wantErr && err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}
			got := basenames(entries)
			sort.Strings(tc.want)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("got %v; want %v", got, tc.want)
			}
		})
	}
}
// TestGlobFS_Open verifies per-path Open visibility: direct matches open,
// non-matching files fail, and directories open when they contain matches.
func TestGlobFS_Open(t *testing.T) {
	memfs := setupFS()
	gfs, err := NewGlobFS(memfs, "*.go", "*.md", "assets/*", "docs/guide.md", ".gitignore")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	type test struct {
		path    string
		wantErr bool
	}
	tests := []test{
		{path: "main.go"},
		{path: "README.md"},
		{path: "LICENSE", wantErr: true},
		{path: "assets/img.png"},
		{path: "assets/style.css"},
		{path: "assets/nonexistent.png", wantErr: true},
		{path: "docs/guide.md"},
		{path: "docs/other.txt", wantErr: true},
		{path: ".gitignore"},
		{path: ".hiddenfile", wantErr: true},
		{path: "docs/hidden/.keep", wantErr: true},
		{path: "emptydir", wantErr: true},
		{path: "docs"},   // allowed because it contains matching file(s)
		{path: "assets"}, // allowed because it contains matching file(s)
	}
	for _, tc := range tests {
		tc := tc
		t.Run(escape(tc.path), func(t *testing.T) {
			f, err := gfs.Open(tc.path)
			if tc.wantErr && err == nil {
				t.Errorf("expected error, got file")
				if f != nil {
					f.Close()
				}
			} else if !tc.wantErr && err != nil {
				t.Errorf("unexpected error: %v", err)
			} else if !tc.wantErr && err == nil {
				info, _ := f.Stat()
				if info.IsDir() {
					// matched directories must also be listable
					_, derr := fs.ReadDir(gfs, tc.path)
					if derr != nil && !tc.wantErr {
						t.Errorf("unexpected error: %v", derr)
					}
				}
				f.Close()
			}
		})
	}
}
// TestGlobFS_ReadFile checks that fs.ReadFile only succeeds for files
// admitted by the patterns, and fails for filtered or missing paths.
func TestGlobFS_ReadFile(t *testing.T) {
	memfs := setupFS()
	gfs, err := NewGlobFS(memfs, "*.go", "*.md", "assets/*", ".gitignore")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	tests := []struct {
		name    string
		want    []byte
		wantErr bool
	}{
		{name: "main.go", want: []byte("package main")},
		{name: "main_test.go", want: []byte("package main_test")},
		{name: "README.md", want: []byte("# readme")},
		{name: "assets/img.png", want: []byte("PNG")},
		{name: "assets/style.css", want: []byte("CSS")},
		{name: ".gitignore", want: []byte("*.log")},
		{name: "LICENSE", wantErr: true},           // not allowed by filter
		{name: "docs/guide.md", wantErr: true},     // not allowed by filter
		{name: "docs/hidden/.keep", wantErr: true}, // not allowed by filter
		{name: "doesnotexist.txt", wantErr: true},  // does not exist
	}
	for _, tc := range tests {
		tc := tc
		t.Run(escape(tc.name), func(t *testing.T) {
			got, err := fs.ReadFile(gfs, tc.name)
			if tc.wantErr {
				if err == nil {
					t.Errorf("expected error, got nil (got=%q)", got)
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if string(got) != string(tc.want) {
					t.Errorf("got %q; want %q", got, tc.want)
				}
			}
		})
	}
}
// TestGlobFS_RelativePaths checks that a directory-scoped pattern filters
// the listing of that directory to matching entries only.
func TestGlobFS_RelativePaths(t *testing.T) {
	memfs := setupFS()
	gfs, err := NewGlobFS(memfs, "docs/*.md")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	entries, err := fs.ReadDir(gfs, "docs")
	if err != nil {
		t.Fatal(err)
	}
	got := basenames(entries)
	want := []string{"guide.md"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("docs/*.md: got %v, want %v", got, want)
	}
}

// TestGlobFS_NoMatchesOpen ensures Open fails when no pattern can match.
func TestGlobFS_NoMatchesOpen(t *testing.T) {
	gfs, err := NewGlobFS(setupFS(), "*.xyz")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	_, err = gfs.Open("main.go")
	if err == nil {
		t.Fatal("expected error when opening file with no matches")
	}
}
// TestGlobFS_NoMatchesStat ensures fs.Stat fails when no pattern can match.
func TestGlobFS_NoMatchesStat(t *testing.T) {
	gfs, err := NewGlobFS(setupFS(), "*.xyz")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	_, err = fs.Stat(gfs, "main.go")
	if err == nil {
		t.Fatal("expected error with no matches: stat")
	}
}

// TestGlobFS_NoMatchesReadDir ensures ReadDir fails on a filtered-out path.
func TestGlobFS_NoMatchesReadDir(t *testing.T) {
	gfs, err := NewGlobFS(setupFS(), "*.xyz")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	_, err = fs.ReadDir(gfs, "main.go")
	if err == nil {
		t.Fatal("expected error with no matches: readdir")
	}
}

// TestGlobFS_NoMatchesReadFile ensures fs.ReadFile fails when no pattern
// matches the requested file.
func TestGlobFS_NoMatchesReadFile(t *testing.T) {
	gfs, err := NewGlobFS(setupFS(), "*.xyz")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	_, err = fs.ReadFile(gfs, "main.go")
	if err == nil {
		t.Fatal("expected error with no matches: readfile")
	}
}
// TestGlobFS_MatchEmptyDirExact: a trailing-slash pattern admits the empty
// directory itself.
func TestGlobFS_MatchEmptyDirExact(t *testing.T) {
	// the trailing slash indicates that the directory should be included
	gfs, err := NewGlobFS(setupFS(), "emptydir/")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	_, err = fs.ReadDir(gfs, "emptydir")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}

// TestGlobFS_MatchEmptyDirExact2: a wildcard below the directory also makes
// the (empty) directory itself readable.
func TestGlobFS_MatchEmptyDirExact2(t *testing.T) {
	// the trailing slash indicates that the directory should be included
	gfs, err := NewGlobFS(setupFS(), "emptydir/*")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	_, err = fs.ReadDir(gfs, "emptydir")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}

// TestGlobFS_NoMatchEmptyDirExact: without a trailing slash the pattern only
// matches files, so the empty directory stays hidden.
func TestGlobFS_NoMatchEmptyDirExact(t *testing.T) {
	// no trailing slash indicates that the directory must be a file to be included
	gfs, err := NewGlobFS(setupFS(), "emptydir")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	_, err = fs.ReadDir(gfs, "emptydir")
	if err == nil {
		t.Fatal("expected error with no matches: readfile")
	}
}
// TestGlobFS_IntegrationWithStdlibWalkDir walks the filtered FS with
// fs.WalkDir and asserts only matching paths (or their parents) appear.
func TestGlobFS_IntegrationWithStdlibWalkDir(t *testing.T) {
	memfs := setupFS()
	gfs, err := NewGlobFS(memfs, "*.go", "docs/guide.md")
	if err != nil {
		t.Errorf("unexpected error while creating glob fs: %v", err)
	}
	// Use fs.WalkDir with our filtered FS
	var walked []string
	err = fs.WalkDir(gfs, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			t.Fatalf("the %q caused: %v", path, err)
			return err
		}
		walked = append(walked, path)
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// Only files and dirs matching or containing matches should appear
	for _, p := range walked {
		if p == "." || p == "main.go" || p == "main_test.go" || p == "docs" || p == "docs/guide.md" {
			continue
		}
		t.Errorf("WalkDir: unexpected path %q", p)
	}
}

// TestGlobFS_InvalidPattern ensures pattern-compilation errors surface from
// the constructor instead of being swallowed.
func TestGlobFS_InvalidPattern(t *testing.T) {
	_, err := NewGlobFS(setupFS(), "[invalid")
	if err == nil {
		t.Fatal("expected error for invalid pattern, got nil")
	}
}
// TestGlobFS_WildcardInDirSegment: a wildcard directory segment exposes the
// directory but yields no entries when nothing below actually matches.
func TestGlobFS_WildcardInDirSegment(t *testing.T) {
	gfs, err := NewGlobFS(setupFS(), "docs/*/*.md")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	entries, err := fs.ReadDir(gfs, "docs/hidden")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(entries) != 0 {
		t.Errorf("expected no entries, got %v", basenames(entries))
	}
}

// TestGlobFS_DeeplyNestedMatch reads a file matched several levels deep.
func TestGlobFS_DeeplyNestedMatch(t *testing.T) {
	memfs := fstest.MapFS{
		"a/b/c/d.txt": &fstest.MapFile{Data: []byte("deep")},
	}
	gfs, err := NewGlobFS(memfs, "a/b/c/*.txt")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	data, err := fs.ReadFile(gfs, "a/b/c/d.txt")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if string(data) != "deep" {
		t.Errorf("got %q, want %q", data, "deep")
	}
}

// TestGlobFS_HiddenFilesOnly: a ".*" pattern exposes only root dotfiles.
func TestGlobFS_HiddenFilesOnly(t *testing.T) {
	gfs, err := NewGlobFS(setupFS(), ".*")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	entries, err := fs.ReadDir(gfs, ".")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	got := basenames(entries)
	want := []string{".gitignore", ".hiddenfile"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %v, want %v", got, want)
	}
}
// Test directory pattern matching with various directory globs.
//
// NOTE(review): the "recursive dir wildcard" subtest uses the same pattern
// ("foo/bar/*") as the "single dir segment wildcard" subtest — presumably a
// recursive pattern was intended here; confirm and adjust the pattern.
func TestGlobFS_DirectoryPatterns(t *testing.T) {
	memfs := fstest.MapFS{
		"foo/bar/baz.txt": &fstest.MapFile{Data: []byte("baz")},
		"foo/bar/qux.txt": &fstest.MapFile{Data: []byte("qux")},
		"foo/readme.md":   &fstest.MapFile{Data: []byte("readme")},
		"foo/empty/.keep": &fstest.MapFile{Data: []byte("")}, // represent empty dir by a file inside
		"top.txt":         &fstest.MapFile{Data: []byte("top")},
	}
	t.Run("single dir segment wildcard", func(t *testing.T) {
		gfs, err := NewGlobFS(memfs, "foo/bar/*")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		entries, err := fs.ReadDir(gfs, "foo/bar")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		got := basenames(entries)
		want := []string{"baz.txt", "qux.txt"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
	})
	t.Run("recursive dir wildcard", func(t *testing.T) {
		gfs, err := NewGlobFS(memfs, "foo/bar/*")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		// Both the matching files and their parent directories must be
		// visible through the filtered FS.
		entries, err := fs.ReadDir(gfs, "foo/bar")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		got := basenames(entries)
		want := []string{"baz.txt", "qux.txt"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
		entries, err = fs.ReadDir(gfs, "foo")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		got = basenames(entries)
		want = []string{"bar"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
	})
	t.Run("match empty directory", func(t *testing.T) {
		gfs, err := NewGlobFS(memfs, "foo/empty/")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		// The pattern selects the directory itself but none of its contents
		// (the ".keep" placeholder is not matched).
		entries, err := fs.ReadDir(gfs, "foo/empty")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(entries) != 0 {
			t.Errorf("expected empty, got %v", basenames(entries))
		}
	})
	t.Run("top-level dir wildcard", func(t *testing.T) {
		gfs, err := NewGlobFS(memfs, "*/bar/*")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		entries, err := fs.ReadDir(gfs, "foo/bar")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		got := basenames(entries)
		want := []string{"baz.txt", "qux.txt"}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v, want %v", got, want)
		}
	})
}
// TestGlobFS_IntegrationWithStdlibZipWriter ensures a GlobFS can be consumed
// by archive/zip's Writer.AddFS and that exactly the matching files end up in
// the archive with their original content.
func TestGlobFS_IntegrationWithStdlibZipWriter(t *testing.T) {
	gfs, err := NewGlobFS(setupFS(), "*")
	if err != nil {
		// Fatalf, not Errorf: continuing with a nil FS would panic in AddFS.
		t.Fatalf("unexpected error while creating glob fs: %v", err)
	}
	want := map[string]string{
		"main.go":      "package main",
		"main_test.go": "package main_test",
		"README.md":    "# readme",
		"LICENSE":      "MIT",
		".gitignore":   "*.log",
		".hiddenfile":  "",
	}
	buf := new(bytes.Buffer)
	wr := zip.NewWriter(buf)
	if err := wr.AddFS(gfs); err != nil {
		t.Fatalf("adding fs to zip writer: %v", err)
	}
	if err := wr.Close(); err != nil {
		t.Fatalf("close zip writer: %v", err)
	}
	rd, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatalf("invalid zip archive: %v", err)
	}
	got := make(map[string]string)
	for _, f := range rd.File {
		rc, err := f.Open()
		if err != nil {
			t.Fatalf("cannot open file %s: %v", f.Name, err)
		}
		content, err := io.ReadAll(rc)
		// Close immediately rather than defer: a defer inside the loop would
		// keep every reader open until the whole test function returns.
		rc.Close()
		if err != nil {
			t.Fatalf("cannot read file %s: %v", f.Name, err)
		}
		got[f.Name] = string(content)
	}
	// Compare expected vs actual.
	for name, exp := range want {
		act, ok := got[name]
		if !ok {
			t.Errorf("expected file %q not found in zip", name)
			continue
		}
		if act != exp {
			t.Errorf("content mismatch for %q:\nexpected: %q\nactual: %q", name, exp, act)
		}
	}
	// Check for unexpected extra files.
	for name := range got {
		if _, ok := want[name]; !ok {
			t.Errorf("unexpected file %q found in zip", name)
		}
	}
}

View file

@ -1,485 +0,0 @@
// Package glob implements a language for specifying glob patterns for path
// names starting at some root. The language does not follow the specs from
// filepath.Match but provides a superset which allows for directory
// wildcards.
//
// Patterns consist of normal characters, non-separator wildcards '*' and '?',
// separators '/' and directory wildcards '**'.
//
// A somewhat formal grammar can be given as:
//
// pattern = term, { '/', term };
// term = '**' | name;
// name = { charSpecial | group | escapedChar | '*' | '?' };
// charSpecial = (* any unicode rune except '/', '*', '?', '[' and '\' *);
// char = (* any unicode rune *);
// escapedChar = '\\', char;
// group = '[', [ '^' ] { escapedChar | groupChar | range } ']';
// groupChar = (* any unicode rune except '-' and ']' *);
// range = ( groupChar | escapedChar ), '-', (groupChar | escapedChar);
//
// The format operators have the following meaning:
//
// - any character (rune) matches exactly this rune - with the following
// exceptions
// - `/` works as a directory separator. It matches directory boundaries of the
// underlying system independently of the separator char used by the OS.
// - `?` matches exactly one non-separator char
// - `*` matches any number of non-separator chars - including zero
// - `\` escapes a character's special meaning allowing `*` and `?` to be used
// as regular characters.
// - `**` matches any number of nested directories. If anything is matched it
// always extends until a separator or the end of the name.
// - Groups can be defined using the `[` and `]` characters. Inside a group the
// special meaning of the characters mentioned before is disabled but the
// following rules apply
// - any character used as part of the group acts as a choice to pick from
// - if the group's first character is a `^` the whole group is negated
// - a range can be defined using `-` matching any rune between low and high
// inclusive
// - Multiple ranges can be given. Ranges can be combined with choices.
// - The meaning of `-` and `]` can be escaped using `\`
package glob
import (
"errors"
"fmt"
"io/fs"
"strings"
"unicode/utf8"
)
const (
	// Separator defines the path separator to use in patterns. This is always
	// a forward slash independently of the underlying OS's separator.
	Separator = '/'
	// SingleWildcard defines the single non-separator character wildcard
	// operator.
	SingleWildcard = '?'
	// AnyWildcard defines the any-number-of-non-separator-characters
	// wildcard operator.
	AnyWildcard = '*'
	// Backslash escapes the next character's special meaning.
	Backslash = '\\'
	// GroupStart starts a character group.
	GroupStart = '['
	// GroupEnd ends a character group.
	GroupEnd = ']'
	// GroupNegate when used as the first character of a group negates the group.
	GroupNegate = '^'
	// Range defines the range operator inside a group.
	Range = '-'
)
var (
	// ErrBadPattern is returned (wrapped with context) when an invalid
	// pattern is found. Make sure you use errors.Is to compare errors to
	// this sentinel value.
	ErrBadPattern = errors.New("bad pattern")
)
// Pattern defines a glob pattern prepared ahead of time which can be used to
// match filenames. Pattern is safe to use concurrently.
type Pattern struct {
	// the parsed token stream produced by New; read-only after construction
	tokens []token
}
// New creates a new pattern from pat and returns it. It returns an error
// wrapping ErrBadPattern for any invalid pattern.
func New(pat string) (*Pattern, error) {
	var tokens []token
	p := pat
	for {
		if len(p) == 0 {
			return &Pattern{tokens: tokens}, nil
		}
		r, l := utf8.DecodeRuneInString(p)
		var t token
		switch r {
		case Separator:
			// Two consecutive separators are never valid.
			if len(tokens) > 0 && tokens[len(tokens)-1].r == Separator {
				return nil, fmt.Errorf("%w: unexpected //", ErrBadPattern)
			}
			t = token{tokenTypeLiteral, Separator, runeGroup{}}
		case SingleWildcard:
			// '?' may not directly follow '*' or '**'.
			if len(tokens) > 0 && (tokens[len(tokens)-1].t == tokenTypeAnyRunes || tokens[len(tokens)-1].t == tokenTypeAnyDirectories) {
				return nil, fmt.Errorf("%w: unexpected ?", ErrBadPattern)
			}
			t = token{tokenTypeSingleRune, 0, runeGroup{}}
		case AnyWildcard:
			// '*' may not directly follow '?' or '**'. The original message
			// reported "unexpected ?" here, which named the wrong operator.
			if len(tokens) > 0 && (tokens[len(tokens)-1].t == tokenTypeSingleRune || tokens[len(tokens)-1].t == tokenTypeAnyDirectories) {
				return nil, fmt.Errorf("%w: unexpected *", ErrBadPattern)
			}
			t = token{tokenTypeAnyRunes, 0, runeGroup{}}
			if len(p[l:]) > 0 {
				// Look ahead for a second '*' forming the directory wildcard
				// '**', which must be followed by a separator.
				n, nl := utf8.DecodeRuneInString(p[l:])
				if n == AnyWildcard {
					d, _ := utf8.DecodeRuneInString(p[l+nl:])
					if d == utf8.RuneError {
						return nil, fmt.Errorf("%w: unexpected end of pattern after **", ErrBadPattern)
					}
					if d != Separator {
						return nil, fmt.Errorf("%w: unexpected %c after **", ErrBadPattern, d)
					}
					t.t = tokenTypeAnyDirectories
					l += nl
				}
			}
		case Backslash:
			if len(p[l:]) == 0 {
				return nil, fmt.Errorf("%w: no character given after \\", ErrBadPattern)
			}
			// The escaped rune loses any special meaning and becomes a literal.
			p = p[l:]
			r, l = utf8.DecodeRuneInString(p)
			t = token{tokenTypeLiteral, r, runeGroup{}}
		case GroupStart:
			var err error
			t, l, err = parseGroup(p)
			if err != nil {
				return nil, err
			}
		case GroupEnd:
			return nil, fmt.Errorf("%w: using ] w/o [", ErrBadPattern)
		default:
			t = token{tokenTypeLiteral, r, runeGroup{}}
		}
		tokens = append(tokens, t)
		p = p[l:]
	}
}
// String reconstructs the glob pattern from the tokens.
func (pat *Pattern) String() string {
	var b strings.Builder
	for _, t := range pat.tokens {
		switch t.t {
		case tokenTypeLiteral:
			switch t.r {
			case GroupStart, GroupEnd, GroupNegate, AnyWildcard, SingleWildcard, Range:
				// Literal runes that collide with operator characters must be
				// re-escaped so the output parses back to the same pattern.
				b.WriteRune(Backslash)
				fallthrough
			default:
				b.WriteRune(t.r)
			}
		case tokenTypeSingleRune:
			b.WriteRune(SingleWildcard)
		case tokenTypeAnyRunes:
			b.WriteRune(AnyWildcard)
		case tokenTypeAnyDirectories:
			// The separator following '**' is stored as its own token, so
			// only the two stars are written here.
			b.WriteString("**")
		case tokenTypeGroup:
			b.WriteRune(GroupStart)
			if t.g.neg {
				b.WriteRune(GroupNegate)
			}
			// NOTE(review): group members and range bounds are written
			// unescaped; a member such as ']' or '-' (entered escaped) would
			// not round-trip through New — confirm whether re-escaping is
			// needed here.
			for _, r := range t.g.runes {
				b.WriteRune(r)
			}
			for _, rg := range t.g.ranges {
				b.WriteRune(rg.lo)
				b.WriteRune(Range)
				b.WriteRune(rg.hi)
			}
			b.WriteRune(GroupEnd)
		}
	}
	return b.String()
}
// GoString implements fmt.GoStringer by returning the pattern's source text.
func (pat *Pattern) GoString() string {
	return pat.String()
}
// Match reports whether the file path name f is matched by the whole
// compiled pattern.
func (pat *Pattern) Match(f string) bool {
	return match(f, pat.tokens, false)
}
// MatchPrefix reports whether f is completely matched by some prefix of the
// compiled pattern. GlobFS uses this to decide whether descending into a
// directory can still produce matches.
func (pat *Pattern) MatchPrefix(f string) bool {
	return match(f, pat.tokens, true)
}
// GlobFS applies pat to all files found in fsys under root and returns the
// matching path names (relative to root) as a string slice. It uses
// fs.WalkDir internally and all constraints given for that function apply to
// GlobFS. Directories whose path cannot prefix-match the pattern are pruned.
func (pat *Pattern) GlobFS(fsys fs.FS, root string) ([]string, error) {
	results := make([]string, 0)
	err := fs.WalkDir(fsys, root, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if p == "." {
			return nil
		}
		if root != "." && root != "" {
			// Strip the root prefix and its trailing separator so matching is
			// relative to root. The previous strings.Replace(p, root, "", 1)
			// left a leading '/' (e.g. "foo/a.go" became "/a.go", which
			// "*.go" can never match) and could also clobber an occurrence of
			// root deeper inside the path.
			p = strings.TrimPrefix(p, root)
			p = strings.TrimPrefix(p, "/")
		}
		if d.IsDir() {
			if !pat.MatchPrefix(p) {
				return fs.SkipDir
			}
			return nil
		}
		if pat.Match(p) {
			results = append(results, p)
		}
		return nil
	})
	return results, err
}
// parseGroup parses a character group ("[...]") starting at the beginning of
// p. It returns the resulting group token, the number of bytes consumed and
// an error wrapping ErrBadPattern if the group is malformed.
func parseGroup(p string) (token, int, error) {
	// re-read the [. No need to assert the rune here as it has been
	// done in the main parsing loop.
	_, le := utf8.DecodeRuneInString(p)
	t := token{
		t: tokenTypeGroup,
		g: runeGroup{},
	}
	initialLen := le
	// start holds a rune that may become either a plain group member or the
	// low bound of a range; 0 acts as the "no pending rune" sentinel.
	// NOTE(review): this means a literal NUL rune can never start a range —
	// presumably irrelevant for path patterns, but worth confirming.
	var start rune
	for {
		if len(p[le:]) == 0 {
			return t, le, fmt.Errorf("%w: missing %c", ErrBadPattern, GroupEnd)
		}
		r, l := utf8.DecodeRuneInString(p[le:])
		le += l
		// A '^' directly after the '[' negates the whole group.
		if initialLen == le-l && r == GroupNegate {
			t.g.neg = true
			continue
		}
		switch r {
		case GroupEnd:
			// Flush any pending rune before closing the group.
			if start != 0 {
				t.g.runes = append(t.g.runes, start)
			}
			return t, le, nil
		case Range:
			if start == 0 {
				return t, le, fmt.Errorf("%w: missing start for character range", ErrBadPattern)
			}
			if len(p[le:]) == 0 {
				return t, le, fmt.Errorf("%w: missing range end", ErrBadPattern)
			}
			r, l = utf8.DecodeRuneInString(p[le:])
			le += l
			switch r {
			case GroupEnd:
				return t, le, fmt.Errorf("%w: unterminated range", ErrBadPattern)
			case Backslash:
				// An escaped rune may serve as the range's high bound.
				if len(p[le:]) == 0 {
					return t, le, fmt.Errorf("%w: missing character after \\", ErrBadPattern)
				}
				r, l = utf8.DecodeRuneInString(p[le:])
				le += l
				fallthrough
			default:
				// Complete the range using the pending start rune.
				t.g.ranges = append(t.g.ranges, runeRange{start, r})
				start = 0
			}
		case Backslash:
			if len(p[le:]) == 0 {
				return t, le, fmt.Errorf("%w: missing character after \\", ErrBadPattern)
			}
			r, l = utf8.DecodeRuneInString(p[le:])
			le += l
			fallthrough
		default:
			// The previously pending rune becomes a plain member; r is now
			// pending (it might still turn out to be a range start).
			if start != 0 {
				t.g.runes = append(t.g.runes, start)
			}
			start = r
		}
	}
}
// match is used internally to implement a simple recursive backtracking
// algorithm using the token list t to match against file path f. If matchPrefix
// is set to true, match returns true if f is completely matched by any prefix
// of t. Otherwise, match returns true if f is matched by _all_ tokens in t.
func match(f string, t []token, matchPrefix bool) bool {
	for {
		if len(f) == 0 {
			// Input exhausted. Any prefix match succeeds; a full match
			// requires all tokens consumed (a single trailing '*' may still
			// match zero runes).
			if matchPrefix {
				return true
			}
			if len(t) == 0 {
				return true
			}
			if len(t) == 1 && t[0].t == tokenTypeAnyRunes {
				return true
			}
			return false
		}
		if len(t) == 0 {
			// Input remains but no tokens are left to consume it.
			return false
		}
		r, le := utf8.DecodeRuneInString(f)
		switch t[0].t {
		case tokenTypeLiteral:
			if t[0].r != r {
				return false
			}
		case tokenTypeGroup:
			if !t[0].g.match(r) {
				return false
			}
		case tokenTypeSingleRune:
			// '?' matches any single rune except the separator.
			if r == Separator {
				return false
			}
		case tokenTypeAnyRunes:
			if r == Separator {
				// '*' never crosses a separator; hand over to the next token.
				return match(f, t[1:], matchPrefix)
			}
			// Branch: either '*' consumes this rune and stays active ...
			if match(f[le:], t, matchPrefix) {
				return true
			}
			// ... or '*' ends here (zero-or-more semantics).
			if match(f, t[1:], matchPrefix) {
				return true
			}
		case tokenTypeAnyDirectories:
			// '**' and its mandatory following separator occupy two tokens,
			// hence t[2:] below. First try matching zero directories.
			if match(f, t[2:], matchPrefix) {
				return true
			}
			// Otherwise advance f past one full path segment ...
			var l2 int
			for {
				if len(f[le+l2:]) == 0 {
					return false
				}
				n, nl := utf8.DecodeRuneInString(f[le+l2:])
				l2 += nl
				if n == Separator {
					break
				}
			}
			// ... then retry either after the '**' or with it still active.
			if match(f[le+l2:], t[2:], matchPrefix) {
				return true
			}
			return match(f[le+l2:], t, matchPrefix)
		}
		// Default step: consume one token and one rune.
		t = t[1:]
		f = f[le:]
	}
}
// tokenType enumerates the different types of tokens.
type tokenType int

const (
	// a literal rune (iota + 1 so the zero value marks "no type")
	tokenTypeLiteral tokenType = iota + 1
	// any single non-separator rune
	tokenTypeSingleRune
	// any number of non-separator runes (incl. zero)
	tokenTypeAnyRunes
	// any number of runes including separators. Matches whole directories.
	tokenTypeAnyDirectories
	// a group of runes consisting of named runes and/or ranges. Might be negated.
	tokenTypeGroup
)
// token implements a single token in the pattern.
type token struct {
	// the token's type
	t tokenType
	// a literal rune to match. Literal runes are stored separate from groups
	// to improve matching performance.
	r rune
	// A rune group to match.
	g runeGroup
}
// A group of runes. Groups can contain any number of enumerated runes and rune
// ranges. In addition a whole group can be negated.
type runeGroup struct {
	// Whether the group is negated
	neg bool
	// Enumerated runes contained in this group
	runes []rune
	// All ranges contained in this group
	ranges []runeRange
}
// match reports whether r is matched by the group, taking negation into
// account: membership in the runes or ranges normally matches, but a negated
// group inverts the result.
func (g runeGroup) match(r rune) bool {
	found := false
	for _, member := range g.runes {
		if member == r {
			found = true
			break
		}
	}
	if !found {
		for _, rng := range g.ranges {
			if rng.match(r) {
				found = true
				break
			}
		}
	}
	// found XOR neg: a hit matches unless negated; a miss matches only when
	// negated.
	return found != g.neg
}
// A closed range of runes consisting of all runes between lo and hi both
// inclusive.
type runeRange struct {
	// low and high bounds of the range (inclusive)
	lo, hi rune
}
// match reports whether r lies within rg (both bounds inclusive).
func (rg runeRange) match(r rune) bool {
	return r >= rg.lo && r <= rg.hi
}

View file

@ -1,220 +0,0 @@
package glob
import (
"errors"
"fmt"
"reflect"
"strings"
"testing"
"testing/fstest"
)
// test describes one table-driven case: a glob pattern, an input path f, the
// expected match result and the expected error (compared via errors.Is).
type test struct {
	pattern, f string
	match      bool
	err        error
}
// escape replaces path separators in name with the visually similar math
// division slash (U+2215) so that "/" does not create nested subtests in
// t.Run names while the output still reads like a path. The previous body
// deleted the separator outright — the division-slash replacement character
// promised by the comment was evidently lost.
func escape(name string) string {
	return strings.ReplaceAll(name, "/", "\u2215")
}
// TestPattern_Match runs the full table of match cases: custom cases for this
// package's extended syntax plus the upstream path.Match test corpus.
func TestPattern_Match(t *testing.T) {
	tests := []test{
		// Test cases not covered by path.Match
		{"main.go", "main.go", true, nil},
		{"main_test.go", "main_test.go", true, nil},
		{"foo/foo_test.go", "foo/foo_test.go", true, nil},
		{"?.go", "m.go", true, nil},
		{"*.go", "main.go", true, nil},
		{"**/*.go", "main.go", true, nil},
		{"*.go", "*.go", true, nil},
		{"//", "", false, ErrBadPattern},
		{"foo//", "", false, ErrBadPattern},
		{"*?.go", "", false, ErrBadPattern},
		{"?*.go", "", false, ErrBadPattern},
		{"**?.go", "", false, ErrBadPattern},
		{"**f", "", false, ErrBadPattern},
		{"[a-", "", false, ErrBadPattern},
		{"[a-\\", "", false, ErrBadPattern},
		{"[\\", "", false, ErrBadPattern},
		{"**/m.go", "foo.go", false, nil},
		{"**/m.go", "foo/a.go", false, nil},
		{"**/m.go", "m.go", true, nil},
		{"**/m.go", "foo/m.go", true, nil},
		{"**/m.go", "bar/m.go", true, nil},
		{"**/m.go", "foo/bar/m.go", true, nil},
		{"ab[cde]", "abc", true, nil},
		{"ab[cde]", "abd", true, nil},
		{"ab[cde]", "abe", true, nil},
		{"ab[+-\\-]", "ab-", true, nil},
		{"ab[\\--a]", "ab-", true, nil},
		{"[a-fA-F]", "a", true, nil},
		{"[a-fA-F]", "f", true, nil},
		{"[a-fA-F]", "A", true, nil},
		{"[a-fA-F]", "F", true, nil},
		// The following test cases are taken from
		// https://github.com/golang/go/blob/master/src/path/match_test.go and are
		// provided here to test compatibility of the match implementation with the
		// test cases from the golang standard lib.
		{"abc", "abc", true, nil},
		{"*", "abc", true, nil},
		{"*c", "abc", true, nil},
		{"a*", "a", true, nil},
		{"a*", "abc", true, nil},
		{"a*", "ab/c", false, nil},
		{"a*/b", "abc/b", true, nil},
		{"a*/b", "a/c/b", false, nil},
		{"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil},
		{"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil},
		{"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil},
		{"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil},
		{"a*b?c*x", "abxbbxdbxebxczzx", true, nil},
		{"a*b?c*x", "abxbbxdbxebxczzy", false, nil},
		{"ab[c]", "abc", true, nil},
		{"ab[b-d]", "abc", true, nil},
		{"ab[e-g]", "abc", false, nil},
		{"ab[^c]", "abc", false, nil},
		{"ab[^b-d]", "abc", false, nil},
		{"ab[^e-g]", "abc", true, nil},
		{"a\\*b", "a*b", true, nil},
		{"a\\*b", "ab", false, nil},
		{"a?b", "a☺b", true, nil},
		{"a[^a]b", "a☺b", true, nil},
		{"a???b", "a☺b", false, nil},
		{"a[^a][^a][^a]b", "a☺b", false, nil},
		{"[a-ζ]*", "α", true, nil},
		{"*[a-ζ]", "A", false, nil},
		{"a?b", "a/b", false, nil},
		{"a*b", "a/b", false, nil},
		{"[\\]a]", "]", true, nil},
		{"[\\-]", "-", true, nil},
		{"[x\\-]", "x", true, nil},
		{"[x\\-]", "-", true, nil},
		{"[x\\-]", "z", false, nil},
		{"[\\-x]", "x", true, nil},
		{"[\\-x]", "-", true, nil},
		{"[\\-x]", "a", false, nil},
		{"[]a]", "]", false, ErrBadPattern},
		{"[-]", "-", false, ErrBadPattern},
		{"[x-]", "x", false, ErrBadPattern},
		{"[x-]", "-", false, ErrBadPattern},
		{"[x-]", "z", false, ErrBadPattern},
		{"[-x]", "x", false, ErrBadPattern},
		{"[-x]", "-", false, ErrBadPattern},
		{"[-x]", "a", false, ErrBadPattern},
		{"\\", "a", false, ErrBadPattern},
		{"[a-b-c]", "a", false, ErrBadPattern},
		{"[", "a", false, ErrBadPattern},
		{"[^", "a", false, ErrBadPattern},
		{"[^bc", "a", false, ErrBadPattern},
		{"a[", "a", false, ErrBadPattern},
		{"a[", "ab", false, ErrBadPattern},
		{"a[", "x", false, ErrBadPattern},
		{"a/b[", "x", false, ErrBadPattern},
		{"*x", "xxx", true, nil},
	}
	for _, tt := range tests {
		pat, err := New(tt.pattern)
		// Accept either an exact error value or a wrapped sentinel.
		if err != tt.err && !errors.Is(err, tt.err) {
			t.Errorf("New(%#q): wanted error %v but got %v", tt.pattern, tt.err, err)
		}
		// Only attempt to match when parsing produced a pattern.
		if pat != nil {
			match := pat.Match(tt.f)
			if match != tt.match {
				t.Errorf("New(%#q).Match(%#q): wanted match %v but got %v", tt.pattern, tt.f, tt.match, match)
			}
		}
	}
}
// TestPattern_MatchPrefix exercises prefix matching as used for directory
// pruning: each path is a prefix that could still lead to a full match.
func TestPattern_MatchPrefix(t *testing.T) {
	cases := []test{
		{"**/*.go", "foo/", true, nil},
		{"**/*.go", "foo", true, nil},
		{"**/*.go", "foo/bar/", true, nil},
		{"**/*.go", "foo/bar", true, nil},
		{"*/*.go", "foo", true, nil},
	}
	for _, tc := range cases {
		tc := tc // capture range variable
		name := fmt.Sprintf("%s (%s)", escape(tc.pattern), escape(tc.f))
		t.Run(name, func(t *testing.T) {
			pat, err := New(tc.pattern)
			if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}
			if got := pat.MatchPrefix(tc.f); got != tc.match {
				t.Errorf("got %v; want %v", got, tc.match)
			}
		})
	}
}
// TestPattern_GlobFS walks an in-memory module layout and checks that GlobFS
// returns exactly the files matched by the pattern, in WalkDir order.
func TestPattern_GlobFS(t *testing.T) {
	fsys := fstest.MapFS{
		"go.mod":                     &fstest.MapFile{Mode: 0644},
		"go.sum":                     &fstest.MapFile{Mode: 0644},
		"cmd/main.go":                &fstest.MapFile{Mode: 0644},
		"cmd/main_test.go":           &fstest.MapFile{Mode: 0644},
		"internal/tool/tool.go":      &fstest.MapFile{Mode: 0644},
		"internal/tool/tool_test.go": &fstest.MapFile{Mode: 0644},
		"internal/cli/cli.go":        &fstest.MapFile{Mode: 0644},
		"internal/cli/cli_test.go":   &fstest.MapFile{Mode: 0644},
	}
	pat, err := New("**/*_test.go")
	if err != nil {
		t.Fatal(err)
	}
	got, err := pat.GlobFS(fsys, ".")
	if err != nil {
		t.Fatal(err)
	}
	want := []string{
		"cmd/main_test.go",
		"internal/cli/cli_test.go",
		"internal/tool/tool_test.go",
	}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("got %v; want %v", got, want)
	}
}
// TestPattern_String verifies that parsing a pattern and rendering it back
// via String is the identity for a set of representative patterns.
func TestPattern_String(t *testing.T) {
	patterns := []string{
		"main.go",
		"*.go",
		"**/*.go",
		"foo/bar/*",
		"foo/?ar.go",
		"foo/[abc].go",
		"foo/[a-c].go",
		"foo/**/",
		"foo/*/bar.go",
		"foo/\\*bar.go",
	}
	for _, want := range patterns {
		t.Run(escape(want), func(t *testing.T) {
			pat, err := New(want)
			if err != nil {
				t.Fatalf("New(%q) failed: %v", want, err)
			}
			if got := pat.String(); got != want {
				t.Fatalf("Pattern.String() = %q, want %q", got, want)
			}
		})
	}
}

4
go.mod
View file

@ -1,3 +1,5 @@
module code.geekeey.de/actions/sdk
module git.geekeey.de/actions/sdk
go 1.22.5
require golang.org/x/sync v0.7.0 // indirect

2
go.sum Normal file
View file

@ -0,0 +1,2 @@
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=