update

commit c251f174ed (2025-12-08 16:16:23 +01:00)
1349 changed files with 194301 additions and 0 deletions

utils/cache/cache_suite_test.go (new file, +17 lines)

@@ -0,0 +1,17 @@
package cache
import (
"testing"
"github.com/navidrome/navidrome/log"
"github.com/navidrome/navidrome/tests"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestCache(t *testing.T) {
tests.Init(t, false)
log.SetLevel(log.LevelFatal)
RegisterFailHandler(Fail)
RunSpecs(t, "Cache Suite")
}

utils/cache/cached_http_client.go (new file, +110 lines)

@@ -0,0 +1,110 @@
package cache
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/json"
"io"
"net/http"
"strings"
"time"
"github.com/navidrome/navidrome/log"
)
const cacheSizeLimit = 100
type HTTPClient struct {
cache SimpleCache[string, string]
hc httpDoer
ttl time.Duration
}
type httpDoer interface {
Do(req *http.Request) (*http.Response, error)
}
type requestData struct {
Method string
Header http.Header
URL string
Body *string
}
func NewHTTPClient(wrapped httpDoer, ttl time.Duration) *HTTPClient {
c := &HTTPClient{hc: wrapped, ttl: ttl}
c.cache = NewSimpleCache[string, string](Options{
SizeLimit: cacheSizeLimit,
DefaultTTL: ttl,
})
return c
}
func (c *HTTPClient) Do(req *http.Request) (*http.Response, error) {
key := c.serializeReq(req)
cached := true
start := time.Now()
respStr, err := c.cache.GetWithLoader(key, func(key string) (string, time.Duration, error) {
cached = false
req, err := c.deserializeReq(key)
if err != nil {
// req is nil here when deserialization fails, so don't use req.Context()
log.Trace("CachedHTTPClient.Do", "key", key, err)
return "", 0, err
}
resp, err := c.hc.Do(req)
if err != nil {
log.Trace(req.Context(), "CachedHTTPClient.Do", "req", req, err)
return "", 0, err
}
defer resp.Body.Close()
return c.serializeResponse(resp), c.ttl, nil
})
log.Trace(req.Context(), "CachedHTTPClient.Do", "key", key, "cached", cached, "elapsed", time.Since(start), err)
if err != nil {
return nil, err
}
return c.deserializeResponse(req, respStr)
}
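// serializeReq encodes the method, headers, URL and (base64-encoded) body as a
// JSON string, which doubles as the cache key. Note that it consumes req.Body;
// that is fine here because Do always replays the request via deserializeReq.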
func (c *HTTPClient) serializeReq(req *http.Request) string {
data := requestData{
Method: req.Method,
Header: req.Header,
URL: req.URL.String(),
}
if req.Body != nil {
bodyData, _ := io.ReadAll(req.Body)
bodyStr := base64.StdEncoding.EncodeToString(bodyData)
data.Body = &bodyStr
}
j, _ := json.Marshal(&data)
return string(j)
}
func (c *HTTPClient) deserializeReq(reqStr string) (*http.Request, error) {
var data requestData
_ = json.Unmarshal([]byte(reqStr), &data)
var body io.Reader
if data.Body != nil {
bodyStr, _ := base64.StdEncoding.DecodeString(*data.Body)
body = strings.NewReader(string(bodyStr))
}
req, err := http.NewRequest(data.Method, data.URL, body)
if err != nil {
return nil, err
}
req.Header = data.Header
return req, nil
}
func (c *HTTPClient) serializeResponse(resp *http.Response) string {
var b = &bytes.Buffer{}
_ = resp.Write(b)
return b.String()
}
func (c *HTTPClient) deserializeResponse(req *http.Request, respStr string) (*http.Response, error) {
r := bufio.NewReader(strings.NewReader(respStr))
return http.ReadResponse(r, req)
}
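
For context, here is a minimal usage sketch of this client (the URL and the 5-minute TTL are illustrative, not part of this commit). It wraps anything that implements httpDoer, in this case http.DefaultClient, and serves repeated identical requests from memory until the TTL expires:

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/navidrome/navidrome/utils/cache"
)

func main() {
	// Cache identical requests for 5 minutes (illustrative TTL)
	hc := cache.NewHTTPClient(http.DefaultClient, 5*time.Minute)

	for i := 0; i < 2; i++ {
		req, _ := http.NewRequest("GET", "https://example.com/", nil)
		resp, err := hc.Do(req) // first iteration hits the network, second is answered from cache
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		_ = resp.Body.Close()
		fmt.Println(i, len(body))
	}
}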

utils/cache/cached_http_client_test.go (new file, +93 lines)

@@ -0,0 +1,93 @@
package cache
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"time"
"github.com/navidrome/navidrome/consts"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("HTTPClient", func() {
Context("GET", func() {
var chc *HTTPClient
var ts *httptest.Server
var requestsReceived int
var header string
BeforeEach(func() {
ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestsReceived++
header = r.Header.Get("head")
_, _ = fmt.Fprintf(w, "Hello, %s", r.URL.Query()["name"])
}))
chc = NewHTTPClient(http.DefaultClient, consts.DefaultHttpClientTimeOut)
})
AfterEach(func() {
ts.Close()
})
It("caches repeated requests", func() {
r, _ := http.NewRequest("GET", ts.URL+"?name=doe", nil)
resp, err := chc.Do(r)
Expect(err).To(BeNil())
body, err := io.ReadAll(resp.Body)
Expect(err).To(BeNil())
Expect(string(body)).To(Equal("Hello, [doe]"))
Expect(requestsReceived).To(Equal(1))
// Same request
r, _ = http.NewRequest("GET", ts.URL+"?name=doe", nil)
resp, err = chc.Do(r)
Expect(err).To(BeNil())
body, err = io.ReadAll(resp.Body)
Expect(err).To(BeNil())
Expect(string(body)).To(Equal("Hello, [doe]"))
Expect(requestsReceived).To(Equal(1))
// Different request
r, _ = http.NewRequest("GET", ts.URL, nil)
resp, err = chc.Do(r)
Expect(err).To(BeNil())
body, err = io.ReadAll(resp.Body)
Expect(err).To(BeNil())
Expect(string(body)).To(Equal("Hello, []"))
Expect(requestsReceived).To(Equal(2))
// Different again (same as before, but with header)
r, _ = http.NewRequest("GET", ts.URL, nil)
r.Header.Add("head", "this is a header")
resp, err = chc.Do(r)
Expect(err).To(BeNil())
body, err = io.ReadAll(resp.Body)
Expect(err).To(BeNil())
Expect(string(body)).To(Equal("Hello, []"))
Expect(header).To(Equal("this is a header"))
Expect(requestsReceived).To(Equal(3))
})
It("expires responses after TTL", func() {
requestsReceived = 0
chc = NewHTTPClient(http.DefaultClient, 10*time.Millisecond)
r, _ := http.NewRequest("GET", ts.URL+"?name=doe", nil)
_, err := chc.Do(r)
Expect(err).To(BeNil())
Expect(requestsReceived).To(Equal(1))
// Wait more than the TTL
time.Sleep(50 * time.Millisecond)
// Same request
r, _ = http.NewRequest("GET", ts.URL+"?name=doe", nil)
_, err = chc.Do(r)
Expect(err).To(BeNil())
Expect(requestsReceived).To(Equal(2))
})
})
})

utils/cache/file_caches.go (new file, +283 lines)

@@ -0,0 +1,283 @@
package cache
import (
"context"
"fmt"
"io"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/djherbis/fscache"
"github.com/dustin/go-humanize"
"github.com/hashicorp/go-multierror"
"github.com/navidrome/navidrome/conf"
"github.com/navidrome/navidrome/consts"
"github.com/navidrome/navidrome/log"
)
// Item represents an item that can be cached. It must implement the Key method that returns a unique key for a
// given item.
type Item interface {
Key() string
}
// ReadFunc is a function that retrieves the data to be cached. It receives the Item to be cached and returns
// an io.Reader with the data and an error.
type ReadFunc func(ctx context.Context, item Item) (io.Reader, error)
// FileCache is designed to cache data on the filesystem to improve performance by avoiding repeated data
// retrieval operations.
//
// Errors are handled gracefully. If the cache is not initialized or an error occurs during data
// retrieval, it will log the error and proceed without caching.
type FileCache interface {
// Get retrieves data from the cache. This method checks if the data is already cached. If it is, it
// returns the cached data. If not, it retrieves the data using the provided getReader function and caches it.
//
// Example Usage:
//
// s, err := fc.Get(context.Background(), cacheKey("testKey"))
// if err != nil {
// log.Fatal(err)
// }
// defer s.Close()
//
// data, err := io.ReadAll(s)
// if err != nil {
// log.Fatal(err)
// }
// fmt.Println(string(data))
Get(ctx context.Context, item Item) (*CachedStream, error)
// Available checks if the cache is available
Available(ctx context.Context) bool
// Disabled reports if the cache has been permanently disabled
Disabled(ctx context.Context) bool
}
// NewFileCache creates a new FileCache. This function initializes the cache and starts it in the background.
//
// name: A string representing the name of the cache.
// cacheSize: A string representing the maximum size of the cache (e.g., "1KB", "10MB").
// cacheFolder: A string representing the folder where the cache files will be stored.
// maxItems: An integer representing the maximum number of items the cache can hold.
// getReader: A function of type ReadFunc that retrieves the data to be cached.
//
// Example Usage:
//
// fc := NewFileCache("exampleCache", "10MB", "cacheFolder", 100, func(ctx context.Context, item Item) (io.Reader, error) {
// // Implement the logic to retrieve the data for the given item
// return strings.NewReader(item.Key()), nil
// })
func NewFileCache(name, cacheSize, cacheFolder string, maxItems int, getReader ReadFunc) FileCache {
fc := &fileCache{
name: name,
cacheSize: cacheSize,
cacheFolder: filepath.FromSlash(cacheFolder),
maxItems: maxItems,
getReader: getReader,
mutex: &sync.RWMutex{},
}
go func() {
start := time.Now()
cache, err := newFSCache(fc.name, fc.cacheSize, fc.cacheFolder, fc.maxItems)
fc.mutex.Lock()
defer fc.mutex.Unlock()
fc.cache = cache
fc.disabled = cache == nil || err != nil
log.Info("Finished initializing cache", "cache", fc.name, "maxSize", fc.cacheSize, "elapsedTime", time.Since(start))
fc.ready.Store(true)
if err != nil {
log.Error(fmt.Sprintf("Cache %s will be DISABLED due to previous errors", "name"), fc.name, err)
}
if fc.disabled {
log.Debug("Cache DISABLED", "cache", fc.name, "size", fc.cacheSize)
}
}()
return fc
}
type fileCache struct {
name string
cacheSize string
cacheFolder string
maxItems int
cache fscache.Cache
getReader ReadFunc
disabled bool
ready atomic.Bool
mutex *sync.RWMutex
}
func (fc *fileCache) Available(_ context.Context) bool {
fc.mutex.RLock()
defer fc.mutex.RUnlock()
return fc.ready.Load() && !fc.disabled
}
func (fc *fileCache) Disabled(_ context.Context) bool {
fc.mutex.RLock()
defer fc.mutex.RUnlock()
return fc.disabled
}
func (fc *fileCache) invalidate(ctx context.Context, key string) error {
if !fc.Available(ctx) {
log.Debug(ctx, "Cache not initialized yet. Cannot invalidate key", "cache", fc.name, "key", key)
return nil
}
if !fc.cache.Exists(key) {
return nil
}
err := fc.cache.Remove(key)
if err != nil {
log.Warn(ctx, "Error removing key from cache", "cache", fc.name, "key", key, err)
}
return err
}
func (fc *fileCache) Get(ctx context.Context, arg Item) (*CachedStream, error) {
if !fc.Available(ctx) {
log.Debug(ctx, "Cache not initialized yet. Reading data directly from reader", "cache", fc.name)
reader, err := fc.getReader(ctx, arg)
if err != nil {
return nil, err
}
return &CachedStream{Reader: reader}, nil
}
key := arg.Key()
r, w, err := fc.cache.Get(key)
if err != nil {
return nil, err
}
cached := w == nil
if !cached {
log.Trace(ctx, "Cache MISS", "cache", fc.name, "key", key)
reader, err := fc.getReader(ctx, arg)
if err != nil {
_ = r.Close()
_ = w.Close()
_ = fc.invalidate(ctx, key)
return nil, err
}
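// The write happens in the background; fscache lets the reader r (returned
// below) stream the bytes while they are still being copied into the cache file.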
go func() {
if err := copyAndClose(w, reader); err != nil {
log.Debug(ctx, "Error storing file in cache", "cache", fc.name, "key", key, err)
_ = fc.invalidate(ctx, key)
} else {
log.Trace(ctx, "File successfully stored in cache", "cache", fc.name, "key", key)
}
}()
}
// If it is in the cache, check if the stream is done being written. If so, return a ReadSeeker
if cached {
size := getFinalCachedSize(r)
if size >= 0 {
log.Trace(ctx, "Cache HIT", "cache", fc.name, "key", key, "size", size)
sr := io.NewSectionReader(r, 0, size)
return &CachedStream{
Reader: sr,
Seeker: sr,
Closer: r,
Cached: true,
}, nil
} else {
log.Trace(ctx, "Cache HIT", "cache", fc.name, "key", key)
}
}
// In all other cases, just return the cache reader, without Seek capabilities
return &CachedStream{Reader: r, Cached: cached}, nil
}
// CachedStream is a wrapper around an io.ReadCloser that allows reading from a cache.
type CachedStream struct {
io.Reader
io.Seeker
io.Closer
Cached bool
}
func (s *CachedStream) Close() error {
if s.Closer != nil {
return s.Closer.Close()
}
if c, ok := s.Reader.(io.Closer); ok {
return c.Close()
}
return nil
}
func getFinalCachedSize(r fscache.ReadAtCloser) int64 {
cr, ok := r.(*fscache.CacheReader)
if ok {
size, final, err := cr.Size()
if final && err == nil {
return size
}
}
return -1
}
func copyAndClose(w io.WriteCloser, r io.Reader) error {
_, err := io.Copy(w, r)
if err != nil {
err = fmt.Errorf("copying data to cache: %w", err)
}
if c, ok := r.(io.Closer); ok {
if cErr := c.Close(); cErr != nil {
err = multierror.Append(err, fmt.Errorf("closing source stream: %w", cErr))
}
}
if cErr := w.Close(); cErr != nil {
err = multierror.Append(err, fmt.Errorf("closing cache writer: %w", cErr))
}
return err
}
func newFSCache(name, cacheSize, cacheFolder string, maxItems int) (fscache.Cache, error) {
size, err := humanize.ParseBytes(cacheSize)
if err != nil {
log.Error("Invalid cache size. Using default size", "cache", name, "size", cacheSize,
"defaultSize", humanize.Bytes(consts.DefaultCacheSize))
size = consts.DefaultCacheSize
}
if size == 0 {
log.Warn(fmt.Sprintf("%s cache disabled", name))
return nil, nil
}
lru := NewFileHaunter(name, maxItems, size, consts.DefaultCacheCleanUpInterval)
h := fscache.NewLRUHaunterStrategy(lru)
cacheFolder = filepath.Join(conf.Server.CacheFolder, cacheFolder)
var fs *spreadFS
log.Info(fmt.Sprintf("Creating %s cache", name), "path", cacheFolder, "maxSize", humanize.Bytes(size))
fs, err = NewSpreadFS(cacheFolder, 0755)
if err != nil {
log.Error(fmt.Sprintf("Error initializing %s cache FS", name), err)
return nil, err
}
ck, err := fscache.NewCacheWithHaunter(fs, h)
if err != nil {
log.Error(fmt.Sprintf("Error initializing %s cache", name), err)
return nil, err
}
ck.SetKeyMapper(fs.KeyMapper)
return ck, nil
}
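
To make the FileCache API concrete, a small sketch (the cacheKey type, sizes and folder name are illustrative; it also assumes the Navidrome configuration is initialized so that conf.Server.CacheFolder points somewhere writable):

package main

import (
	"context"
	"fmt"
	"io"
	"strings"

	"github.com/navidrome/navidrome/utils/cache"
)

// cacheKey is a hypothetical Item implementation; any type with a Key() method works
type cacheKey string

func (k cacheKey) Key() string { return string(k) }

func main() {
	fc := cache.NewFileCache("example", "10MB", "example", 100,
		func(ctx context.Context, item cache.Item) (io.Reader, error) {
			// Called only on a cache MISS (or while the cache is still initializing)
			return strings.NewReader("data for " + item.Key()), nil
		})

	s, err := fc.Get(context.Background(), cacheKey("answer"))
	if err != nil {
		panic(err)
	}
	defer s.Close()
	data, _ := io.ReadAll(s)
	fmt.Println(string(data), "cached:", s.Cached)
}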

utils/cache/file_caches_test.go (new file, +150 lines)

@@ -0,0 +1,150 @@
package cache
import (
"context"
"errors"
"io"
"os"
"path/filepath"
"strings"
"github.com/navidrome/navidrome/conf"
"github.com/navidrome/navidrome/conf/configtest"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Call NewFileCache and wait for it to be ready
func callNewFileCache(name, cacheSize, cacheFolder string, maxItems int, getReader ReadFunc) *fileCache {
fc := NewFileCache(name, cacheSize, cacheFolder, maxItems, getReader).(*fileCache)
Eventually(func() bool { return fc.ready.Load() }).Should(BeTrue())
return fc
}
var _ = Describe("File Caches", func() {
BeforeEach(func() {
tmpDir, _ := os.MkdirTemp("", "file_caches")
DeferCleanup(func() {
configtest.SetupConfig()
_ = os.RemoveAll(tmpDir)
})
conf.Server.CacheFolder = tmpDir
})
Describe("NewFileCache", func() {
It("creates the cache folder", func() {
Expect(callNewFileCache("test", "1k", "test", 0, nil)).ToNot(BeNil())
_, err := os.Stat(filepath.Join(conf.Server.CacheFolder, "test"))
Expect(os.IsNotExist(err)).To(BeFalse())
})
It("creates the cache folder with invalid size", func() {
fc := callNewFileCache("test", "abc", "test", 0, nil)
Expect(fc.cache).ToNot(BeNil())
Expect(fc.disabled).To(BeFalse())
})
It("returns empty if cache size is '0'", func() {
fc := callNewFileCache("test", "0", "test", 0, nil)
Expect(fc.cache).To(BeNil())
Expect(fc.disabled).To(BeTrue())
})
It("reports when cache is disabled", func() {
fc := callNewFileCache("test", "0", "test", 0, nil)
Expect(fc.Disabled(context.Background())).To(BeTrue())
fc = callNewFileCache("test", "1KB", "test", 0, nil)
Expect(fc.Disabled(context.Background())).To(BeFalse())
})
})
Describe("FileCache", func() {
It("caches data if cache is enabled", func() {
called := false
fc := callNewFileCache("test", "1KB", "test", 0, func(ctx context.Context, arg Item) (io.Reader, error) {
called = true
return strings.NewReader(arg.Key()), nil
})
// First call is a MISS
s, err := fc.Get(context.Background(), &testArg{"test"})
Expect(err).To(BeNil())
Expect(s.Cached).To(BeFalse())
Expect(s.Closer).To(BeNil())
Expect(io.ReadAll(s)).To(Equal([]byte("test")))
// Second call is a HIT
called = false
s, err = fc.Get(context.Background(), &testArg{"test"})
Expect(err).To(BeNil())
Expect(io.ReadAll(s)).To(Equal([]byte("test")))
Expect(s.Cached).To(BeTrue())
Expect(s.Closer).ToNot(BeNil())
Expect(called).To(BeFalse())
})
It("does not cache data if cache is disabled", func() {
called := false
fc := callNewFileCache("test", "0", "test", 0, func(ctx context.Context, arg Item) (io.Reader, error) {
called = true
return strings.NewReader(arg.Key()), nil
})
// First call is a MISS
s, err := fc.Get(context.Background(), &testArg{"test"})
Expect(err).To(BeNil())
Expect(s.Cached).To(BeFalse())
Expect(io.ReadAll(s)).To(Equal([]byte("test")))
// Second call is also a MISS
called = false
s, err = fc.Get(context.Background(), &testArg{"test"})
Expect(err).To(BeNil())
Expect(io.ReadAll(s)).To(Equal([]byte("test")))
Expect(s.Cached).To(BeFalse())
Expect(called).To(BeTrue())
})
Context("reader errors", func() {
When("creating a reader fails", func() {
It("does not cache", func() {
fc := callNewFileCache("test", "1KB", "test", 0, func(ctx context.Context, arg Item) (io.Reader, error) {
return nil, errors.New("failed")
})
_, err := fc.Get(context.Background(), &testArg{"test"})
Expect(err).To(MatchError("failed"))
})
})
When("reader returns error", func() {
It("does not cache", func() {
fc := callNewFileCache("test", "1KB", "test", 0, func(ctx context.Context, arg Item) (io.Reader, error) {
return errFakeReader{errors.New("read failure")}, nil
})
s, err := fc.Get(context.Background(), &testArg{"test"})
Expect(err).ToNot(HaveOccurred())
_, _ = io.Copy(io.Discard, s)
// TODO How to make the fscache reader return the underlying reader error?
//Expect(err).To(MatchError("read failure"))
// Data should not be cached (or eventually be removed from cache)
Eventually(func() bool {
s, _ = fc.Get(context.Background(), &testArg{"test"})
if s != nil {
return s.Cached
}
return false
}).Should(BeFalse())
})
})
})
})
})
type testArg struct{ s string }
func (t *testArg) Key() string { return t.s }
type errFakeReader struct{ err error }
func (e errFakeReader) Read([]byte) (int, error) { return 0, e.err }

utils/cache/file_haunter.go (new file, +130 lines)

@@ -0,0 +1,130 @@
package cache
import (
"sort"
"time"
"github.com/djherbis/fscache"
"github.com/dustin/go-humanize"
"github.com/navidrome/navidrome/log"
)
type haunterKV struct {
key string
value fscache.Entry
info fscache.FileInfo
}
// NewFileHaunter returns a simple haunter which runs every "period"
// and scrubs older files when the total file size is over maxSize or
// total item count is over maxItems. It also removes empty (invalid) files.
// If maxItems or maxSize is 0, that limit is not checked.
//
// Based on fscache.NewLRUHaunter
func NewFileHaunter(name string, maxItems int, maxSize uint64, period time.Duration) fscache.LRUHaunter {
return &fileHaunter{
name: name,
period: period,
maxItems: maxItems,
maxSize: maxSize,
}
}
type fileHaunter struct {
name string
period time.Duration
maxItems int
maxSize uint64
}
func (j *fileHaunter) Next() time.Duration {
return j.period
}
func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
var count int
var size uint64
var okFiles []haunterKV
log.Trace("Running cache cleanup", "cache", j.name, "maxSize", humanize.Bytes(j.maxSize))
c.EnumerateEntries(func(key string, e fscache.Entry) bool {
if e.InUse() {
return true
}
fileInfo, err := c.Stat(e.Name())
if err != nil {
return true
}
if fileInfo.Size() == 0 {
log.Trace("Removing invalid empty file", "file", e.Name())
keysToReap = append(keysToReap, key)
}
count++
size = size + uint64(fileInfo.Size())
okFiles = append(okFiles, haunterKV{
key: key,
value: e,
info: fileInfo,
})
return true
})
sort.Slice(okFiles, func(i, j int) bool {
iLastRead := okFiles[i].info.AccessTime()
jLastRead := okFiles[j].info.AccessTime()
return iLastRead.Before(jLastRead)
})
collectKeysToReapFn := func() bool {
var key *string
var err error
key, count, size, err = j.removeFirst(&okFiles, count, size)
if err != nil {
return false
}
if key != nil {
keysToReap = append(keysToReap, *key)
}
return true
}
log.Trace("Current cache stats", "cache", j.name, "size", humanize.Bytes(size), "numItems", count)
if j.maxItems > 0 {
for count > j.maxItems {
if !collectKeysToReapFn() {
break
}
}
}
if j.maxSize > 0 {
for size > j.maxSize {
if !collectKeysToReapFn() {
break
}
}
}
if len(keysToReap) > 0 {
log.Trace("Removing items from cache", "cache", j.name, "numItems", len(keysToReap))
}
return keysToReap
}
func (j *fileHaunter) removeFirst(items *[]haunterKV, count int, size uint64) (*string, int, uint64, error) {
var f haunterKV
f, *items = (*items)[0], (*items)[1:]
count--
size = size - uint64(f.info.Size())
return &f.key, count, size, nil
}
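
A worked example of the scrub policy, matching the test below: five 5-byte files ("hello") plus one empty file add up to 25 bytes. With maxSize = 20, the haunter first reaps the invalid empty file, then evicts entries in least-recently-accessed order until the total is at most 20 bytes, leaving four files.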

utils/cache/file_haunter_test.go (new file, +102 lines)

@@ -0,0 +1,102 @@
package cache_test
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/djherbis/fscache"
"github.com/navidrome/navidrome/utils/cache"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("FileHaunter", func() {
var fs fscache.FileSystem
var fsCache *fscache.FSCache
var cacheDir string
var err error
var maxItems int
var maxSize uint64
JustBeforeEach(func() {
tempDir, _ := os.MkdirTemp("", "spread_fs")
cacheDir = filepath.Join(tempDir, "cache1")
fs, err = fscache.NewFs(cacheDir, 0700)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() { _ = os.RemoveAll(tempDir) })
fsCache, err = fscache.NewCacheWithHaunter(fs, fscache.NewLRUHaunterStrategy(
cache.NewFileHaunter("", maxItems, maxSize, 300*time.Millisecond),
))
Expect(err).ToNot(HaveOccurred())
DeferCleanup(fsCache.Clean)
Expect(createTestFiles(fsCache)).To(Succeed())
<-time.After(400 * time.Millisecond)
})
Context("When maxSize is defined", func() {
BeforeEach(func() {
maxSize = 20
})
It("removes files", func() {
Expect(os.ReadDir(cacheDir)).To(HaveLen(4))
Expect(fsCache.Exists("stream-5")).To(BeFalse(), "stream-5 (empty file) should have been scrubbed")
// TODO Fix flaky tests
//Expect(fsCache.Exists("stream-0")).To(BeFalse(), "stream-0 should have been scrubbed")
})
})
XContext("When maxItems is defined", func() {
BeforeEach(func() {
maxItems = 3
})
It("removes files", func() {
Expect(os.ReadDir(cacheDir)).To(HaveLen(maxItems))
Expect(fsCache.Exists("stream-5")).To(BeFalse(), "stream-5 (empty file) should have been scrubbed")
// TODO Fix flaky tests
//Expect(fsCache.Exists("stream-0")).To(BeFalse(), "stream-0 should have been scrubbed")
//Expect(fsCache.Exists("stream-1")).To(BeFalse(), "stream-1 should have been scrubbed")
})
})
})
func createTestFiles(c *fscache.FSCache) error {
// Create 5 normal files and 1 empty
for i := 0; i < 6; i++ {
name := fmt.Sprintf("stream-%v", i)
var r fscache.ReadAtCloser
if i < 5 {
r = createCachedStream(c, name, "hello")
} else { // Last one is empty
r = createCachedStream(c, name, "")
}
if !c.Exists(name) {
return errors.New(name + " should exist")
}
<-time.After(10 * time.Millisecond)
err := r.Close()
if err != nil {
return err
}
}
return nil
}
func createCachedStream(c *fscache.FSCache, name string, contents string) fscache.ReadAtCloser {
r, w, _ := c.Get(name)
_, _ = w.Write([]byte(contents))
_ = w.Close()
_, _ = io.Copy(io.Discard, r)
return r
}

utils/cache/simple_cache.go (new file, +153 lines)

@@ -0,0 +1,153 @@
package cache
import (
"context"
"errors"
"fmt"
"runtime"
"sync/atomic"
"time"
"github.com/jellydator/ttlcache/v3"
. "github.com/navidrome/navidrome/utils/gg"
)
type SimpleCache[K comparable, V any] interface {
Add(key K, value V) error
AddWithTTL(key K, value V, ttl time.Duration) error
Get(key K) (V, error)
GetWithLoader(key K, loader func(key K) (V, time.Duration, error)) (V, error)
Keys() []K
Values() []V
Len() int
OnExpiration(fn func(K, V)) func()
}
type Options struct {
SizeLimit uint64
DefaultTTL time.Duration
}
func NewSimpleCache[K comparable, V any](options ...Options) SimpleCache[K, V] {
opts := []ttlcache.Option[K, V]{
ttlcache.WithDisableTouchOnHit[K, V](),
}
if len(options) > 0 {
o := options[0]
if o.SizeLimit > 0 {
opts = append(opts, ttlcache.WithCapacity[K, V](o.SizeLimit))
}
if o.DefaultTTL > 0 {
opts = append(opts, ttlcache.WithTTL[K, V](o.DefaultTTL))
}
}
c := ttlcache.New[K, V](opts...)
cache := &simpleCache[K, V]{
data: c,
}
go cache.data.Start()
// Automatic cleanup to prevent goroutine leak when cache is garbage collected
runtime.AddCleanup(cache, func(ttlCache *ttlcache.Cache[K, V]) {
ttlCache.Stop()
}, cache.data)
return cache
}
const evictionTimeout = 1 * time.Hour
type simpleCache[K comparable, V any] struct {
data *ttlcache.Cache[K, V]
evictionDeadline atomic.Pointer[time.Time]
}
func (c *simpleCache[K, V]) Add(key K, value V) error {
c.evictExpired()
return c.AddWithTTL(key, value, ttlcache.DefaultTTL)
}
func (c *simpleCache[K, V]) AddWithTTL(key K, value V, ttl time.Duration) error {
c.evictExpired()
item := c.data.Set(key, value, ttl)
if item == nil {
return errors.New("failed to add item")
}
return nil
}
func (c *simpleCache[K, V]) Get(key K) (V, error) {
item := c.data.Get(key)
if item == nil {
var zero V
return zero, errors.New("item not found")
}
return item.Value(), nil
}
func (c *simpleCache[K, V]) GetWithLoader(key K, loader func(key K) (V, time.Duration, error)) (V, error) {
var err error
loaderWrapper := ttlcache.LoaderFunc[K, V](
func(t *ttlcache.Cache[K, V], key K) *ttlcache.Item[K, V] {
c.evictExpired()
var value V
var ttl time.Duration
value, ttl, err = loader(key)
if err != nil {
return nil
}
return t.Set(key, value, ttl)
},
)
item := c.data.Get(key, ttlcache.WithLoader[K, V](loaderWrapper))
if item == nil {
var zero V
if err != nil {
return zero, fmt.Errorf("cache error: loader returned %w", err)
}
return zero, errors.New("item not found")
}
return item.Value(), nil
}
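// evictExpired proactively removes expired entries, but at most once per
// evictionTimeout, so frequent writes don't pay the DeleteExpired cost every time.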
func (c *simpleCache[K, V]) evictExpired() {
if c.evictionDeadline.Load() == nil || c.evictionDeadline.Load().Before(time.Now()) {
c.data.DeleteExpired()
c.evictionDeadline.Store(P(time.Now().Add(evictionTimeout)))
}
}
func (c *simpleCache[K, V]) Keys() []K {
res := make([]K, 0, c.data.Len())
c.data.Range(func(item *ttlcache.Item[K, V]) bool {
if !item.IsExpired() {
res = append(res, item.Key())
}
return true
})
return res
}
func (c *simpleCache[K, V]) Values() []V {
res := make([]V, 0, c.data.Len())
c.data.Range(func(item *ttlcache.Item[K, V]) bool {
if !item.IsExpired() {
res = append(res, item.Value())
}
return true
})
return res
}
func (c *simpleCache[K, V]) Len() int {
return c.data.Len()
}
func (c *simpleCache[K, V]) OnExpiration(fn func(K, V)) func() {
return c.data.OnEviction(func(_ context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[K, V]) {
if reason == ttlcache.EvictionReasonExpired {
fn(item.Key(), item.Value())
}
})
}
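
A compact usage sketch (key names and TTL values are illustrative): GetWithLoader only invokes the loader when the key is absent or expired, so it memoizes expensive computations.

package main

import (
	"fmt"
	"time"

	"github.com/navidrome/navidrome/utils/cache"
)

func main() {
	c := cache.NewSimpleCache[string, int](cache.Options{
		SizeLimit:  100,
		DefaultTTL: time.Minute,
	})

	calls := 0
	load := func(key string) (int, time.Duration, error) {
		calls++ // an expensive computation would go here
		return len(key), time.Minute, nil
	}

	v1, _ := c.GetWithLoader("hello", load)
	v2, _ := c.GetWithLoader("hello", load) // cached: loader is not called again
	fmt.Println(v1, v2, calls)              // 5 5 1
}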

utils/cache/simple_cache_test.go (new file, +161 lines)

@@ -0,0 +1,161 @@
package cache
import (
"errors"
"fmt"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("SimpleCache", func() {
var (
cache SimpleCache[string, string]
)
BeforeEach(func() {
cache = NewSimpleCache[string, string]()
})
Describe("Add and Get", func() {
It("should add and retrieve a value", func() {
err := cache.Add("key", "value")
Expect(err).NotTo(HaveOccurred())
value, err := cache.Get("key")
Expect(err).NotTo(HaveOccurred())
Expect(value).To(Equal("value"))
})
})
Describe("AddWithTTL and Get", func() {
It("should add a value with TTL and retrieve it", func() {
err := cache.AddWithTTL("key", "value", 1*time.Minute)
Expect(err).NotTo(HaveOccurred())
value, err := cache.Get("key")
Expect(err).NotTo(HaveOccurred())
Expect(value).To(Equal("value"))
})
It("should not retrieve a value after its TTL has expired", func() {
err := cache.AddWithTTL("key", "value", 10*time.Millisecond)
Expect(err).NotTo(HaveOccurred())
time.Sleep(50 * time.Millisecond)
_, err = cache.Get("key")
Expect(err).To(HaveOccurred())
})
})
Describe("GetWithLoader", func() {
It("should retrieve a value using the loader function", func() {
loader := func(key string) (string, time.Duration, error) {
return fmt.Sprintf("%s=value", key), 1 * time.Minute, nil
}
value, err := cache.GetWithLoader("key", loader)
Expect(err).NotTo(HaveOccurred())
Expect(value).To(Equal("key=value"))
})
It("should return the error returned by the loader function", func() {
loader := func(key string) (string, time.Duration, error) {
return "", 0, errors.New("some error")
}
_, err := cache.GetWithLoader("key", loader)
Expect(err).To(HaveOccurred())
})
})
Describe("Keys and Values", func() {
It("should return all keys and all values", func() {
err := cache.Add("key1", "value1")
Expect(err).NotTo(HaveOccurred())
err = cache.Add("key2", "value2")
Expect(err).NotTo(HaveOccurred())
keys := cache.Keys()
Expect(keys).To(ConsistOf("key1", "key2"))
values := cache.Values()
Expect(values).To(ConsistOf("value1", "value2"))
})
Context("when there are expired items in the cache", func() {
It("should not return expired items", func() {
Expect(cache.Add("key0", "value0")).To(Succeed())
for i := 1; i <= 3; i++ {
err := cache.AddWithTTL(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i), 10*time.Millisecond)
Expect(err).NotTo(HaveOccurred())
}
time.Sleep(50 * time.Millisecond)
Expect(cache.Keys()).To(ConsistOf("key0"))
Expect(cache.Values()).To(ConsistOf("value0"))
})
})
})
Describe("Options", func() {
Context("when size limit is set", func() {
BeforeEach(func() {
cache = NewSimpleCache[string, string](Options{
SizeLimit: 2,
})
})
It("should drop the oldest item when the size limit is reached", func() {
for i := 1; i <= 3; i++ {
err := cache.Add(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i))
Expect(err).NotTo(HaveOccurred())
}
Expect(cache.Keys()).To(ConsistOf("key2", "key3"))
})
})
Context("when default TTL is set", func() {
BeforeEach(func() {
cache = NewSimpleCache[string, string](Options{
DefaultTTL: 10 * time.Millisecond,
})
})
It("should expire items after the default TTL", func() {
Expect(cache.AddWithTTL("key0", "value0", 1*time.Minute)).To(Succeed())
for i := 1; i <= 3; i++ {
err := cache.Add(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i))
Expect(err).NotTo(HaveOccurred())
}
time.Sleep(50 * time.Millisecond)
for i := 1; i <= 3; i++ {
_, err := cache.Get(fmt.Sprintf("key%d", i))
Expect(err).To(HaveOccurred())
}
Expect(cache.Get("key0")).To(Equal("value0"))
})
})
Describe("OnExpiration", func() {
It("should call callback when item expires", func() {
cache = NewSimpleCache[string, string]()
expired := make(chan struct{})
cache.OnExpiration(func(k, v string) { close(expired) })
Expect(cache.AddWithTTL("key", "value", 10*time.Millisecond)).To(Succeed())
select {
case <-expired:
case <-time.After(100 * time.Millisecond):
Fail("expiration callback not called")
}
})
})
})
})

utils/cache/spread_fs.go (new file, +110 lines)

@@ -0,0 +1,110 @@
package cache
import (
"crypto/sha1"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/djherbis/atime"
"github.com/djherbis/fscache"
"github.com/djherbis/stream"
"github.com/navidrome/navidrome/log"
)
type spreadFS struct {
root string
mode os.FileMode
init func() error
}
// NewSpreadFS returns a FileSystem rooted at directory dir. This FS hashes the key and
// distributes all files in a layout like XX/XX/XXXXXXXXXX. Ex:
//
// Key is abc123.300x300.jpg
// Hash would be: c574aeb3caafcf93ee337f0cf34e31a428ba3f13
// File in cache would be: c5 / 74 / c574aeb3caafcf93ee337f0cf34e31a428ba3f13
//
// The idea is to avoid having too many files in one dir, which can cause performance issues
// and may hit limits depending on the OS.
// See discussion here: https://github.com/djherbis/fscache/issues/8#issuecomment-614319323
//
// dir is created with specified mode if it doesn't exist.
func NewSpreadFS(dir string, mode os.FileMode) (*spreadFS, error) {
f := &spreadFS{root: dir, mode: mode, init: func() error {
return os.MkdirAll(dir, mode)
}}
return f, f.init()
}
func (sfs *spreadFS) Reload(f func(key string, name string)) error {
count := 0
err := filepath.WalkDir(sfs.root, func(absoluteFilePath string, de fs.DirEntry, err error) error {
if err != nil {
log.Error("Error loading cache", "dir", sfs.root, err)
}
path, err := filepath.Rel(sfs.root, absoluteFilePath)
if err != nil {
return nil //nolint:nilerr
}
// Skip paths that are not in the XX/XX/<40-char-hash> format
parts := strings.Split(path, string(os.PathSeparator))
if len(parts) != 3 || len(parts[0]) != 2 || len(parts[1]) != 2 || len(parts[2]) != 40 {
return nil
}
f(absoluteFilePath, absoluteFilePath)
count++
return nil
})
if err == nil {
log.Debug("Loaded cache", "dir", sfs.root, "numItems", count)
}
return err
}
func (sfs *spreadFS) Create(name string) (stream.File, error) {
path := filepath.Dir(name)
err := os.MkdirAll(path, sfs.mode)
if err != nil {
return nil, err
}
return os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
}
func (sfs *spreadFS) Open(name string) (stream.File, error) {
return os.Open(name)
}
func (sfs *spreadFS) Remove(name string) error {
return os.Remove(name)
}
func (sfs *spreadFS) Stat(name string) (fscache.FileInfo, error) {
stat, err := os.Stat(name)
if err != nil {
return fscache.FileInfo{}, err
}
return fscache.FileInfo{FileInfo: stat, Atime: atime.Get(stat)}, nil
}
func (sfs *spreadFS) RemoveAll() error {
if err := os.RemoveAll(sfs.root); err != nil {
return err
}
return sfs.init()
}
func (sfs *spreadFS) KeyMapper(key string) string {
// When running the Haunter, fscache can call this KeyMapper with the cached filepath instead of the key.
// That's because the Reload function above does not report the original cache keys back to fscache.
// If that's the case, just return the file path, as it is the actual mapped key.
if strings.HasPrefix(key, sfs.root) {
return key
}
hash := fmt.Sprintf("%x", sha1.Sum([]byte(key)))
return filepath.Join(sfs.root, hash[0:2], hash[2:4], hash)
}
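
A tiny sketch of the resulting layout, reproducing the example from the doc comment above (the /cache root is illustrative):

package main

import (
	"crypto/sha1"
	"fmt"
	"path/filepath"
)

func main() {
	key := "abc123.300x300.jpg"
	hash := fmt.Sprintf("%x", sha1.Sum([]byte(key)))
	// Same fan-out as spreadFS.KeyMapper: two 2-char dirs, then the full 40-char hash
	fmt.Println(filepath.Join("/cache", hash[0:2], hash[2:4], hash))
	// Per the doc comment above: /cache/c5/74/c574aeb3caafcf93ee337f0cf34e31a428ba3f13
}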

utils/cache/spread_fs_test.go (new file, +69 lines)

@@ -0,0 +1,69 @@
package cache
import (
"os"
"path/filepath"
"strings"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Spread FS", func() {
var fs *spreadFS
var rootDir string
BeforeEach(func() {
var err error
rootDir, _ = os.MkdirTemp("", "spread_fs")
fs, err = NewSpreadFS(rootDir, 0755)
Expect(err).To(BeNil())
})
AfterEach(func() {
_ = os.RemoveAll(rootDir)
})
Describe("KeyMapper", func() {
It("creates a file with proper name format", func() {
mapped := fs.KeyMapper("abc")
Expect(mapped).To(HavePrefix(fs.root))
mapped = strings.TrimPrefix(mapped, fs.root)
parts := strings.Split(mapped, string(filepath.Separator))
Expect(parts).To(HaveLen(4))
Expect(parts[3]).To(HaveLen(40))
})
It("returns the unmodified key if it is a cache file path", func() {
mapped := fs.KeyMapper("abc")
Expect(mapped).To(HavePrefix(fs.root))
Expect(fs.KeyMapper(mapped)).To(Equal(mapped))
})
})
Describe("Reload", func() {
var files []string
BeforeEach(func() {
files = []string{"aaaaa", "bbbbb", "ccccc"}
for _, content := range files {
file := fs.KeyMapper(content)
f, err := fs.Create(file)
Expect(err).To(BeNil())
_, _ = f.Write([]byte(content))
_ = f.Close()
}
})
It("loads all files from fs", func() {
var actual []string
err := fs.Reload(func(key string, name string) {
Expect(key).To(Equal(name))
data, err := os.ReadFile(name)
Expect(err).To(BeNil())
actual = append(actual, string(data))
})
Expect(err).To(BeNil())
Expect(actual).To(HaveLen(len(files)))
Expect(actual).To(ContainElements(files[0], files[1], files[2]))
})
})
})