update
core/storage/interface.go (new file)
@@ -0,0 +1,25 @@
package storage

import (
    "context"
    "io/fs"

    "github.com/navidrome/navidrome/model/metadata"
)

type Storage interface {
    FS() (MusicFS, error)
}

// MusicFS is an interface that extends the fs.FS interface with the ability to read tags from files
type MusicFS interface {
    fs.FS
    ReadTags(path ...string) (map[string]metadata.Info, error)
}

// Watcher is a storage with the ability to watch the FS and notify changes
type Watcher interface {
    // Start starts a watcher on the whole FS and returns a channel to send detected changes.
    // The watcher must be stopped when the context is done.
    Start(context.Context) (<-chan string, error)
}
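Taken together, these interfaces let callers treat any backend uniformly. Below is a minimal, hypothetical usage sketch (not part of this commit): it assumes the file:// scheme registered by core/storage/local later in this commit, that an extractor has already been registered and selected via the Scanner.Extractor configuration (the full application does this elsewhere), and that the /music path and track name are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/navidrome/navidrome/core/storage"
    _ "github.com/navidrome/navidrome/core/storage/local" // registers the file:// scheme via init()
)

func main() {
    // "/music" is a placeholder; storage.For treats schema-less paths as file:// URIs.
    s, err := storage.For("/music")
    if err != nil {
        log.Fatal(err)
    }
    musicFS, err := s.FS()
    if err != nil {
        log.Fatal(err)
    }
    // The path is relative to the FS root and purely illustrative.
    tags, err := musicFS.ReadTags("Artist/Album/01 - Track.mp3")
    if err != nil {
        log.Fatal(err)
    }
    for p, info := range tags {
        fmt.Println(p, info.Tags["title"])
    }
}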
core/storage/local/extractors.go (new file)
@@ -0,0 +1,29 @@
package local

import (
    "io/fs"
    "sync"

    "github.com/navidrome/navidrome/model/metadata"
)

// Extractor is an interface that defines the methods that a tag/metadata extractor must implement
type Extractor interface {
    Parse(files ...string) (map[string]metadata.Info, error)
    Version() string
}

type extractorConstructor func(fs.FS, string) Extractor

var (
    extractors = map[string]extractorConstructor{}
    lock       sync.RWMutex
)

// RegisterExtractor registers a new extractor, so it can be used by the local storage. The one to be used is
// defined with the configuration option Scanner.Extractor.
func RegisterExtractor(id string, f extractorConstructor) {
    lock.Lock()
    defer lock.Unlock()
    extractors[id] = f
}
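A sketch of how a custom extractor might plug into this registry; the myextractor package, the "noop" id, and the no-op Parse body are hypothetical stand-ins, but the init-time registration is the pattern RegisterExtractor above is designed for.

package myextractor // hypothetical package, not part of this commit

import (
    "io/fs"

    "github.com/navidrome/navidrome/core/storage/local"
    "github.com/navidrome/navidrome/model/metadata"
)

// noopExtractor satisfies local.Extractor but returns no tags.
// A real extractor would open each file in the given FS and parse it.
type noopExtractor struct {
    fs   fs.FS
    root string
}

func (e noopExtractor) Parse(files ...string) (map[string]metadata.Info, error) {
    return map[string]metadata.Info{}, nil
}

func (e noopExtractor) Version() string { return "0.0.1" }

func init() {
    // "noop" is the id Scanner.Extractor must be set to for this extractor to be used.
    local.RegisterExtractor("noop", func(vfs fs.FS, root string) local.Extractor {
        return noopExtractor{fs: vfs, root: root}
    })
}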
core/storage/local/local.go (new file)
@@ -0,0 +1,91 @@
package local

import (
    "fmt"
    "io/fs"
    "net/url"
    "os"
    "path/filepath"
    "sync/atomic"
    "time"

    "github.com/djherbis/times"
    "github.com/navidrome/navidrome/conf"
    "github.com/navidrome/navidrome/core/storage"
    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/model/metadata"
)

// localStorage implements a Storage that reads the files from the local filesystem and uses registered extractors
// to extract the metadata and tags from the files.
type localStorage struct {
    u            url.URL
    extractor    Extractor
    resolvedPath string
    watching     atomic.Bool
}

func newLocalStorage(u url.URL) storage.Storage {
    newExtractor, ok := extractors[conf.Server.Scanner.Extractor]
    if !ok || newExtractor == nil {
        log.Fatal("Extractor not found", "path", conf.Server.Scanner.Extractor)
    }
    isWindowsPath := filepath.VolumeName(u.Host) != ""
    if u.Scheme == storage.LocalSchemaID && isWindowsPath {
        u.Path = filepath.Join(u.Host, u.Path)
    }
    resolvedPath, err := filepath.EvalSymlinks(u.Path)
    if err != nil {
        log.Warn("Error resolving path", "path", u.Path, "err", err)
        resolvedPath = u.Path
    }
    return &localStorage{u: u, extractor: newExtractor(os.DirFS(u.Path), u.Path), resolvedPath: resolvedPath}
}

func (s *localStorage) FS() (storage.MusicFS, error) {
    path := s.u.Path
    if _, err := os.Stat(path); err != nil {
        return nil, fmt.Errorf("%w: %s", err, path)
    }
    return &localFS{FS: os.DirFS(path), extractor: s.extractor}, nil
}

type localFS struct {
    fs.FS
    extractor Extractor
}

func (lfs *localFS) ReadTags(path ...string) (map[string]metadata.Info, error) {
    res, err := lfs.extractor.Parse(path...)
    if err != nil {
        return nil, err
    }
    for path, v := range res {
        if v.FileInfo == nil {
            info, err := fs.Stat(lfs, path)
            if err != nil {
                return nil, err
            }
            v.FileInfo = localFileInfo{info}
            res[path] = v
        }
    }
    return res, nil
}

// localFileInfo is a wrapper around fs.FileInfo that adds a BirthTime method, to make it compatible
// with metadata.FileInfo
type localFileInfo struct {
    fs.FileInfo
}

func (lfi localFileInfo) BirthTime() time.Time {
    if ts := times.Get(lfi.FileInfo); ts.HasBirthTime() {
        return ts.BirthTime()
    }
    return time.Now()
}

func init() {
    storage.Register(storage.LocalSchemaID, newLocalStorage)
}
core/storage/local/local_suite_test.go (new file)
@@ -0,0 +1,17 @@
package local

import (
    "testing"

    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/tests"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

func TestLocal(t *testing.T) {
    tests.Init(t, false)
    log.SetLevel(log.LevelFatal)
    RegisterFailHandler(Fail)
    RunSpecs(t, "Local Storage Suite")
}
core/storage/local/local_test.go (new file)
@@ -0,0 +1,428 @@
package local

import (
    "io/fs"
    "net/url"
    "os"
    "path/filepath"
    "runtime"
    "time"

    "github.com/navidrome/navidrome/conf"
    "github.com/navidrome/navidrome/conf/configtest"
    "github.com/navidrome/navidrome/core/storage"
    "github.com/navidrome/navidrome/model/metadata"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = Describe("LocalStorage", func() {
    var tempDir string
    var testExtractor *mockTestExtractor

    BeforeEach(func() {
        DeferCleanup(configtest.SetupConfig())

        // Create a temporary directory for testing
        var err error
        tempDir, err = os.MkdirTemp("", "navidrome-local-storage-test-")
        Expect(err).ToNot(HaveOccurred())

        DeferCleanup(func() {
            os.RemoveAll(tempDir)
        })

        // Create and register a test extractor
        testExtractor = &mockTestExtractor{
            results: make(map[string]metadata.Info),
        }
        RegisterExtractor("test", func(fs.FS, string) Extractor {
            return testExtractor
        })
        conf.Server.Scanner.Extractor = "test"
    })

    Describe("newLocalStorage", func() {
        Context("with valid path", func() {
            It("should create a localStorage instance with correct path", func() {
                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                localStorage := storage.(*localStorage)

                Expect(localStorage.u.Scheme).To(Equal("file"))
                // Check that the path is set correctly (could be resolved to real path on macOS)
                Expect(localStorage.u.Path).To(ContainSubstring("navidrome-local-storage-test"))
                Expect(localStorage.resolvedPath).To(ContainSubstring("navidrome-local-storage-test"))
                Expect(localStorage.extractor).ToNot(BeNil())
            })

            It("should handle URL-decoded paths correctly", func() {
                // Create a directory with spaces to test URL decoding
                spacedDir := filepath.Join(tempDir, "test folder")
                err := os.MkdirAll(spacedDir, 0755)
                Expect(err).ToNot(HaveOccurred())

                // Use proper URL construction instead of manual escaping
                u := &url.URL{
                    Scheme: "file",
                    Path:   spacedDir,
                }

                storage := newLocalStorage(*u)
                localStorage, ok := storage.(*localStorage)
                Expect(ok).To(BeTrue())

                Expect(localStorage.u.Path).To(Equal(spacedDir))
            })

            It("should resolve symlinks when possible", func() {
                // Create a real directory and a symlink to it
                realDir := filepath.Join(tempDir, "real")
                linkDir := filepath.Join(tempDir, "link")

                err := os.MkdirAll(realDir, 0755)
                Expect(err).ToNot(HaveOccurred())

                err = os.Symlink(realDir, linkDir)
                Expect(err).ToNot(HaveOccurred())

                u, err := url.Parse("file://" + linkDir)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                localStorage, ok := storage.(*localStorage)
                Expect(ok).To(BeTrue())

                Expect(localStorage.u.Path).To(Equal(linkDir))
                // Check that the resolved path contains the real directory name
                Expect(localStorage.resolvedPath).To(ContainSubstring("real"))
            })

            It("should use u.Path as resolvedPath when symlink resolution fails", func() {
                // Use a non-existent path to trigger symlink resolution failure
                nonExistentPath := filepath.Join(tempDir, "non-existent")

                u, err := url.Parse("file://" + nonExistentPath)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                localStorage, ok := storage.(*localStorage)
                Expect(ok).To(BeTrue())

                Expect(localStorage.u.Path).To(Equal(nonExistentPath))
                Expect(localStorage.resolvedPath).To(Equal(nonExistentPath))
            })
        })

        Context("with Windows path", func() {
            BeforeEach(func() {
                if runtime.GOOS != "windows" {
                    Skip("Windows-specific test")
                }
            })

            It("should handle Windows drive letters correctly", func() {
                u, err := url.Parse("file://C:/music")
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                localStorage, ok := storage.(*localStorage)
                Expect(ok).To(BeTrue())

                Expect(localStorage.u.Path).To(Equal("C:/music"))
            })
        })

        Context("with invalid extractor", func() {
            It("should handle extractor validation correctly", func() {
                // Note: The actual implementation uses log.Fatal which exits the process,
                // so we test the normal path where extractors exist

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                Expect(storage).ToNot(BeNil())
            })
        })
    })

    Describe("localStorage.FS", func() {
        Context("with existing directory", func() {
            It("should return a localFS instance", func() {
                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())
                Expect(musicFS).ToNot(BeNil())

                _, ok := musicFS.(*localFS)
                Expect(ok).To(BeTrue())
            })
        })

        Context("with non-existent directory", func() {
            It("should return an error", func() {
                nonExistentPath := filepath.Join(tempDir, "non-existent")
                u, err := url.Parse("file://" + nonExistentPath)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                _, err = storage.FS()
                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring(nonExistentPath))
            })
        })
    })

    Describe("localFS.ReadTags", func() {
        var testFile string

        BeforeEach(func() {
            // Create a test file
            testFile = filepath.Join(tempDir, "test.mp3")
            err := os.WriteFile(testFile, []byte("test data"), 0600)
            Expect(err).ToNot(HaveOccurred())

            // Reset extractor state
            testExtractor.results = make(map[string]metadata.Info)
            testExtractor.err = nil
        })

        Context("when extractor returns complete metadata", func() {
            It("should return the metadata as-is", func() {
                expectedInfo := metadata.Info{
                    Tags: map[string][]string{
                        "title":  {"Test Song"},
                        "artist": {"Test Artist"},
                    },
                    AudioProperties: metadata.AudioProperties{
                        Duration: 180,
                        BitRate:  320,
                    },
                    FileInfo: &testFileInfo{name: "test.mp3"},
                }

                testExtractor.results["test.mp3"] = expectedInfo

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                results, err := musicFS.ReadTags("test.mp3")
                Expect(err).ToNot(HaveOccurred())
                Expect(results).To(HaveKey("test.mp3"))
                Expect(results["test.mp3"]).To(Equal(expectedInfo))
            })
        })

        Context("when extractor returns metadata without FileInfo", func() {
            It("should populate FileInfo from filesystem", func() {
                incompleteInfo := metadata.Info{
                    Tags: map[string][]string{
                        "title": {"Test Song"},
                    },
                    FileInfo: nil, // Missing FileInfo
                }

                testExtractor.results["test.mp3"] = incompleteInfo

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                results, err := musicFS.ReadTags("test.mp3")
                Expect(err).ToNot(HaveOccurred())
                Expect(results).To(HaveKey("test.mp3"))

                result := results["test.mp3"]
                Expect(result.FileInfo).ToNot(BeNil())
                Expect(result.FileInfo.Name()).To(Equal("test.mp3"))

                // Should be wrapped in localFileInfo
                _, ok := result.FileInfo.(localFileInfo)
                Expect(ok).To(BeTrue())
            })
        })

        Context("when filesystem stat fails", func() {
            It("should return an error", func() {
                incompleteInfo := metadata.Info{
                    Tags:     map[string][]string{"title": {"Test Song"}},
                    FileInfo: nil,
                }

                testExtractor.results["non-existent.mp3"] = incompleteInfo

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                _, err = musicFS.ReadTags("non-existent.mp3")
                Expect(err).To(HaveOccurred())
            })
        })

        Context("when extractor fails", func() {
            It("should return the extractor error", func() {
                testExtractor.err = &extractorError{message: "extractor failed"}

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                _, err = musicFS.ReadTags("test.mp3")
                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring("extractor failed"))
            })
        })

        Context("with multiple files", func() {
            It("should process all files correctly", func() {
                // Create another test file
                testFile2 := filepath.Join(tempDir, "test2.mp3")
                err := os.WriteFile(testFile2, []byte("test data 2"), 0600)
                Expect(err).ToNot(HaveOccurred())

                info1 := metadata.Info{
                    Tags:     map[string][]string{"title": {"Song 1"}},
                    FileInfo: &testFileInfo{name: "test.mp3"},
                }
                info2 := metadata.Info{
                    Tags:     map[string][]string{"title": {"Song 2"}},
                    FileInfo: nil, // This one needs FileInfo populated
                }

                testExtractor.results["test.mp3"] = info1
                testExtractor.results["test2.mp3"] = info2

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                results, err := musicFS.ReadTags("test.mp3", "test2.mp3")
                Expect(err).ToNot(HaveOccurred())
                Expect(results).To(HaveLen(2))

                Expect(results["test.mp3"].FileInfo).To(Equal(&testFileInfo{name: "test.mp3"}))
                Expect(results["test2.mp3"].FileInfo).ToNot(BeNil())
                Expect(results["test2.mp3"].FileInfo.Name()).To(Equal("test2.mp3"))
            })
        })
    })

    Describe("localFileInfo", func() {
        var testFile string
        var fileInfo fs.FileInfo

        BeforeEach(func() {
            testFile = filepath.Join(tempDir, "test.mp3")
            err := os.WriteFile(testFile, []byte("test data"), 0600)
            Expect(err).ToNot(HaveOccurred())

            fileInfo, err = os.Stat(testFile)
            Expect(err).ToNot(HaveOccurred())
        })

        Describe("BirthTime", func() {
            It("should return birth time when available", func() {
                lfi := localFileInfo{FileInfo: fileInfo}
                birthTime := lfi.BirthTime()

                // Birth time should be a valid time (not zero value)
                Expect(birthTime).ToNot(BeZero())
                // Should be around the current time (within last few minutes)
                Expect(birthTime).To(BeTemporally("~", time.Now(), 5*time.Minute))
            })
        })

        It("should delegate all other FileInfo methods", func() {
            lfi := localFileInfo{FileInfo: fileInfo}

            Expect(lfi.Name()).To(Equal(fileInfo.Name()))
            Expect(lfi.Size()).To(Equal(fileInfo.Size()))
            Expect(lfi.Mode()).To(Equal(fileInfo.Mode()))
            Expect(lfi.ModTime()).To(Equal(fileInfo.ModTime()))
            Expect(lfi.IsDir()).To(Equal(fileInfo.IsDir()))
            Expect(lfi.Sys()).To(Equal(fileInfo.Sys()))
        })
    })

    Describe("Storage registration", func() {
        It("should register localStorage for file scheme", func() {
            // This tests the init() function indirectly
            storage, err := storage.For("file://" + tempDir)
            Expect(err).ToNot(HaveOccurred())
            Expect(storage).To(BeAssignableToTypeOf(&localStorage{}))
        })
    })
})

// Test extractor for testing
type mockTestExtractor struct {
    results map[string]metadata.Info
    err     error
}

func (m *mockTestExtractor) Parse(files ...string) (map[string]metadata.Info, error) {
    if m.err != nil {
        return nil, m.err
    }

    result := make(map[string]metadata.Info)
    for _, file := range files {
        if info, exists := m.results[file]; exists {
            result[file] = info
        }
    }
    return result, nil
}

func (m *mockTestExtractor) Version() string {
    return "test-1.0"
}

type extractorError struct {
    message string
}

func (e *extractorError) Error() string {
    return e.message
}

// Test FileInfo that implements metadata.FileInfo
type testFileInfo struct {
    name      string
    size      int64
    mode      fs.FileMode
    modTime   time.Time
    isDir     bool
    birthTime time.Time
}

func (t *testFileInfo) Name() string       { return t.name }
func (t *testFileInfo) Size() int64        { return t.size }
func (t *testFileInfo) Mode() fs.FileMode  { return t.mode }
func (t *testFileInfo) ModTime() time.Time { return t.modTime }
func (t *testFileInfo) IsDir() bool        { return t.isDir }
func (t *testFileInfo) Sys() any           { return nil }
func (t *testFileInfo) BirthTime() time.Time {
    if t.birthTime.IsZero() {
        return time.Now()
    }
    return t.birthTime
}
core/storage/local/watch_events_darwin.go (new file)
@@ -0,0 +1,5 @@
package local

import "github.com/rjeczalik/notify"

const WatchEvents = notify.All | notify.FSEventsInodeMetaMod

core/storage/local/watch_events_default.go (new file)
@@ -0,0 +1,7 @@
//go:build !linux && !darwin && !windows

package local

import "github.com/rjeczalik/notify"

const WatchEvents = notify.All

core/storage/local/watch_events_linux.go (new file)
@@ -0,0 +1,5 @@
package local

import "github.com/rjeczalik/notify"

const WatchEvents = notify.All | notify.InModify | notify.InAttrib

core/storage/local/watch_events_windows.go (new file)
@@ -0,0 +1,5 @@
package local

import "github.com/rjeczalik/notify"

const WatchEvents = notify.All | notify.FileNotifyChangeAttributes
core/storage/local/watcher.go (new file)
@@ -0,0 +1,57 @@
package local

import (
    "context"
    "errors"
    "path/filepath"
    "strings"

    "github.com/navidrome/navidrome/log"
    "github.com/rjeczalik/notify"
)

// Start starts a watcher on the whole FS and returns a channel to send detected changes.
// It uses `notify` to detect changes in the filesystem, so it may not work on all platforms/use-cases.
// Notoriously, it does not work on some networked mounts and Windows with WSL2.
func (s *localStorage) Start(ctx context.Context) (<-chan string, error) {
    if !s.watching.CompareAndSwap(false, true) {
        return nil, errors.New("watcher already started")
    }
    input := make(chan notify.EventInfo, 1)
    output := make(chan string, 1)

    started := make(chan struct{})
    go func() {
        defer close(input)
        defer close(output)

        libPath := filepath.Join(s.u.Path, "...")
        log.Debug(ctx, "Starting watcher", "lib", libPath)
        err := notify.Watch(libPath, input, WatchEvents)
        if err != nil {
            log.Error("Error starting watcher", "lib", libPath, err)
            return
        }
        defer notify.Stop(input)
        close(started) // signals the main goroutine we have started

        for {
            select {
            case event := <-input:
                log.Trace(ctx, "Detected change", "event", event, "lib", s.u.Path)
                name := event.Path()
                name = strings.Replace(name, s.resolvedPath, s.u.Path, 1)
                output <- name
            case <-ctx.Done():
                log.Debug(ctx, "Stopping watcher", "path", s.u.Path)
                s.watching.Store(false)
                return
            }
        }
    }()
    select {
    case <-started:
    case <-ctx.Done():
    }
    return output, nil
}
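A hedged sketch of how a caller might consume this watcher, using only the storage.Watcher interface from this commit; the watchLibrary helper, its package, and the println output are illustrative, not part of the change.

package example // hypothetical consumer, not part of this commit

import (
    "context"
    "errors"
    "fmt"

    "github.com/navidrome/navidrome/core/storage"
)

// watchLibrary drains the change channel until the context is cancelled
// or the watcher closes the channel.
func watchLibrary(ctx context.Context, s storage.Storage) error {
    w, ok := s.(storage.Watcher)
    if !ok {
        return errors.New("storage does not support watching")
    }
    changes, err := w.Start(ctx)
    if err != nil {
        return err
    }
    for {
        select {
        case path, open := <-changes:
            if !open {
                return nil // channel closed, watcher stopped
            }
            fmt.Println("changed:", path)
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}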
core/storage/local/watcher_test.go (new file)
@@ -0,0 +1,139 @@
package local_test

import (
    "context"
    "io/fs"
    "os"
    "path/filepath"
    "time"

    "github.com/navidrome/navidrome/conf"
    "github.com/navidrome/navidrome/core/storage"
    "github.com/navidrome/navidrome/core/storage/local"
    _ "github.com/navidrome/navidrome/core/storage/local"
    "github.com/navidrome/navidrome/model/metadata"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = XDescribe("Watcher", func() {
    var lsw storage.Watcher
    var tmpFolder string

    BeforeEach(func() {
        tmpFolder = GinkgoT().TempDir()

        local.RegisterExtractor("noop", func(fs fs.FS, path string) local.Extractor { return noopExtractor{} })
        conf.Server.Scanner.Extractor = "noop"

        ls, err := storage.For(tmpFolder)
        Expect(err).ToNot(HaveOccurred())

        // It should implement Watcher
        var ok bool
        lsw, ok = ls.(storage.Watcher)
        Expect(ok).To(BeTrue())

        // Make sure temp folder is created
        Eventually(func() error {
            _, err := os.Stat(tmpFolder)
            return err
        }).Should(Succeed())
    })

    It("should start and stop watcher", func() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        w, err := lsw.Start(ctx)
        Expect(err).ToNot(HaveOccurred())
        cancel()
        Eventually(w).Should(BeClosed())
    })

    It("should return error if watcher is already started", func() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        _, err := lsw.Start(ctx)
        Expect(err).ToNot(HaveOccurred())
        _, err = lsw.Start(ctx)
        Expect(err).To(HaveOccurred())
    })

    It("should detect new files", func() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        changes, err := lsw.Start(ctx)
        Expect(err).ToNot(HaveOccurred())

        _, err = os.Create(filepath.Join(tmpFolder, "test.txt"))
        Expect(err).ToNot(HaveOccurred())

        Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(tmpFolder)))
    })

    It("should detect new subfolders", func() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        changes, err := lsw.Start(ctx)
        Expect(err).ToNot(HaveOccurred())

        Expect(os.Mkdir(filepath.Join(tmpFolder, "subfolder"), 0755)).To(Succeed())

        Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(filepath.Join(tmpFolder, "subfolder"))))
    })

    It("should detect changes in subfolders recursively", func() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        subfolder := filepath.Join(tmpFolder, "subfolder1/subfolder2")
        Expect(os.MkdirAll(subfolder, 0755)).To(Succeed())

        changes, err := lsw.Start(ctx)
        Expect(err).ToNot(HaveOccurred())

        filePath := filepath.Join(subfolder, "test.txt")
        Expect(os.WriteFile(filePath, []byte("test"), 0600)).To(Succeed())

        Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(filePath)))
    })

    It("should detect removed files", func() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
        changes, err := lsw.Start(ctx)
        Expect(err).ToNot(HaveOccurred())

        filePath := filepath.Join(tmpFolder, "test.txt")
        Expect(os.WriteFile(filePath, []byte("test"), 0600)).To(Succeed())

        Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(filePath)))

        Expect(os.Remove(filePath)).To(Succeed())
        Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(filePath)))
    })

    It("should detect file moves", func() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        filePath := filepath.Join(tmpFolder, "test.txt")
        Expect(os.WriteFile(filePath, []byte("test"), 0600)).To(Succeed())

        changes, err := lsw.Start(ctx)
        Expect(err).ToNot(HaveOccurred())

        newPath := filepath.Join(tmpFolder, "test2.txt")
        Expect(os.Rename(filePath, newPath)).To(Succeed())
        Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(newPath)))
    })
})

type noopExtractor struct{}

func (s noopExtractor) Parse(files ...string) (map[string]metadata.Info, error) { return nil, nil }
func (s noopExtractor) Version() string                                         { return "0" }
core/storage/storage.go (new file)
@@ -0,0 +1,60 @@
package storage

import (
    "errors"
    "net/url"
    "path/filepath"
    "strings"
    "sync"

    "github.com/navidrome/navidrome/utils/slice"
)

const LocalSchemaID = "file"

type constructor func(url.URL) Storage

var (
    registry = map[string]constructor{}
    lock     sync.RWMutex
)

func Register(schema string, c constructor) {
    lock.Lock()
    defer lock.Unlock()
    registry[schema] = c
}

// For returns a Storage implementation for the given URI.
// It uses the schema part of the URI to find the correct registered
// Storage constructor.
// If the URI does not contain a schema, it is treated as a file:// URI.
func For(uri string) (Storage, error) {
    lock.RLock()
    defer lock.RUnlock()
    parts := strings.Split(uri, "://")

    // Paths without schema are treated as file:// and use the default LocalStorage implementation
    if len(parts) < 2 {
        uri, _ = filepath.Abs(uri)
        uri = filepath.ToSlash(uri)

        // Properly escape each path component using URL standards
        pathParts := strings.Split(uri, "/")
        escapedParts := slice.Map(pathParts, func(s string) string {
            return url.PathEscape(s)
        })

        uri = LocalSchemaID + "://" + strings.Join(escapedParts, "/")
    }

    u, err := url.Parse(uri)
    if err != nil {
        return nil, err
    }
    c, ok := registry[u.Scheme]
    if !ok {
        return nil, errors.New("schema '" + u.Scheme + "' not registered")
    }
    return c(*u), nil
}
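A sketch of how another backend could hook into this registry; the s3storage package name and its stub FS are hypothetical, but the init-time Register call mirrors what core/storage/local does above.

package s3storage // hypothetical backend, illustrating the registration pattern only

import (
    "errors"
    "net/url"

    "github.com/navidrome/navidrome/core/storage"
)

type s3Storage struct {
    u url.URL
}

// FS would normally return a MusicFS backed by the bucket; this sketch leaves it unimplemented.
func (s *s3Storage) FS() (storage.MusicFS, error) {
    return nil, errors.New("not implemented")
}

func init() {
    // After this, storage.For("s3://my-bucket/music") returns an *s3Storage.
    storage.Register("s3", func(u url.URL) storage.Storage { return &s3Storage{u: u} })
}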
core/storage/storage_test.go (new file)
@@ -0,0 +1,93 @@
package storage

import (
    "net/url"
    "os"
    "path/filepath"
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

func TestApp(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Storage Test Suite")
}

var _ = Describe("Storage", func() {
    When("schema is not registered", func() {
        BeforeEach(func() {
            registry = map[string]constructor{}
        })

        It("should return error", func() {
            _, err := For("file:///tmp")
            Expect(err).To(HaveOccurred())
        })
    })
    When("schema is registered", func() {
        BeforeEach(func() {
            registry = map[string]constructor{}
            Register("file", func(url url.URL) Storage { return &fakeLocalStorage{u: url} })
            Register("s3", func(url url.URL) Storage { return &fakeS3Storage{u: url} })
        })

        It("should return correct implementation", func() {
            s, err := For("file:///tmp")
            Expect(err).ToNot(HaveOccurred())
            Expect(s).To(BeAssignableToTypeOf(&fakeLocalStorage{}))
            Expect(s.(*fakeLocalStorage).u.Scheme).To(Equal("file"))
            Expect(s.(*fakeLocalStorage).u.Path).To(Equal("/tmp"))

            s, err = For("s3:///bucket")
            Expect(err).ToNot(HaveOccurred())
            Expect(s).To(BeAssignableToTypeOf(&fakeS3Storage{}))
            Expect(s.(*fakeS3Storage).u.Scheme).To(Equal("s3"))
            Expect(s.(*fakeS3Storage).u.Path).To(Equal("/bucket"))
        })
        It("should return a file implementation when schema is not specified", func() {
            s, err := For("/tmp")
            Expect(err).ToNot(HaveOccurred())
            Expect(s).To(BeAssignableToTypeOf(&fakeLocalStorage{}))
            Expect(s.(*fakeLocalStorage).u.Scheme).To(Equal("file"))
            Expect(s.(*fakeLocalStorage).u.Path).To(Equal("/tmp"))
        })
        It("should return a file implementation for a relative folder", func() {
            s, err := For("tmp")
            Expect(err).ToNot(HaveOccurred())
            cwd, _ := os.Getwd()
            Expect(s).To(BeAssignableToTypeOf(&fakeLocalStorage{}))
            Expect(s.(*fakeLocalStorage).u.Scheme).To(Equal("file"))
            Expect(s.(*fakeLocalStorage).u.Path).To(Equal(filepath.Join(cwd, "tmp")))
        })
        It("should return error if schema is unregistered", func() {
            _, err := For("webdav:///tmp")
            Expect(err).To(HaveOccurred())
        })
        DescribeTable("should handle paths with special characters correctly",
            func(inputPath string) {
                s, err := For(inputPath)
                Expect(err).ToNot(HaveOccurred())
                Expect(s).To(BeAssignableToTypeOf(&fakeLocalStorage{}))
                Expect(s.(*fakeLocalStorage).u.Scheme).To(Equal("file"))
                // The path should be exactly the same as the input - after URL parsing it gets decoded back
                Expect(s.(*fakeLocalStorage).u.Path).To(Equal(inputPath))
            },
            Entry("hash symbols", "/tmp/test#folder/file.mp3"),
            Entry("spaces", "/tmp/test folder/file with spaces.mp3"),
            Entry("question marks", "/tmp/test?query/file.mp3"),
            Entry("ampersands", "/tmp/test&/file.mp3"),
            Entry("multiple special chars", "/tmp/Song #1 & More?.mp3"),
        )
    })
})

type fakeLocalStorage struct {
    Storage
    u url.URL
}
type fakeS3Storage struct {
    Storage
    u url.URL
}
core/storage/storagetest/fake_storage.go (new file)
@@ -0,0 +1,323 @@
//nolint:unused
package storagetest

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/fs"
    "net/url"
    "path"
    "testing/fstest"
    "time"

    "github.com/navidrome/navidrome/core/storage"
    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/model/metadata"
    "github.com/navidrome/navidrome/utils/random"
)

// FakeStorage is a fake storage that provides a FakeFS.
// It is used for testing purposes.
type FakeStorage struct{ fs *FakeFS }

// Register registers the FakeStorage for the given scheme. To use it, set the model.Library's Path to "fake:///music",
// and register a FakeFS with schema = "fake". The storage registered will always return the same FakeFS instance.
func Register(schema string, fs *FakeFS) {
    storage.Register(schema, func(url url.URL) storage.Storage { return &FakeStorage{fs: fs} })
}

func (s FakeStorage) FS() (storage.MusicFS, error) {
    return s.fs, nil
}

// FakeFS is a fake filesystem that can be used for testing purposes.
// It implements the storage.MusicFS interface and keeps all files in memory, by using a fstest.MapFS internally.
// You must NOT add files directly in the MapFS property, but use SetFiles and its other methods instead.
// This is because the FakeFS keeps track of the latest modification time of directories, simulating the
// behavior of a real filesystem, and you should not bypass this logic.
type FakeFS struct {
    fstest.MapFS
    properInit bool
}

func (ffs *FakeFS) SetFiles(files fstest.MapFS) {
    ffs.properInit = true
    ffs.MapFS = files
    ffs.createDirTimestamps()
}

func (ffs *FakeFS) Add(filePath string, file *fstest.MapFile, when ...time.Time) {
    if len(when) == 0 {
        when = append(when, time.Now())
    }
    ffs.MapFS[filePath] = file
    ffs.touchContainingFolder(filePath, when[0])
    ffs.createDirTimestamps()
}

func (ffs *FakeFS) Remove(filePath string, when ...time.Time) *fstest.MapFile {
    filePath = path.Clean(filePath)
    if len(when) == 0 {
        when = append(when, time.Now())
    }
    if f, ok := ffs.MapFS[filePath]; ok {
        ffs.touchContainingFolder(filePath, when[0])
        delete(ffs.MapFS, filePath)
        return f
    }
    return nil
}

func (ffs *FakeFS) Move(srcPath string, destPath string, when ...time.Time) {
    if len(when) == 0 {
        when = append(when, time.Now())
    }
    srcPath = path.Clean(srcPath)
    destPath = path.Clean(destPath)
    ffs.MapFS[destPath] = ffs.MapFS[srcPath]
    ffs.touchContainingFolder(destPath, when[0])
    ffs.Remove(srcPath, when...)
}

// Touch sets the modification time of a file.
func (ffs *FakeFS) Touch(filePath string, when ...time.Time) {
    if len(when) == 0 {
        when = append(when, time.Now())
    }
    filePath = path.Clean(filePath)
    file, ok := ffs.MapFS[filePath]
    if ok {
        file.ModTime = when[0]
    } else {
        ffs.MapFS[filePath] = &fstest.MapFile{ModTime: when[0]}
    }
    ffs.touchContainingFolder(filePath, file.ModTime)
}

func (ffs *FakeFS) touchContainingFolder(filePath string, ts time.Time) {
    dir := path.Dir(filePath)
    dirFile, ok := ffs.MapFS[dir]
    if !ok {
        log.Fatal("Directory not found. Forgot to call SetFiles?", "file", filePath)
    }
    if dirFile.ModTime.Before(ts) {
        dirFile.ModTime = ts
    }
}

// SetError sets an error that will be returned when trying to read the file.
func (ffs *FakeFS) SetError(filePath string, err error) {
    filePath = path.Clean(filePath)
    if ffs.MapFS[filePath] == nil {
        ffs.MapFS[filePath] = &fstest.MapFile{Data: []byte{}}
    }
    ffs.MapFS[filePath].Sys = err
    ffs.Touch(filePath)
}

// ClearError clears the error set by SetError.
func (ffs *FakeFS) ClearError(filePath string) {
    filePath = path.Clean(filePath)
    if file := ffs.MapFS[filePath]; file != nil {
        file.Sys = nil
    }
    ffs.Touch(filePath)
}

func (ffs *FakeFS) UpdateTags(filePath string, newTags map[string]any, when ...time.Time) {
    f, ok := ffs.MapFS[filePath]
    if !ok {
        panic(fmt.Errorf("file %s not found", filePath))
    }
    var tags map[string]any
    err := json.Unmarshal(f.Data, &tags)
    if err != nil {
        panic(err)
    }
    for k, v := range newTags {
        tags[k] = v
    }
    data, _ := json.Marshal(tags)
    f.Data = data
    ffs.Touch(filePath, when...)
}

// createDirTimestamps loops through all entries and creates/updates directory entries in the map with the
// latest ModTime from any children of that directory.
func (ffs *FakeFS) createDirTimestamps() bool {
    var changed bool
    for filePath, file := range ffs.MapFS {
        dir := path.Dir(filePath)
        dirFile, ok := ffs.MapFS[dir]
        if !ok {
            dirFile = &fstest.MapFile{Mode: fs.ModeDir}
            ffs.MapFS[dir] = dirFile
        }
        if dirFile.ModTime.IsZero() {
            dirFile.ModTime = file.ModTime
            changed = true
        }
    }
    if changed {
        // If we updated any directory, we need to re-run the loop to create any parent directories
        ffs.createDirTimestamps()
    }
    return changed
}

func ModTime(ts string) map[string]any   { return map[string]any{fakeFileInfoModTime: ts} }
func BirthTime(ts string) map[string]any { return map[string]any{fakeFileInfoBirthTime: ts} }

func Template(t ...map[string]any) func(...map[string]any) *fstest.MapFile {
    return func(tags ...map[string]any) *fstest.MapFile {
        return MP3(append(t, tags...)...)
    }
}

func Track(num int, title string, tags ...map[string]any) map[string]any {
    ts := audioProperties("mp3", 320)
    ts["title"] = title
    ts["track"] = num
    for _, t := range tags {
        for k, v := range t {
            ts[k] = v
        }
    }
    return ts
}

func MP3(tags ...map[string]any) *fstest.MapFile {
    ts := audioProperties("mp3", 320)
    if _, ok := ts[fakeFileInfoSize]; !ok {
        duration := ts["duration"].(int64)
        bitrate := ts["bitrate"].(int)
        ts[fakeFileInfoSize] = duration * int64(bitrate) / 8 * 1000
    }
    return File(append([]map[string]any{ts}, tags...)...)
}

func File(tags ...map[string]any) *fstest.MapFile {
    ts := map[string]any{}
    for _, t := range tags {
        for k, v := range t {
            ts[k] = v
        }
    }
    modTime := time.Now()
    if mt, ok := ts[fakeFileInfoModTime]; !ok {
        ts[fakeFileInfoModTime] = time.Now().Format(time.RFC3339)
    } else {
        modTime, _ = time.Parse(time.RFC3339, mt.(string))
    }
    if _, ok := ts[fakeFileInfoBirthTime]; !ok {
        ts[fakeFileInfoBirthTime] = time.Now().Format(time.RFC3339)
    }
    if _, ok := ts[fakeFileInfoMode]; !ok {
        ts[fakeFileInfoMode] = fs.ModePerm
    }
    data, _ := json.Marshal(ts)
    if _, ok := ts[fakeFileInfoSize]; !ok {
        ts[fakeFileInfoSize] = int64(len(data))
    }
    return &fstest.MapFile{Data: data, ModTime: modTime, Mode: ts[fakeFileInfoMode].(fs.FileMode)}
}

func audioProperties(suffix string, bitrate int) map[string]any {
    duration := random.Int64N(300) + 120
    return map[string]any{
        "suffix":     suffix,
        "bitrate":    bitrate,
        "duration":   duration,
        "samplerate": 44100,
        "bitdepth":   16,
        "channels":   2,
    }
}

func (ffs *FakeFS) ReadTags(paths ...string) (map[string]metadata.Info, error) {
    if !ffs.properInit {
        log.Fatal("FakeFS not initialized properly. Use SetFiles")
    }
    result := make(map[string]metadata.Info)
    var errs []error
    for _, file := range paths {
        p, err := ffs.parseFile(file)
        if err != nil {
            log.Warn("Error reading metadata from file", "file", file, "err", err)
            errs = append(errs, err)
        } else {
            result[file] = *p
        }
    }
    if len(errs) > 0 {
        return result, fmt.Errorf("errors reading metadata: %w", errors.Join(errs...))
    }
    return result, nil
}

func (ffs *FakeFS) parseFile(filePath string) (*metadata.Info, error) {
    // Check if it should throw an error when reading this file
    stat, err := ffs.Stat(filePath)
    if err != nil {
        return nil, err
    }
    if stat.Sys() != nil {
        return nil, stat.Sys().(error)
    }

    // Read the file contents and parse the tags
    contents, err := fs.ReadFile(ffs, filePath)
    if err != nil {
        return nil, err
    }
    data := map[string]any{}
    err = json.Unmarshal(contents, &data)
    if err != nil {
        return nil, err
    }
    p := metadata.Info{
        Tags:            map[string][]string{},
        AudioProperties: metadata.AudioProperties{},
        HasPicture:      data["has_picture"] == "true",
    }
    if d, ok := data["duration"].(float64); ok {
        p.AudioProperties.Duration = time.Duration(d) * time.Second
    }
    getInt := func(key string) int { v, _ := data[key].(float64); return int(v) }
    p.AudioProperties.BitRate = getInt("bitrate")
    p.AudioProperties.BitDepth = getInt("bitdepth")
    p.AudioProperties.SampleRate = getInt("samplerate")
    p.AudioProperties.Channels = getInt("channels")
    for k, v := range data {
        p.Tags[k] = []string{fmt.Sprintf("%v", v)}
    }
    file := ffs.MapFS[filePath]
    p.FileInfo = &fakeFileInfo{path: filePath, tags: data, file: file}
    return &p, nil
}

const (
    fakeFileInfoMode      = "_mode"
    fakeFileInfoSize      = "_size"
    fakeFileInfoModTime   = "_modtime"
    fakeFileInfoBirthTime = "_birthtime"
)

type fakeFileInfo struct {
    path string
    file *fstest.MapFile
    tags map[string]any
}

func (ffi *fakeFileInfo) Name() string         { return path.Base(ffi.path) }
func (ffi *fakeFileInfo) Size() int64          { v, _ := ffi.tags[fakeFileInfoSize].(float64); return int64(v) }
func (ffi *fakeFileInfo) Mode() fs.FileMode    { return ffi.file.Mode }
func (ffi *fakeFileInfo) IsDir() bool          { return false }
func (ffi *fakeFileInfo) Sys() any             { return nil }
func (ffi *fakeFileInfo) ModTime() time.Time   { return ffi.file.ModTime }
func (ffi *fakeFileInfo) BirthTime() time.Time { return ffi.parseTime(fakeFileInfoBirthTime) }
func (ffi *fakeFileInfo) parseTime(key string) time.Time {
    t, _ := time.Parse(time.RFC3339, ffi.tags[key].(string))
    return t
}
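A hedged sketch of the test wiring these helpers are meant for, complementing the suite that follows (which covers SetFiles, Touch, Add/Remove, and Move but not Register or SetError); the "fake" scheme, the example_test package, the file paths, and the test name are illustrative.

package example_test // hypothetical test, not part of this commit

import (
    "errors"
    "testing"
    "testing/fstest"

    "github.com/navidrome/navidrome/core/storage"
    "github.com/navidrome/navidrome/core/storage/storagetest"
)

func TestFakeStorageWiring(t *testing.T) {
    // Build an in-memory library and register it under the "fake" scheme.
    ffs := &storagetest.FakeFS{}
    ffs.SetFiles(fstest.MapFS{
        "artist/album/track.mp3": storagetest.MP3(storagetest.Track(1, "Track")),
    })
    storagetest.Register("fake", ffs)

    s, err := storage.For("fake:///music")
    if err != nil {
        t.Fatal(err)
    }
    musicFS, err := s.FS()
    if err != nil {
        t.Fatal(err)
    }

    // Simulate an unreadable file; ReadTags should surface the error.
    ffs.SetError("artist/album/track.mp3", errors.New("boom"))
    if _, err := musicFS.ReadTags("artist/album/track.mp3"); err == nil {
        t.Error("expected an error for the unreadable file")
    }
}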
core/storage/storagetest/fake_storage_test.go (new file)
@@ -0,0 +1,139 @@
//nolint:unused
package storagetest_test

import (
    "io/fs"
    "testing"
    "testing/fstest"
    "time"

    . "github.com/navidrome/navidrome/core/storage/storagetest"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

type _t = map[string]any

func TestFakeStorage(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Fake Storage Test Suite")
}

var _ = Describe("FakeFS", func() {
    var ffs FakeFS
    var startTime time.Time

    BeforeEach(func() {
        startTime = time.Now().Add(-time.Hour)
        boy := Template(_t{"albumartist": "U2", "album": "Boy", "year": 1980, "genre": "Rock"})
        files := fstest.MapFS{
            "U2/Boy/I Will Follow.mp3": boy(Track(1, "I Will Follow")),
            "U2/Boy/Twilight.mp3":      boy(Track(2, "Twilight")),
            "U2/Boy/An Cat Dubh.mp3":   boy(Track(3, "An Cat Dubh")),
        }
        ffs.SetFiles(files)
    })

    It("should implement a fs.FS", func() {
        Expect(fstest.TestFS(ffs, "U2/Boy/I Will Follow.mp3")).To(Succeed())
    })

    It("should read file info", func() {
        props, err := ffs.ReadTags("U2/Boy/I Will Follow.mp3", "U2/Boy/Twilight.mp3")
        Expect(err).ToNot(HaveOccurred())

        prop := props["U2/Boy/Twilight.mp3"]
        Expect(prop).ToNot(BeNil())
        Expect(prop.AudioProperties.Channels).To(Equal(2))
        Expect(prop.AudioProperties.BitRate).To(Equal(320))
        Expect(prop.FileInfo.Name()).To(Equal("Twilight.mp3"))
        Expect(prop.Tags["albumartist"]).To(ConsistOf("U2"))
        Expect(prop.FileInfo.ModTime()).To(BeTemporally(">=", startTime))

        prop = props["U2/Boy/I Will Follow.mp3"]
        Expect(prop).ToNot(BeNil())
        Expect(prop.FileInfo.Name()).To(Equal("I Will Follow.mp3"))
    })

    It("should return ModTime for directories", func() {
        root := ffs.MapFS["."]
        dirInfo1, err := ffs.Stat("U2")
        Expect(err).ToNot(HaveOccurred())
        dirInfo2, err := ffs.Stat("U2/Boy")
        Expect(err).ToNot(HaveOccurred())
        Expect(dirInfo1.ModTime()).To(Equal(root.ModTime))
        Expect(dirInfo1.ModTime()).To(BeTemporally(">=", startTime))
        Expect(dirInfo1.ModTime()).To(Equal(dirInfo2.ModTime()))
    })

    When("the file is touched", func() {
        It("should only update the file and the file's directory ModTime", func() {
            root, _ := ffs.Stat(".")
            u2Dir, _ := ffs.Stat("U2")
            boyDir, _ := ffs.Stat("U2/Boy")
            previousTime := root.ModTime()

            aTimeStamp := previousTime.Add(time.Hour)
            ffs.Touch("U2/./Boy/Twilight.mp3", aTimeStamp)

            twilightFile, err := ffs.Stat("U2/Boy/Twilight.mp3")
            Expect(err).ToNot(HaveOccurred())
            Expect(twilightFile.ModTime()).To(Equal(aTimeStamp))

            Expect(root.ModTime()).To(Equal(previousTime))
            Expect(u2Dir.ModTime()).To(Equal(previousTime))
            Expect(boyDir.ModTime()).To(Equal(aTimeStamp))
        })
    })

    When("adding/removing files", func() {
        It("should keep the timestamps correct", func() {
            root, _ := ffs.Stat(".")
            u2Dir, _ := ffs.Stat("U2")
            boyDir, _ := ffs.Stat("U2/Boy")
            previousTime := root.ModTime()
            aTimeStamp := previousTime.Add(time.Hour)

            ffs.Add("U2/Boy/../Boy/Another.mp3", &fstest.MapFile{ModTime: aTimeStamp}, aTimeStamp)
            Expect(u2Dir.ModTime()).To(Equal(previousTime))
            Expect(boyDir.ModTime()).To(Equal(aTimeStamp))

            aTimeStamp = aTimeStamp.Add(time.Hour)
            ffs.Remove("U2/./Boy/Twilight.mp3", aTimeStamp)

            _, err := ffs.Stat("U2/Boy/Twilight.mp3")
            Expect(err).To(MatchError(fs.ErrNotExist))
            Expect(u2Dir.ModTime()).To(Equal(previousTime))
            Expect(boyDir.ModTime()).To(Equal(aTimeStamp))
        })
    })

    When("moving files", func() {
        It("should allow relative paths", func() {
            ffs.Move("U2/../U2/Boy/Twilight.mp3", "./Twilight.mp3")
            Expect(ffs.MapFS).To(HaveKey("Twilight.mp3"))
            file, err := ffs.Stat("Twilight.mp3")
            Expect(err).ToNot(HaveOccurred())
            Expect(file.Name()).To(Equal("Twilight.mp3"))
        })
        It("should keep the timestamps correct", func() {
            root, _ := ffs.Stat(".")
            u2Dir, _ := ffs.Stat("U2")
            boyDir, _ := ffs.Stat("U2/Boy")
            previousTime := root.ModTime()
            twilightFile, _ := ffs.Stat("U2/Boy/Twilight.mp3")
            filePreviousTime := twilightFile.ModTime()
            aTimeStamp := previousTime.Add(time.Hour)

            ffs.Move("U2/Boy/Twilight.mp3", "Twilight.mp3", aTimeStamp)

            Expect(root.ModTime()).To(Equal(aTimeStamp))
            Expect(u2Dir.ModTime()).To(Equal(previousTime))
            Expect(boyDir.ModTime()).To(Equal(aTimeStamp))

            Expect(ffs.MapFS).ToNot(HaveKey("U2/Boy/Twilight.mp3"))
            twilight := ffs.MapFS["Twilight.mp3"]
            Expect(twilight.ModTime).To(Equal(filePreviousTime))
        })
    })
})