update
core/storage/local/extractors.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package local

import (
	"io/fs"
	"sync"

	"github.com/navidrome/navidrome/model/metadata"
)

// Extractor is an interface that defines the methods that a tag/metadata extractor must implement
type Extractor interface {
	Parse(files ...string) (map[string]metadata.Info, error)
	Version() string
}

type extractorConstructor func(fs.FS, string) Extractor

var (
	extractors = map[string]extractorConstructor{}
	lock       sync.RWMutex
)

// RegisterExtractor registers a new extractor, so it can be used by the local storage. The one to be used is
// defined with the configuration option Scanner.Extractor.
func RegisterExtractor(id string, f extractorConstructor) {
	lock.Lock()
	defer lock.Unlock()
	extractors[id] = f
}
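For illustration, here is a minimal sketch of how a concrete extractor could hook into this registry. The dummy package, the "dummy" id, and the dummyExtractor type are hypothetical; only RegisterExtractor, the Extractor interface, and the Scanner.Extractor option come from extractors.go above. Presumably the real extractor (e.g. a TagLib-based one) registers itself the same way from its own package.

package dummy // hypothetical package, for illustration only

import (
	"io/fs"

	"github.com/navidrome/navidrome/core/storage/local"
	"github.com/navidrome/navidrome/model/metadata"
)

// dummyExtractor is a hypothetical extractor that returns empty metadata for every file.
type dummyExtractor struct {
	fs   fs.FS
	path string
}

func (d dummyExtractor) Parse(files ...string) (map[string]metadata.Info, error) {
	res := make(map[string]metadata.Info, len(files))
	for _, f := range files {
		res[f] = metadata.Info{} // a real extractor would read tags from d.fs here
	}
	return res, nil
}

func (d dummyExtractor) Version() string { return "dummy-0.1" }

func init() {
	// Registered under the id "dummy"; selected by setting Scanner.Extractor = "dummy".
	local.RegisterExtractor("dummy", func(f fs.FS, path string) local.Extractor {
		return dummyExtractor{fs: f, path: path}
	})
}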
core/storage/local/local.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package local

import (
	"fmt"
	"io/fs"
	"net/url"
	"os"
	"path/filepath"
	"sync/atomic"
	"time"

	"github.com/djherbis/times"
	"github.com/navidrome/navidrome/conf"
	"github.com/navidrome/navidrome/core/storage"
	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/model/metadata"
)

// localStorage implements a Storage that reads the files from the local filesystem and uses registered extractors
// to extract the metadata and tags from the files.
type localStorage struct {
	u            url.URL
	extractor    Extractor
	resolvedPath string
	watching     atomic.Bool
}

func newLocalStorage(u url.URL) storage.Storage {
	newExtractor, ok := extractors[conf.Server.Scanner.Extractor]
	if !ok || newExtractor == nil {
		log.Fatal("Extractor not found", "path", conf.Server.Scanner.Extractor)
	}
	isWindowsPath := filepath.VolumeName(u.Host) != ""
	if u.Scheme == storage.LocalSchemaID && isWindowsPath {
		u.Path = filepath.Join(u.Host, u.Path)
	}
	resolvedPath, err := filepath.EvalSymlinks(u.Path)
	if err != nil {
		log.Warn("Error resolving path", "path", u.Path, "err", err)
		resolvedPath = u.Path
	}
	return &localStorage{u: u, extractor: newExtractor(os.DirFS(u.Path), u.Path), resolvedPath: resolvedPath}
}

func (s *localStorage) FS() (storage.MusicFS, error) {
	path := s.u.Path
	if _, err := os.Stat(path); err != nil {
		return nil, fmt.Errorf("%w: %s", err, path)
	}
	return &localFS{FS: os.DirFS(path), extractor: s.extractor}, nil
}

type localFS struct {
	fs.FS
	extractor Extractor
}

func (lfs *localFS) ReadTags(path ...string) (map[string]metadata.Info, error) {
	res, err := lfs.extractor.Parse(path...)
	if err != nil {
		return nil, err
	}
	for path, v := range res {
		if v.FileInfo == nil {
			info, err := fs.Stat(lfs, path)
			if err != nil {
				return nil, err
			}
			v.FileInfo = localFileInfo{info}
			res[path] = v
		}
	}
	return res, nil
}

// localFileInfo is a wrapper around fs.FileInfo that adds a BirthTime method, to make it compatible
// with metadata.FileInfo
type localFileInfo struct {
	fs.FileInfo
}

func (lfi localFileInfo) BirthTime() time.Time {
	if ts := times.Get(lfi.FileInfo); ts.HasBirthTime() {
		return ts.BirthTime()
	}
	return time.Now()
}

func init() {
	storage.Register(storage.LocalSchemaID, newLocalStorage)
}
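To show how these pieces fit together, a minimal, hypothetical usage sketch follows. It assumes an extractor has already been registered and selected via conf.Server.Scanner.Extractor (otherwise newLocalStorage calls log.Fatal), and that the /music placeholder directory exists; storage.For and the blank-import registration pattern are taken from the tests in this commit, the rest is illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/navidrome/navidrome/core/storage"
	_ "github.com/navidrome/navidrome/core/storage/local" // registers the "file" scheme via init()
)

func main() {
	// "/music" is a placeholder; in Navidrome the library path comes from the configuration.
	s, err := storage.For("file:///music")
	if err != nil {
		log.Fatal(err)
	}
	musicFS, err := s.FS() // returns an error if the directory does not exist
	if err != nil {
		log.Fatal(err)
	}
	// Paths passed to ReadTags are relative to the storage root.
	tags, err := musicFS.ReadTags("artist/album/track.mp3")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags["artist/album/track.mp3"].Tags["title"])
}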
core/storage/local/local_suite_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
package local

import (
	"testing"

	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/tests"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestLocal(t *testing.T) {
	tests.Init(t, false)
	log.SetLevel(log.LevelFatal)
	RegisterFailHandler(Fail)
	RunSpecs(t, "Local Storage Suite")
}
core/storage/local/local_test.go (new file, 428 lines)
@@ -0,0 +1,428 @@
package local

import (
	"io/fs"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"time"

	"github.com/navidrome/navidrome/conf"
	"github.com/navidrome/navidrome/conf/configtest"
	"github.com/navidrome/navidrome/core/storage"
	"github.com/navidrome/navidrome/model/metadata"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("LocalStorage", func() {
	var tempDir string
	var testExtractor *mockTestExtractor

	BeforeEach(func() {
		DeferCleanup(configtest.SetupConfig())

		// Create a temporary directory for testing
		var err error
		tempDir, err = os.MkdirTemp("", "navidrome-local-storage-test-")
		Expect(err).ToNot(HaveOccurred())

		DeferCleanup(func() {
			os.RemoveAll(tempDir)
		})

		// Create and register a test extractor
		testExtractor = &mockTestExtractor{
			results: make(map[string]metadata.Info),
		}
		RegisterExtractor("test", func(fs.FS, string) Extractor {
			return testExtractor
		})
		conf.Server.Scanner.Extractor = "test"
	})

	Describe("newLocalStorage", func() {
		Context("with valid path", func() {
			It("should create a localStorage instance with correct path", func() {
				u, err := url.Parse("file://" + tempDir)
				Expect(err).ToNot(HaveOccurred())

				storage := newLocalStorage(*u)
				localStorage := storage.(*localStorage)

				Expect(localStorage.u.Scheme).To(Equal("file"))
				// Check that the path is set correctly (could be resolved to real path on macOS)
				Expect(localStorage.u.Path).To(ContainSubstring("navidrome-local-storage-test"))
				Expect(localStorage.resolvedPath).To(ContainSubstring("navidrome-local-storage-test"))
				Expect(localStorage.extractor).ToNot(BeNil())
			})

			It("should handle URL-decoded paths correctly", func() {
				// Create a directory with spaces to test URL decoding
				spacedDir := filepath.Join(tempDir, "test folder")
				err := os.MkdirAll(spacedDir, 0755)
				Expect(err).ToNot(HaveOccurred())

				// Use proper URL construction instead of manual escaping
				u := &url.URL{
					Scheme: "file",
					Path:   spacedDir,
				}

				storage := newLocalStorage(*u)
				localStorage, ok := storage.(*localStorage)
				Expect(ok).To(BeTrue())

				Expect(localStorage.u.Path).To(Equal(spacedDir))
			})

			It("should resolve symlinks when possible", func() {
				// Create a real directory and a symlink to it
				realDir := filepath.Join(tempDir, "real")
				linkDir := filepath.Join(tempDir, "link")

				err := os.MkdirAll(realDir, 0755)
				Expect(err).ToNot(HaveOccurred())

				err = os.Symlink(realDir, linkDir)
				Expect(err).ToNot(HaveOccurred())

				u, err := url.Parse("file://" + linkDir)
				Expect(err).ToNot(HaveOccurred())

				storage := newLocalStorage(*u)
				localStorage, ok := storage.(*localStorage)
				Expect(ok).To(BeTrue())

				Expect(localStorage.u.Path).To(Equal(linkDir))
				// Check that the resolved path contains the real directory name
				Expect(localStorage.resolvedPath).To(ContainSubstring("real"))
			})

			It("should use u.Path as resolvedPath when symlink resolution fails", func() {
				// Use a non-existent path to trigger symlink resolution failure
				nonExistentPath := filepath.Join(tempDir, "non-existent")

				u, err := url.Parse("file://" + nonExistentPath)
				Expect(err).ToNot(HaveOccurred())

				storage := newLocalStorage(*u)
				localStorage, ok := storage.(*localStorage)
				Expect(ok).To(BeTrue())

				Expect(localStorage.u.Path).To(Equal(nonExistentPath))
				Expect(localStorage.resolvedPath).To(Equal(nonExistentPath))
			})
		})

		Context("with Windows path", func() {
			BeforeEach(func() {
				if runtime.GOOS != "windows" {
					Skip("Windows-specific test")
				}
			})

			It("should handle Windows drive letters correctly", func() {
				u, err := url.Parse("file://C:/music")
				Expect(err).ToNot(HaveOccurred())

				storage := newLocalStorage(*u)
				localStorage, ok := storage.(*localStorage)
				Expect(ok).To(BeTrue())

				Expect(localStorage.u.Path).To(Equal("C:/music"))
			})
		})

		Context("with invalid extractor", func() {
			It("should handle extractor validation correctly", func() {
				// Note: The actual implementation uses log.Fatal which exits the process,
				// so we test the normal path where extractors exist

				u, err := url.Parse("file://" + tempDir)
				Expect(err).ToNot(HaveOccurred())

				storage := newLocalStorage(*u)
				Expect(storage).ToNot(BeNil())
			})
		})
	})

	Describe("localStorage.FS", func() {
		Context("with existing directory", func() {
			It("should return a localFS instance", func() {
				u, err := url.Parse("file://" + tempDir)
				Expect(err).ToNot(HaveOccurred())

				storage := newLocalStorage(*u)
				musicFS, err := storage.FS()
				Expect(err).ToNot(HaveOccurred())
				Expect(musicFS).ToNot(BeNil())

				_, ok := musicFS.(*localFS)
				Expect(ok).To(BeTrue())
			})
		})

		Context("with non-existent directory", func() {
			It("should return an error", func() {
				nonExistentPath := filepath.Join(tempDir, "non-existent")
				u, err := url.Parse("file://" + nonExistentPath)
				Expect(err).ToNot(HaveOccurred())

				storage := newLocalStorage(*u)
				_, err = storage.FS()
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring(nonExistentPath))
			})
		})
	})

	Describe("localFS.ReadTags", func() {
		var testFile string

		BeforeEach(func() {
			// Create a test file
			testFile = filepath.Join(tempDir, "test.mp3")
			err := os.WriteFile(testFile, []byte("test data"), 0600)
			Expect(err).ToNot(HaveOccurred())

			// Reset extractor state
			testExtractor.results = make(map[string]metadata.Info)
			testExtractor.err = nil
		})

		Context("when extractor returns complete metadata", func() {
			It("should return the metadata as-is", func() {
				expectedInfo := metadata.Info{
					Tags: map[string][]string{
						"title":  {"Test Song"},
						"artist": {"Test Artist"},
					},
					AudioProperties: metadata.AudioProperties{
						Duration: 180,
						BitRate:  320,
					},
					FileInfo: &testFileInfo{name: "test.mp3"},
				}

				testExtractor.results["test.mp3"] = expectedInfo

				u, err := url.Parse("file://" + tempDir)
				Expect(err).ToNot(HaveOccurred())
				storage := newLocalStorage(*u)
				musicFS, err := storage.FS()
				Expect(err).ToNot(HaveOccurred())

				results, err := musicFS.ReadTags("test.mp3")
				Expect(err).ToNot(HaveOccurred())
				Expect(results).To(HaveKey("test.mp3"))
				Expect(results["test.mp3"]).To(Equal(expectedInfo))
			})
		})

		Context("when extractor returns metadata without FileInfo", func() {
			It("should populate FileInfo from filesystem", func() {
				incompleteInfo := metadata.Info{
					Tags: map[string][]string{
						"title": {"Test Song"},
					},
					FileInfo: nil, // Missing FileInfo
				}

				testExtractor.results["test.mp3"] = incompleteInfo

				u, err := url.Parse("file://" + tempDir)
				Expect(err).ToNot(HaveOccurred())
				storage := newLocalStorage(*u)
				musicFS, err := storage.FS()
				Expect(err).ToNot(HaveOccurred())

				results, err := musicFS.ReadTags("test.mp3")
				Expect(err).ToNot(HaveOccurred())
				Expect(results).To(HaveKey("test.mp3"))

				result := results["test.mp3"]
				Expect(result.FileInfo).ToNot(BeNil())
				Expect(result.FileInfo.Name()).To(Equal("test.mp3"))

				// Should be wrapped in localFileInfo
				_, ok := result.FileInfo.(localFileInfo)
				Expect(ok).To(BeTrue())
			})
		})

		Context("when filesystem stat fails", func() {
			It("should return an error", func() {
				incompleteInfo := metadata.Info{
					Tags:     map[string][]string{"title": {"Test Song"}},
					FileInfo: nil,
				}

				testExtractor.results["non-existent.mp3"] = incompleteInfo

				u, err := url.Parse("file://" + tempDir)
				Expect(err).ToNot(HaveOccurred())
				storage := newLocalStorage(*u)
				musicFS, err := storage.FS()
				Expect(err).ToNot(HaveOccurred())

				_, err = musicFS.ReadTags("non-existent.mp3")
				Expect(err).To(HaveOccurred())
			})
		})

		Context("when extractor fails", func() {
			It("should return the extractor error", func() {
				testExtractor.err = &extractorError{message: "extractor failed"}

				u, err := url.Parse("file://" + tempDir)
				Expect(err).ToNot(HaveOccurred())
				storage := newLocalStorage(*u)
				musicFS, err := storage.FS()
				Expect(err).ToNot(HaveOccurred())

				_, err = musicFS.ReadTags("test.mp3")
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("extractor failed"))
			})
		})

		Context("with multiple files", func() {
			It("should process all files correctly", func() {
				// Create another test file
				testFile2 := filepath.Join(tempDir, "test2.mp3")
				err := os.WriteFile(testFile2, []byte("test data 2"), 0600)
				Expect(err).ToNot(HaveOccurred())

				info1 := metadata.Info{
					Tags:     map[string][]string{"title": {"Song 1"}},
					FileInfo: &testFileInfo{name: "test.mp3"},
				}
				info2 := metadata.Info{
					Tags:     map[string][]string{"title": {"Song 2"}},
					FileInfo: nil, // This one needs FileInfo populated
				}

				testExtractor.results["test.mp3"] = info1
				testExtractor.results["test2.mp3"] = info2

				u, err := url.Parse("file://" + tempDir)
				Expect(err).ToNot(HaveOccurred())
				storage := newLocalStorage(*u)
				musicFS, err := storage.FS()
				Expect(err).ToNot(HaveOccurred())

				results, err := musicFS.ReadTags("test.mp3", "test2.mp3")
				Expect(err).ToNot(HaveOccurred())
				Expect(results).To(HaveLen(2))

				Expect(results["test.mp3"].FileInfo).To(Equal(&testFileInfo{name: "test.mp3"}))
				Expect(results["test2.mp3"].FileInfo).ToNot(BeNil())
				Expect(results["test2.mp3"].FileInfo.Name()).To(Equal("test2.mp3"))
			})
		})
	})

	Describe("localFileInfo", func() {
		var testFile string
		var fileInfo fs.FileInfo

		BeforeEach(func() {
			testFile = filepath.Join(tempDir, "test.mp3")
			err := os.WriteFile(testFile, []byte("test data"), 0600)
			Expect(err).ToNot(HaveOccurred())

			fileInfo, err = os.Stat(testFile)
			Expect(err).ToNot(HaveOccurred())
		})

		Describe("BirthTime", func() {
			It("should return birth time when available", func() {
				lfi := localFileInfo{FileInfo: fileInfo}
				birthTime := lfi.BirthTime()

				// Birth time should be a valid time (not zero value)
				Expect(birthTime).ToNot(BeZero())
				// Should be around the current time (within last few minutes)
				Expect(birthTime).To(BeTemporally("~", time.Now(), 5*time.Minute))
			})
		})

		It("should delegate all other FileInfo methods", func() {
			lfi := localFileInfo{FileInfo: fileInfo}

			Expect(lfi.Name()).To(Equal(fileInfo.Name()))
			Expect(lfi.Size()).To(Equal(fileInfo.Size()))
			Expect(lfi.Mode()).To(Equal(fileInfo.Mode()))
			Expect(lfi.ModTime()).To(Equal(fileInfo.ModTime()))
			Expect(lfi.IsDir()).To(Equal(fileInfo.IsDir()))
			Expect(lfi.Sys()).To(Equal(fileInfo.Sys()))
		})
	})

	Describe("Storage registration", func() {
		It("should register localStorage for file scheme", func() {
			// This tests the init() function indirectly
			storage, err := storage.For("file://" + tempDir)
			Expect(err).ToNot(HaveOccurred())
			Expect(storage).To(BeAssignableToTypeOf(&localStorage{}))
		})
	})
})

// Test extractor for testing
type mockTestExtractor struct {
	results map[string]metadata.Info
	err     error
}

func (m *mockTestExtractor) Parse(files ...string) (map[string]metadata.Info, error) {
	if m.err != nil {
		return nil, m.err
	}

	result := make(map[string]metadata.Info)
	for _, file := range files {
		if info, exists := m.results[file]; exists {
			result[file] = info
		}
	}
	return result, nil
}

func (m *mockTestExtractor) Version() string {
	return "test-1.0"
}

type extractorError struct {
	message string
}

func (e *extractorError) Error() string {
	return e.message
}

// Test FileInfo that implements metadata.FileInfo
type testFileInfo struct {
	name      string
	size      int64
	mode      fs.FileMode
	modTime   time.Time
	isDir     bool
	birthTime time.Time
}

func (t *testFileInfo) Name() string       { return t.name }
func (t *testFileInfo) Size() int64        { return t.size }
func (t *testFileInfo) Mode() fs.FileMode  { return t.mode }
func (t *testFileInfo) ModTime() time.Time { return t.modTime }
func (t *testFileInfo) IsDir() bool        { return t.isDir }
func (t *testFileInfo) Sys() any           { return nil }
func (t *testFileInfo) BirthTime() time.Time {
	if t.birthTime.IsZero() {
		return time.Now()
	}
	return t.birthTime
}
core/storage/local/watch_events_darwin.go (new file, 5 lines)
@@ -0,0 +1,5 @@
package local

import "github.com/rjeczalik/notify"

const WatchEvents = notify.All | notify.FSEventsInodeMetaMod
core/storage/local/watch_events_default.go (new file, 7 lines)
@@ -0,0 +1,7 @@
//go:build !linux && !darwin && !windows

package local

import "github.com/rjeczalik/notify"

const WatchEvents = notify.All
core/storage/local/watch_events_linux.go (new file, 5 lines)
@@ -0,0 +1,5 @@
package local

import "github.com/rjeczalik/notify"

const WatchEvents = notify.All | notify.InModify | notify.InAttrib
core/storage/local/watch_events_windows.go (new file, 5 lines)
@@ -0,0 +1,5 @@
package local

import "github.com/rjeczalik/notify"

const WatchEvents = notify.All | notify.FileNotifyChangeAttributes
core/storage/local/watcher.go (new file, 57 lines)
@@ -0,0 +1,57 @@
package local

import (
	"context"
	"errors"
	"path/filepath"
	"strings"

	"github.com/navidrome/navidrome/log"
	"github.com/rjeczalik/notify"
)

// Start starts a watcher on the whole FS and returns a channel to send detected changes.
// It uses `notify` to detect changes in the filesystem, so it may not work on all platforms/use-cases.
// Notoriously, it does not work on some networked mounts and Windows with WSL2.
func (s *localStorage) Start(ctx context.Context) (<-chan string, error) {
	if !s.watching.CompareAndSwap(false, true) {
		return nil, errors.New("watcher already started")
	}
	input := make(chan notify.EventInfo, 1)
	output := make(chan string, 1)

	started := make(chan struct{})
	go func() {
		defer close(input)
		defer close(output)

		libPath := filepath.Join(s.u.Path, "...")
		log.Debug(ctx, "Starting watcher", "lib", libPath)
		err := notify.Watch(libPath, input, WatchEvents)
		if err != nil {
			log.Error("Error starting watcher", "lib", libPath, err)
			return
		}
		defer notify.Stop(input)
		close(started) // signals the main goroutine we have started

		for {
			select {
			case event := <-input:
				log.Trace(ctx, "Detected change", "event", event, "lib", s.u.Path)
				name := event.Path()
				name = strings.Replace(name, s.resolvedPath, s.u.Path, 1)
				output <- name
			case <-ctx.Done():
				log.Debug(ctx, "Stopping watcher", "path", s.u.Path)
				s.watching.Store(false)
				return
			}
		}
	}()
	select {
	case <-started:
	case <-ctx.Done():
	}
	return output, nil
}
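A sketch of how a caller might consume this watcher, based on the Start signature above and the storage.Watcher assertion used in watcher_test.go below; the watchLibrary function and its log output are illustrative only, under the assumption that the storage returned for a local path implements storage.Watcher.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/navidrome/navidrome/core/storage"
	_ "github.com/navidrome/navidrome/core/storage/local" // registers the local storage
)

func watchLibrary(ctx context.Context, root string) error {
	s, err := storage.For(root) // e.g. "/music"; resolves to localStorage for local paths
	if err != nil {
		return err
	}
	w, ok := s.(storage.Watcher) // localStorage implements Start, so this succeeds for it
	if !ok {
		return fmt.Errorf("storage for %s does not support watching", root)
	}
	changes, err := w.Start(ctx)
	if err != nil {
		return err // e.g. "watcher already started"
	}
	for path := range changes { // the channel is closed when ctx is cancelled
		log.Println("change detected:", path) // a real consumer would trigger a (re)scan here
	}
	return nil
}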
core/storage/local/watcher_test.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package local_test

import (
	"context"
	"io/fs"
	"os"
	"path/filepath"
	"time"

	"github.com/navidrome/navidrome/conf"
	"github.com/navidrome/navidrome/core/storage"
	"github.com/navidrome/navidrome/core/storage/local"
	_ "github.com/navidrome/navidrome/core/storage/local"
	"github.com/navidrome/navidrome/model/metadata"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = XDescribe("Watcher", func() {
	var lsw storage.Watcher
	var tmpFolder string

	BeforeEach(func() {
		tmpFolder = GinkgoT().TempDir()

		local.RegisterExtractor("noop", func(fs fs.FS, path string) local.Extractor { return noopExtractor{} })
		conf.Server.Scanner.Extractor = "noop"

		ls, err := storage.For(tmpFolder)
		Expect(err).ToNot(HaveOccurred())

		// It should implement Watcher
		var ok bool
		lsw, ok = ls.(storage.Watcher)
		Expect(ok).To(BeTrue())

		// Make sure temp folder is created
		Eventually(func() error {
			_, err := os.Stat(tmpFolder)
			return err
		}).Should(Succeed())
	})

	It("should start and stop watcher", func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		w, err := lsw.Start(ctx)
		Expect(err).ToNot(HaveOccurred())
		cancel()
		Eventually(w).Should(BeClosed())
	})

	It("should return error if watcher is already started", func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		_, err := lsw.Start(ctx)
		Expect(err).ToNot(HaveOccurred())
		_, err = lsw.Start(ctx)
		Expect(err).To(HaveOccurred())
	})

	It("should detect new files", func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		changes, err := lsw.Start(ctx)
		Expect(err).ToNot(HaveOccurred())

		_, err = os.Create(filepath.Join(tmpFolder, "test.txt"))
		Expect(err).ToNot(HaveOccurred())

		Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(tmpFolder)))
	})

	It("should detect new subfolders", func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		changes, err := lsw.Start(ctx)
		Expect(err).ToNot(HaveOccurred())

		Expect(os.Mkdir(filepath.Join(tmpFolder, "subfolder"), 0755)).To(Succeed())

		Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(filepath.Join(tmpFolder, "subfolder"))))
	})

	It("should detect changes in subfolders recursively", func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		subfolder := filepath.Join(tmpFolder, "subfolder1/subfolder2")
		Expect(os.MkdirAll(subfolder, 0755)).To(Succeed())

		changes, err := lsw.Start(ctx)
		Expect(err).ToNot(HaveOccurred())

		filePath := filepath.Join(subfolder, "test.txt")
		Expect(os.WriteFile(filePath, []byte("test"), 0600)).To(Succeed())

		Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(filePath)))
	})

	It("should detect removed files", func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		changes, err := lsw.Start(ctx)
		Expect(err).ToNot(HaveOccurred())

		filePath := filepath.Join(tmpFolder, "test.txt")
		Expect(os.WriteFile(filePath, []byte("test"), 0600)).To(Succeed())

		Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(filePath)))

		Expect(os.Remove(filePath)).To(Succeed())
		Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(filePath)))
	})

	It("should detect file moves", func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		filePath := filepath.Join(tmpFolder, "test.txt")
		Expect(os.WriteFile(filePath, []byte("test"), 0600)).To(Succeed())

		changes, err := lsw.Start(ctx)
		Expect(err).ToNot(HaveOccurred())

		newPath := filepath.Join(tmpFolder, "test2.txt")
		Expect(os.Rename(filePath, newPath)).To(Succeed())
		Eventually(changes).WithTimeout(2 * time.Second).Should(Receive(Equal(newPath)))
	})
})

type noopExtractor struct{}

func (s noopExtractor) Parse(files ...string) (map[string]metadata.Info, error) { return nil, nil }
func (s noopExtractor) Version() string                                         { return "0" }