Add reading torrent list from file cache, add ffprobe check, bring back proxy in config

This commit is contained in:
Ben Adrian Sarmiento
2024-06-17 17:16:24 +02:00
parent bf9adfb764
commit f33c2411e0
6 changed files with 89 additions and 22 deletions

View File

@@ -4,6 +4,7 @@ import (
"fmt" "fmt"
netHttp "net/http" netHttp "net/http"
"os" "os"
"os/exec"
"strings" "strings"
"time" "time"
@@ -52,7 +53,13 @@ func MainApp(configPath string) {
os.Exit(1) os.Exit(1)
} }
proxyURL := os.Getenv("PROXY") var proxyURL string
if config.GetProxy() != "" {
proxyURL = config.GetProxy()
zurglog.Infof("Using proxy: %s", proxyURL)
} else {
proxyURL = os.Getenv("PROXY")
}
repoClient4 := http.NewHTTPClient("", 0, 1, false, []string{}, proxyURL, log.Named("network_test")) repoClient4 := http.NewHTTPClient("", 0, 1, false, []string{}, proxyURL, log.Named("network_test"))
repoClient6 := http.NewHTTPClient("", 0, 1, true, []string{}, proxyURL, log.Named("network_test")) repoClient6 := http.NewHTTPClient("", 0, 1, true, []string{}, proxyURL, log.Named("network_test"))
@@ -135,6 +142,11 @@ func MainApp(configPath string) {
log.Named("router"), log.Named("router"),
) )
_, err = exec.LookPath("ffprobe")
if err != nil {
zurglog.Warn("ffprobe not found in PATH (do you have ffmpeg installed?), you won't be able to perform media analysis")
}
//// pprof //// pprof
// workerPool.Submit(func() { // workerPool.Submit(func() {
// if err := netHttp.ListenAndServe(":6060", nil); err != nil && err != netHttp.ErrServerClosed { // if err := netHttp.ListenAndServe(":6060", nil); err != nil && err != netHttp.ErrServerClosed {

View File

@@ -29,6 +29,11 @@ func NetworkTest(testURL string) {
log := logutil.NewLogger(logPath) log := logutil.NewLogger(logPath)
proxyURL := os.Getenv("PROXY") proxyURL := os.Getenv("PROXY")
if proxyURL != "" {
log.Infof("Using proxy: %s", proxyURL)
} else {
log.Info("You can set a proxy by setting the PROXY environment variable")
}
repoClient4 := http.NewHTTPClient("", 0, 1, false, []string{}, proxyURL, log.Named("network_test")) repoClient4 := http.NewHTTPClient("", 0, 1, false, []string{}, proxyURL, log.Named("network_test"))
repoClient6 := http.NewHTTPClient("", 0, 1, true, []string{}, proxyURL, log.Named("network_test")) repoClient6 := http.NewHTTPClient("", 0, 1, true, []string{}, proxyURL, log.Named("network_test"))

View File

@@ -34,6 +34,7 @@ type ConfigInterface interface {
GetDownloadsEveryMins() int GetDownloadsEveryMins() int
GetDumpTorrentsEveryMins() int GetDumpTorrentsEveryMins() int
GetPlayableExtensions() []string GetPlayableExtensions() []string
GetProxy() string
} }
type ZurgConfig struct { type ZurgConfig struct {
@@ -54,6 +55,7 @@ type ZurgConfig struct {
Password string `yaml:"password" json:"password"` Password string `yaml:"password" json:"password"`
PlayableExtensions []string `yaml:"addl_playable_extensions" json:"addl_playable_extensions"` PlayableExtensions []string `yaml:"addl_playable_extensions" json:"addl_playable_extensions"`
Port string `yaml:"port" json:"port"` Port string `yaml:"port" json:"port"`
Proxy string `yaml:"proxy" json:"proxy"`
RefreshEverySecs int `yaml:"check_for_changes_every_secs" json:"check_for_changes_every_secs"` RefreshEverySecs int `yaml:"check_for_changes_every_secs" json:"check_for_changes_every_secs"`
RepairEveryMins int `yaml:"repair_every_mins" json:"repair_every_mins"` RepairEveryMins int `yaml:"repair_every_mins" json:"repair_every_mins"`
RetainFolderNameExtension bool `yaml:"retain_folder_name_extension" json:"retain_folder_name_extension"` RetainFolderNameExtension bool `yaml:"retain_folder_name_extension" json:"retain_folder_name_extension"`
@@ -205,3 +207,7 @@ func (z *ZurgConfig) GetPlayableExtensions() []string {
} }
return z.PlayableExtensions return z.PlayableExtensions
} }
// GetProxy returns the proxy URL set via the `proxy` key of the config
// file, or the empty string when no proxy is configured. Callers fall
// back to the PROXY environment variable when this is empty.
func (z *ZurgConfig) GetProxy() string {
	return z.Proxy
}

View File

@@ -24,7 +24,7 @@ type RealDebrid struct {
} }
func NewRealDebrid(apiClient, unrestrictClient, downloadClient *zurghttp.HTTPClient, workerPool *ants.Pool, cfg config.ConfigInterface, log *logutil.Logger) *RealDebrid { func NewRealDebrid(apiClient, unrestrictClient, downloadClient *zurghttp.HTTPClient, workerPool *ants.Pool, cfg config.ConfigInterface, log *logutil.Logger) *RealDebrid {
return &RealDebrid{ rd := &RealDebrid{
torrentsCache: []Torrent{}, torrentsCache: []Torrent{},
apiClient: apiClient, apiClient: apiClient,
unrestrictClient: unrestrictClient, unrestrictClient: unrestrictClient,
@@ -33,6 +33,8 @@ func NewRealDebrid(apiClient, unrestrictClient, downloadClient *zurghttp.HTTPCli
cfg: cfg, cfg: cfg,
log: log, log: log,
} }
rd.readCachedTorrents()
return rd
} }
// currently unused // currently unused

View File

@@ -2,8 +2,10 @@ package realdebrid
import ( import (
"fmt" "fmt"
"io"
"net/http" "net/http"
"net/url" "net/url"
"os"
"strconv" "strconv"
) )
@@ -52,29 +54,28 @@ func (rd *RealDebrid) GetTorrents(onlyOne bool) ([]Torrent, int, error) {
allResults <- rd.fetchPageOfTorrents(page+idx, pageSize) allResults <- rd.fetchPageOfTorrents(page+idx, pageSize)
}) })
} }
// Collect results from all goroutines batches := make([][]Torrent, maxParallelThreads)
buffer := make([][]Torrent, maxParallelThreads)
for i := 0; i < maxParallelThreads; i++ { for i := 0; i < maxParallelThreads; i++ {
result := <-allResults result := <-allResults
bufferIdx := (result.page - 1) % maxParallelThreads
buffer[bufferIdx] = []Torrent{}
if result.err != nil { if result.err != nil {
rd.log.Warnf("Ignoring error when fetching torrents pg %d: %v", result.page, result.err) rd.log.Warnf("Ignoring error when fetching torrents pg %d: %v", result.page, result.err)
continue continue
} }
buffer[bufferIdx] = append(buffer[bufferIdx], result.torrents...) bIdx := (result.page - 1) % maxParallelThreads
batches[bIdx] = []Torrent{}
batches[bIdx] = append(batches[bIdx], result.torrents...)
} }
for bIdx, batch := range buffer { for bIdx, batch := range batches { // 4 batches
for tIdx, torrent := range batch { cachedCount := len(rd.torrentsCache)
for cIdx, cached := range rd.torrentsCache { for cIdx, cached := range rd.torrentsCache { // N cached torrents
cIdxEnd := cachedCount - 1 - cIdx
for tIdx, torrent := range batch { // 250 torrents
tIdxEnd := indexFromEnd(tIdx, page+bIdx, pageSize, result.total) tIdxEnd := indexFromEnd(tIdx, page+bIdx, pageSize, result.total)
cIdxEnd := len(rd.torrentsCache) - 1 - cIdx
if torrent.ID == cached.ID && tIdxEnd == cIdxEnd { if torrent.ID == cached.ID && tIdxEnd == cIdxEnd {
allTorrents = append(allTorrents, batch[:tIdx]...) allTorrents = append(allTorrents, batch[:tIdx]...)
allTorrents = append(allTorrents, rd.torrentsCache[cIdx:]...) allTorrents = append(allTorrents, rd.torrentsCache[cIdx:]...)
rd.log.Debugf("Fresh %d, cached %d", len(batch[:tIdx]), len(rd.torrentsCache[cIdx:]))
rd.log.Debugf("Got %d/%d torrents", len(allTorrents), result.total) rd.log.Debugf("Got %d/%d torrents", len(allTorrents), result.total)
rd.torrentsCache = allTorrents rd.cacheTorrents(allTorrents)
return allTorrents, len(allTorrents), nil return allTorrents, len(allTorrents), nil
} }
} }
@@ -91,7 +92,7 @@ func (rd *RealDebrid) GetTorrents(onlyOne bool) ([]Torrent, int, error) {
page += maxParallelThreads page += maxParallelThreads
} }
rd.torrentsCache = allTorrents rd.cacheTorrents(allTorrents)
return allTorrents, len(allTorrents), nil return allTorrents, len(allTorrents), nil
} }
@@ -172,3 +173,51 @@ func (rd *RealDebrid) fetchPageOfTorrents(page, limit int) fetchTorrentsResult {
err: nil, err: nil,
} }
} }
// cacheTorrents persists the given torrent list to the on-disk info file
// and, on success, replaces the in-memory cache with it. Errors are
// logged and swallowed: caching is best-effort and must never fail the
// caller. The in-memory cache is only updated after the file write
// succeeds, so memory and disk stay in sync.
func (rd *RealDebrid) cacheTorrents(torrents []Torrent) {
	filePath := "data/info/all.json"
	// Marshal BEFORE touching the file: the previous implementation
	// called os.Create first, which truncated the existing cache even
	// when marshaling subsequently failed, losing the old cache.
	jsonData, err := json.Marshal(torrents)
	if err != nil {
		rd.log.Warnf("Cannot marshal torrent info: %v", err)
		return
	}
	// Make sure the target directory exists (e.g. on a fresh data dir).
	if err := os.MkdirAll("data/info", 0o755); err != nil {
		rd.log.Warnf("Cannot create info directory for %s: %v", filePath, err)
		return
	}
	if err := os.WriteFile(filePath, jsonData, 0o644); err != nil {
		rd.log.Warnf("Cannot write to info file %s: %v", filePath, err)
		return
	}
	rd.torrentsCache = torrents
}
// readCachedTorrents loads the torrent list previously persisted by
// cacheTorrents into the in-memory cache. Any failure is tolerated —
// the cache simply stays empty and will be rebuilt from the API. A
// missing file is the normal first-run state, so it is logged at debug
// level rather than as a warning.
func (rd *RealDebrid) readCachedTorrents() {
	filePath := "data/info/all.json"
	file, err := os.Open(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			// Nothing cached yet (first run) — not a problem.
			rd.log.Debugf("No cached torrent info at %s yet", filePath)
		} else {
			rd.log.Warnf("Cannot open info file %s: %v", filePath, err)
		}
		return
	}
	defer file.Close()
	jsonData, err := io.ReadAll(file)
	if err != nil {
		rd.log.Warnf("Cannot read info file %s: %v", filePath, err)
		return
	}
	var torrents []Torrent
	if err := json.Unmarshal(jsonData, &torrents); err != nil {
		rd.log.Warnf("Cannot unmarshal torrent info: %v", err)
		return
	}
	rd.torrentsCache = torrents
}

View File

@@ -1,14 +1,7 @@
package realdebrid package realdebrid
// indexFromEnd converts a position within a 1-based page of results into
// that element's 0-based distance from the END of the full result set.
// Because new torrents are prepended to the provider's list, an element's
// index from the end is stable across refreshes, which lets fresh pages
// be matched against the cached tail.
//
// subIndex is the element's 0-based offset inside the page, pageNumber is
// 1-based, pageSize is the page length, and totalElements is the total
// result count reported by the API.
func indexFromEnd(subIndex int, pageNumber int, pageSize int, totalElements int) int {
	// 0-based position from the start of the full list.
	overallIndex := (pageNumber-1)*pageSize + subIndex
	return totalElements - 1 - overallIndex
}