Add reading torrent list from file cache, add ffprobe check, bring back proxy in config

Ben Adrian Sarmiento
2024-06-17 17:16:24 +02:00
parent bf9adfb764
commit f33c2411e0
6 changed files with 89 additions and 22 deletions

View File

@@ -24,7 +24,7 @@ type RealDebrid struct {
 }
 
 func NewRealDebrid(apiClient, unrestrictClient, downloadClient *zurghttp.HTTPClient, workerPool *ants.Pool, cfg config.ConfigInterface, log *logutil.Logger) *RealDebrid {
-	return &RealDebrid{
+	rd := &RealDebrid{
 		torrentsCache:    []Torrent{},
 		apiClient:        apiClient,
 		unrestrictClient: unrestrictClient,
@@ -33,6 +33,8 @@ func NewRealDebrid(apiClient, unrestrictClient, downloadClient *zurghttp.HTTPCli
 		cfg:              cfg,
 		log:              log,
 	}
+	rd.readCachedTorrents()
+	return rd
 }
 
 // currently unused
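
The constructor change swaps a direct struct-literal return for a named variable, so that setup can run before the value escapes. A generic sketch of the pattern with hypothetical names (loadCache standing in for readCachedTorrents):

package main

import "fmt"

type client struct{ cache []string }

// loadCache mutates the receiver before the constructor hands it out.
func (c *client) loadCache() { c.cache = []string{"restored"} }

func newClient() *client {
	c := &client{} // bind first instead of returning the literal...
	c.loadCache()  // ...so initialization hooks can run,
	return c       // then return the fully prepared value.
}

func main() {
	fmt.Println(newClient().cache) // [restored]
}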

View File

@@ -2,8 +2,10 @@ package realdebrid
 
 import (
 	"encoding/json"
 	"fmt"
+	"io"
 	"net/http"
 	"net/url"
+	"os"
 	"strconv"
 )
@@ -52,29 +54,28 @@ func (rd *RealDebrid) GetTorrents(onlyOne bool) ([]Torrent, int, error) {
 			allResults <- rd.fetchPageOfTorrents(page+idx, pageSize)
 		})
 	}
 
 	// Collect results from all goroutines
-	buffer := make([][]Torrent, maxParallelThreads)
+	batches := make([][]Torrent, maxParallelThreads)
 	for i := 0; i < maxParallelThreads; i++ {
 		result := <-allResults
-		bufferIdx := (result.page - 1) % maxParallelThreads
-		buffer[bufferIdx] = []Torrent{}
 		if result.err != nil {
 			rd.log.Warnf("Ignoring error when fetching torrents pg %d: %v", result.page, result.err)
 			continue
 		}
-		buffer[bufferIdx] = append(buffer[bufferIdx], result.torrents...)
+		bIdx := (result.page - 1) % maxParallelThreads
+		batches[bIdx] = []Torrent{}
+		batches[bIdx] = append(batches[bIdx], result.torrents...)
 	}
 
-	for bIdx, batch := range buffer {
-		for tIdx, torrent := range batch {
-			for cIdx, cached := range rd.torrentsCache {
+	for bIdx, batch := range batches { // 4 batches
+		cachedCount := len(rd.torrentsCache)
+		for cIdx, cached := range rd.torrentsCache { // N cached torrents
+			cIdxEnd := cachedCount - 1 - cIdx
+			for tIdx, torrent := range batch { // 250 torrents
 				tIdxEnd := indexFromEnd(tIdx, page+bIdx, pageSize, result.total)
-				cIdxEnd := len(rd.torrentsCache) - 1 - cIdx
 				if torrent.ID == cached.ID && tIdxEnd == cIdxEnd {
 					allTorrents = append(allTorrents, batch[:tIdx]...)
 					allTorrents = append(allTorrents, rd.torrentsCache[cIdx:]...)
 					rd.log.Debugf("Fresh %d, cached %d", len(batch[:tIdx]), len(rd.torrentsCache[cIdx:]))
 					rd.log.Debugf("Got %d/%d torrents", len(allTorrents), result.total)
-					rd.torrentsCache = allTorrents
+					rd.cacheTorrents(allTorrents)
 					return allTorrents, len(allTorrents), nil
 				}
 			}
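
The loop above stitches fresh results onto the persisted list: once a freshly fetched torrent matches a cached one at the same distance from the end of the full list, everything from that cached position onward is assumed unchanged, so the fresh prefix and the cached suffix are spliced together. A minimal standalone sketch of that splice, with Torrent reduced to an ID field and hypothetical sample data:

package main

import "fmt"

type Torrent struct{ ID string }

// splice keeps the fresh items before the match and reuses the cached tail,
// mirroring the append(batch[:tIdx]) / append(cache[cIdx:]) pair above.
func splice(batch, cache []Torrent, tIdx, cIdx int) []Torrent {
	out := append([]Torrent{}, batch[:tIdx]...) // fresh prefix: everything before the match
	return append(out, cache[cIdx:]...)         // cached suffix: assumed unchanged from here on
}

func main() {
	cache := []Torrent{{"c"}, {"d"}, {"e"}}        // list persisted by a previous run
	batch := []Torrent{{"a"}, {"b"}, {"c"}, {"d"}} // fresh page; "c" matches cache[0]
	fmt.Println(splice(batch, cache, 2, 0))        // [{a} {b} {c} {d} {e}]
}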
@@ -91,7 +92,7 @@ func (rd *RealDebrid) GetTorrents(onlyOne bool) ([]Torrent, int, error) {
 		page += maxParallelThreads
 	}
 
-	rd.torrentsCache = allTorrents
+	rd.cacheTorrents(allTorrents)
 	return allTorrents, len(allTorrents), nil
 }
@@ -172,3 +173,51 @@ func (rd *RealDebrid) fetchPageOfTorrents(page, limit int) fetchTorrentsResult {
 		err: nil,
 	}
 }
+
+func (rd *RealDebrid) cacheTorrents(torrents []Torrent) {
+	filePath := "data/info/all.json"
+	file, err := os.Create(filePath)
+	if err != nil {
+		rd.log.Warnf("Cannot create info file %s: %v", filePath, err)
+		return
+	}
+	defer file.Close()
+
+	jsonData, err := json.Marshal(torrents)
+	if err != nil {
+		rd.log.Warnf("Cannot marshal torrent info: %v", err)
+		return
+	}
+
+	if _, err := file.Write(jsonData); err != nil {
+		rd.log.Warnf("Cannot write to info file %s: %v", filePath, err)
+		return
+	}
+
+	rd.torrentsCache = torrents
+}
+
+func (rd *RealDebrid) readCachedTorrents() {
+	filePath := "data/info/all.json"
+	file, err := os.Open(filePath)
+	if err != nil {
+		rd.log.Warnf("Cannot open info file %s: %v", filePath, err)
+		return
+	}
+	defer file.Close()
+
+	jsonData, err := io.ReadAll(file)
+	if err != nil {
+		rd.log.Warnf("Cannot read info file %s: %v", filePath, err)
+		return
+	}
+
+	var torrents []Torrent
+	err = json.Unmarshal(jsonData, &torrents)
+	if err != nil {
+		rd.log.Warnf("Cannot unmarshal torrent info: %v", err)
+		return
+	}
+
+	rd.torrentsCache = torrents
+}
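
The two helpers above persist the in-memory torrent list as JSON under data/info/all.json and reload it when the RealDebrid client is constructed. Both fail soft: on any error they log a warning and leave the existing in-memory state alone, so a missing or corrupt cache file only costs a full refetch. A minimal sketch of the same round-trip, assuming a writable temp path and with Torrent reduced to a single ID field for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// Torrent is cut down to one field here; the real type carries more.
type Torrent struct {
	ID string `json:"id"`
}

func main() {
	path := filepath.Join(os.TempDir(), "all.json")

	// Persist the list as JSON, as cacheTorrents does.
	data, err := json.Marshal([]Torrent{{ID: "abc"}, {ID: "def"}})
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	if err := os.WriteFile(path, data, 0o644); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	// Reload it, as readCachedTorrents does at construction time.
	raw, err := os.ReadFile(path)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	var torrents []Torrent
	if err := json.Unmarshal(raw, &torrents); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(torrents) // [{abc} {def}]
}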

View File

@@ -1,14 +1,7 @@
 package realdebrid
 
 func indexFromEnd(subIndex int, pageNumber int, pageSize int, totalElements int) int {
-	// Adjust pageNumber for 1-based index
 	adjustedPageNumber := pageNumber - 1
-	// Calculate the overall index in the array
 	overallIndex := (adjustedPageNumber * pageSize) + subIndex
-	// Calculate the index from the end
-	indexFromEnd := totalElements - 1 - overallIndex
-	return indexFromEnd
+	return totalElements - 1 - overallIndex
 }
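
indexFromEnd converts a position within a 1-based page into a distance from the end of the full list, which is what makes a fresh page entry comparable to a cached entry even after new torrents are prepended at the front. A quick worked check with hypothetical numbers:

package main

import "fmt"

// indexFromEnd as it stands after this commit.
func indexFromEnd(subIndex int, pageNumber int, pageSize int, totalElements int) int {
	adjustedPageNumber := pageNumber - 1
	overallIndex := (adjustedPageNumber * pageSize) + subIndex
	return totalElements - 1 - overallIndex
}

func main() {
	// Item 3 (0-based) on 1-based page 2, 250 items per page, 1000 total:
	// overall index = (2-1)*250 + 3 = 253, so from the end: 1000 - 1 - 253 = 746.
	fmt.Println(indexFromEnd(3, 2, 250, 1000)) // 746
}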