Proper cached index computation

This commit is contained in:
Ben Adrian Sarmiento
2024-06-21 12:34:14 +02:00
parent d0baeb3d40
commit 8bf39d58de

View File

@@ -23,21 +23,23 @@ func (rd *RealDebrid) GetTorrents(onlyOne bool) ([]Torrent, int, error) {
 		return nil, 0, result.err
 	}
+	totalElements := result.total
 	if onlyOne {
-		return result.torrents, result.total, nil
+		return result.torrents, totalElements, nil
 	}
 	allTorrents := []Torrent{}
 	page := 1
 	pageSize := 250
-	maxPages := (result.total + pageSize - 1) / pageSize
-	rd.log.Debugf("Torrents total count is %d", result.total)
+	maxPages := (totalElements + pageSize - 1) / pageSize
+	rd.log.Debugf("Torrents total count is %d", totalElements)
 	maxParallelThreads := 4
 	if maxPages < maxParallelThreads {
 		maxParallelThreads = maxPages
 	}
-	found := false
+	found := -1
 	for {
 		allResults := make(chan fetchTorrentsResult, maxParallelThreads) // Channel to collect results from goroutines
 		for i := 0; i < maxParallelThreads; i++ { // Launch GET_PARALLEL concurrent fetches
@@ -47,7 +49,7 @@ func (rd *RealDebrid) GetTorrents(onlyOne bool) ([]Torrent, int, error) {
 				allResults <- fetchTorrentsResult{
 					torrents: nil,
 					page:     page + idx,
-					total:    result.total,
+					total:    totalElements,
 					err:      nil,
 				}
 				return
@@ -67,32 +69,37 @@ func (rd *RealDebrid) GetTorrents(onlyOne bool) ([]Torrent, int, error) {
 			batches[bIdx] = append(batches[bIdx], result.torrents...)
 		}
 		for bIdx, batch := range batches { // 4 batches
-			if !found && len(batch) > 0 {
+			if found < 0 && len(batch) > 0 {
 				cachedCount := len(rd.torrentsCache)
 				for cIdx, cached := range rd.torrentsCache { // N cached torrents
 					cIdxEnd := cachedCount - 1 - cIdx
 					for tIdx, torrent := range batch { // 250 torrents in batch
-						tIdxEnd := indexFromEnd(tIdx, page+bIdx, pageSize, result.total)
+						tIdxEnd := indexFromEnd(tIdx, page+bIdx, pageSize, totalElements)
 						if torrent.ID == cached.ID && torrent.Progress == cached.Progress && tIdxEnd == cIdxEnd {
-							found = true
+							found = ((page + bIdx - 1) * pageSize) + tIdx
 							break
 						}
 					}
-					if found {
+					if found >= 0 {
						// rd.log.Debugf("From torrent %s (id=%s) onwards, the torrents were untouched", cached.Name, cached.ID)
 						break
 					}
 				}
 			}
 			allTorrents = append(allTorrents, batch...)
 		}
-		if found {
-			allTorrents = append(allTorrents, rd.torrentsCache[len(allTorrents):]...)
+		if found >= 0 {
+			tIdx := found % pageSize
+			pageNum := (found / pageSize) + 1
+			tIdxEnd := indexFromEnd(tIdx, pageNum, pageSize, totalElements)
+			cIdx := len(rd.torrentsCache) - 1 - tIdxEnd
+			last := len(allTorrents) - 1
+			cIdx += last - found + 1
+			allTorrents = append(allTorrents, rd.torrentsCache[cIdx:]...)
 		}
-		rd.log.Debugf("Got %d/%d torrents", len(allTorrents), result.total)
-		if len(allTorrents) >= result.total || page >= maxPages {
+		rd.log.Debugf("Got %d/%d torrents", len(allTorrents), totalElements)
+		if len(allTorrents) >= totalElements || page >= maxPages {
 			break
 		}