// Package torrent manages zurg's in-memory library of Real-Debrid torrents:
// it fetches and caches torrent metadata, periodically refreshes it, repairs
// torrents whose links have expired, and pre-builds the WebDAV/HTML responses
// served for each configured directory.
package torrent

import (
	"bufio"
	"encoding/gob"
	"fmt"
	"math"
	"net/url"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/debridmediamanager/zurg/internal/config"
	"github.com/debridmediamanager/zurg/pkg/dav"
	"github.com/debridmediamanager/zurg/pkg/realdebrid"
	"github.com/debridmediamanager/zurg/pkg/utils"
	"github.com/dgraph-io/ristretto"
	cmap "github.com/orcaman/concurrent-map/v2"
	"github.com/panjf2000/ants/v2"
	"go.uber.org/zap"
)

const (
	// INT_ALL is the internal pseudo-directory holding every torrent,
	// keyed by AccessKey.
	INT_ALL = "int__all__"
	// INT_INFO_CACHE is the internal pseudo-directory caching fetched
	// torrent info, keyed by Real-Debrid torrent ID.
	INT_INFO_CACHE = "int__info__"
)

// TorrentManager owns the library state and every cache derived from it.
type TorrentManager struct {
	Config        config.ConfigInterface
	Api           *realdebrid.RealDebrid
	DirectoryMap  cmap.ConcurrentMap[string, cmap.ConcurrentMap[string, *Torrent]] // directory -> accessKey -> Torrent
	DownloadCache cmap.ConcurrentMap[string, *realdebrid.Download]
	ResponseCache *ristretto.Cache

	latestState     *LibraryState
	requiredVersion string // on-disk cache format version; older cache files are discarded
	workerPool      *ants.Pool
	log             *zap.SugaredLogger
}

// NewTorrentManager creates a new torrent manager.
// It fetches all torrents and their info (downloads are fetched in the
// background) and stores them in-memory as well as cached in files.
func NewTorrentManager(cfg config.ConfigInterface, api *realdebrid.RealDebrid, p *ants.Pool, cache *ristretto.Cache, log *zap.SugaredLogger) *TorrentManager {
	initialState := EmptyState()
	t := &TorrentManager{
		Config:          cfg,
		Api:             api,
		DirectoryMap:    cmap.New[cmap.ConcurrentMap[string, *Torrent]](),
		ResponseCache:   cache,
		latestState:     &initialState,
		requiredVersion: "18.11.2023",
		workerPool:      p,
		log:             log,
	}

	// create internal directories
	t.DirectoryMap.Set(INT_ALL, cmap.New[*Torrent]())        // key is AccessKey
	t.DirectoryMap.Set(INT_INFO_CACHE, cmap.New[*Torrent]()) // key is Torrent ID

	// create directory maps
	for _, directory := range cfg.GetDirectories() {
		t.DirectoryMap.Set(directory, cmap.New[*Torrent]())
	}

	// Fetch downloads in the background.
	// NOTE(review): this goroutine races the "Fetched %d downloads" log line
	// below, so the logged count may be partial; the cache itself is
	// concurrency-safe so correctness is unaffected.
	t.DownloadCache = cmap.New[*realdebrid.Download]()
	_ = t.workerPool.Submit(func() {
		page := 1
		offset := 0
		for {
			downloads, totalDownloads, err := t.Api.GetDownloads(page, offset)
			if err != nil {
				t.log.Fatalf("Cannot get downloads: %v\n", err)
			}
			// guard against an empty page: without it, offset would never
			// reach totalDownloads and this loop would spin forever
			if len(downloads) == 0 {
				break
			}
			for i := range downloads {
				if !t.DownloadCache.Has(downloads[i].Link) {
					t.DownloadCache.Set(downloads[i].Link, &downloads[i])
				}
			}
			offset += len(downloads)
			page++
			if offset >= totalDownloads {
				break
			}
		}
	})

	var newTorrents []realdebrid.Torrent
	var err error
	newTorrents, _, err = t.Api.GetTorrents(0)
	if err != nil {
		t.log.Fatalf("Cannot get torrents: %v\n", err)
	}
	t.log.Infof("Fetched %d downloads", t.DownloadCache.Count())

	// Fetch detailed info for every torrent concurrently.
	torrentsChan := make(chan *Torrent, len(newTorrents))
	var wg sync.WaitGroup
	for i := range newTorrents {
		wg.Add(1)
		idx := i // capture the loop variable
		_ = t.workerPool.Submit(func() {
			defer wg.Done()
			torrentsChan <- t.getMoreInfo(newTorrents[idx])
		})
	}
	wg.Wait()
	close(torrentsChan)
	t.log.Infof("Fetched info for %d torrents", len(newTorrents))

	// Merge duplicate torrents (same AccessKey) into a single entry.
	noInfoCount := 0
	allTorrents, _ := t.DirectoryMap.Get(INT_ALL)
	for tor := range torrentsChan {
		if tor == nil {
			noInfoCount++
			continue
		}
		if torrent, exists := allTorrents.Get(tor.AccessKey); !exists {
			allTorrents.Set(tor.AccessKey, tor)
		} else {
			mainTorrent := t.mergeToMain(torrent, tor)
			allTorrents.Set(tor.AccessKey, mainTorrent)
		}
	}

	// Assign torrents to their directories and pre-render per-torrent responses.
	allTorrents.IterCb(func(_ string, torrent *Torrent) {
		dav, html := t.buildTorrentResponses(torrent)
		t.AssignedDirectoryCb(torrent, func(directory string) {
			torrents, _ := t.DirectoryMap.Get(directory)
			torrents.Set(torrent.AccessKey, torrent)
			// torrent responses: "$dir" placeholders are substituted per directory
			newHtml := strings.ReplaceAll(html, "$dir", directory)
			t.ResponseCache.Set(directory+"/"+torrent.AccessKey+".html", &newHtml, 1)
			newDav := strings.ReplaceAll(dav, "$dir", directory)
			t.ResponseCache.Set(directory+"/"+torrent.AccessKey+".dav", &newDav, 1)
		})
	})
	t.updateDirectoryResponsesCache()
	t.log.Infof("Compiled into %d torrents, %d were missing info", allTorrents.Count(), noInfoCount)

	t.SetNewLatestState(t.getCurrentState())

	if t.Config.EnableRepair() {
		t.log.Info("Checking for torrents to repair")
		t.repairAll()
		t.log.Info("Finished checking for torrents to repair")
	}

	_ = t.workerPool.Submit(func() { t.startRefreshJob() })
	t.log.Info("Finished initializing torrent manager")
	return t
}

// mergeToMain folds torrentToMerge into mainTorrent: it merges the selected
// files (preferring the more recently added copy when it has a valid link),
// appends instances, and keeps the most recent LatestAdded timestamp.
func (t *TorrentManager) mergeToMain(mainTorrent, torrentToMerge *Torrent) *Torrent {
	// Merge SelectedFiles - IterCb accesses a different copy of the SelectedFiles map
	torrentToMerge.SelectedFiles.IterCb(func(filepath string, fileToMerge *File) {
		// see if it already exists in the main torrent
		if _, ok := mainTorrent.SelectedFiles.Get(filepath); !ok {
			// if it doesn't exist in the main torrent, add it
			mainTorrent.SelectedFiles.Set(filepath, fileToMerge)
		} else {
			// if it exists, compare the LatestAdded property and the link
			if mainTorrent.LatestAdded < torrentToMerge.LatestAdded && strings.HasPrefix(fileToMerge.Link, "http") {
				// if torrentToMerge is more recent and its file has a link, update the main torrent's file
				mainTorrent.SelectedFiles.Set(filepath, fileToMerge)
			}
			// else do nothing, the main torrent's file is more recent or has a valid link
		}
	})

	// Merge Instances
	mainTorrent.Instances = append(mainTorrent.Instances, torrentToMerge.Instances...)

	// LatestAdded
	if mainTorrent.LatestAdded < torrentToMerge.LatestAdded {
		mainTorrent.LatestAdded = torrentToMerge.LatestAdded
	}
	return mainTorrent
}

// UnrestrictUntilOk proxies link unrestriction through the worker pool so it
// is subject to the pool's concurrency limit.
func (t *TorrentManager) UnrestrictUntilOk(link string) *realdebrid.Download {
	retChan := make(chan *realdebrid.Download, 1)
	if err := t.workerPool.Submit(func() {
		retChan <- t.Api.UnrestrictUntilOk(link, t.Config.ShouldServeFromRclone())
	}); err != nil {
		// If the pool rejects the task (e.g. it was released), fall back to a
		// direct call instead of blocking forever on a channel nobody writes to.
		return t.Api.UnrestrictUntilOk(link, t.Config.ShouldServeFromRclone())
	}
	defer close(retChan)
	return <-retChan
}

// SetNewLatestState copies the given checksum into the manager's latest state.
func (t *TorrentManager) SetNewLatestState(checksum LibraryState) {
	t.latestState.DownloadingCount = checksum.DownloadingCount
	t.latestState.FirstTorrent = checksum.FirstTorrent
	t.latestState.TotalCount = checksum.TotalCount
}

// ScheduleForRefresh invalidates the latest state so the next refresh tick
// always detects a change and re-fetches the library.
func (t *TorrentManager) ScheduleForRefresh() {
	t.SetNewLatestState(EmptyState())
}

type torrentsResp struct {
	torrents   []realdebrid.Torrent
	totalCount int
}

// getCurrentState generates a checksum based on the number of torrents, the
// first torrent and the number of active torrents. Both API calls run
// concurrently; any error yields EmptyState.
func (t *TorrentManager) getCurrentState() LibraryState {
	torrentsChan := make(chan torrentsResp, 1)
	countChan := make(chan int, 1)
	errChan := make(chan error, 2) // accommodate errors from both goroutines

	_ = t.workerPool.Submit(func() {
		torrents, totalCount, err := t.Api.GetTorrents(1)
		if err != nil {
			errChan <- err
			return
		}
		torrentsChan <- torrentsResp{torrents: torrents, totalCount: totalCount}
	})
	_ = t.workerPool.Submit(func() {
		count, err := t.Api.GetActiveTorrentCount()
		if err != nil {
			errChan <- err
			return
		}
		countChan <- count.DownloadingCount
	})

	// Collect both results (or bail out on the first error).
	var torrents []realdebrid.Torrent
	var totalCount, count int
	for i := 0; i < 2; i++ {
		select {
		case resp := <-torrentsChan:
			torrents = resp.torrents
			totalCount = resp.totalCount
		case count = <-countChan:
		case err := <-errChan:
			t.log.Warnf("Checksum API Error: %v\n", err)
			return EmptyState()
		}
	}
	if len(torrents) == 0 {
		t.log.Error("Huh, no torrents returned")
		return EmptyState()
	}
	return LibraryState{
		TotalCount:       totalCount,
		FirstTorrent:     &torrents[0],
		DownloadingCount: count,
	}
}

// startRefreshJob periodically refreshes the torrents.
func (t *TorrentManager) startRefreshJob() {
	t.log.Info("Starting periodic refresh")
	for {
		<-time.After(time.Duration(t.Config.GetRefreshEverySeconds()) * time.Second)
		checksum := t.getCurrentState()
		if t.latestState.equal(checksum) {
			continue
		}
		newTorrents, _, err := t.Api.GetTorrents(0)
		if err != nil {
			t.log.Warnf("Cannot get torrents: %v\n", err)
			continue
		}
		t.log.Infof("Detected changes! Refreshing %d torrents", len(newTorrents))

		// handle deleted torrents in info cache
		keep := make(map[string]bool, len(newTorrents))
		for _, torrent := range newTorrents {
			keep[torrent.ID] = true
		}
		var toDelete []string
		infoCache, _ := t.DirectoryMap.Get(INT_INFO_CACHE)
		infoCache.IterCb(func(torrentID string, torrent *Torrent) {
			if _, ok := keep[torrentID]; !ok {
				toDelete = append(toDelete, torrentID)
			}
		})
		for _, torrentID := range toDelete {
			infoCache.Remove(torrentID)
		}
		// end info cache cleanup

		torrentsChan := make(chan *Torrent, len(newTorrents))
		var wg sync.WaitGroup
		for i := range newTorrents {
			wg.Add(1)
			idx := i // capture the loop variable
			_ = t.workerPool.Submit(func() {
				defer wg.Done()
				torrentsChan <- t.getMoreInfo(newTorrents[idx])
			})
		}
		wg.Wait()
		close(torrentsChan)
		t.log.Infof("Fetched info for %d torrents", len(newTorrents))

		noInfoCount := 0
		oldTorrents, _ := t.DirectoryMap.Get(INT_ALL)
		newSet := cmap.New[*Torrent]()
		for info := range torrentsChan {
			if info == nil {
				noInfoCount++
				continue
			}
			if torrent, exists := oldTorrents.Get(info.AccessKey); !exists {
				oldTorrents.Set(info.AccessKey, info)
				newSet.Set(info.AccessKey, info)
			} else {
				mainTorrent := t.mergeToMain(torrent, info)
				oldTorrents.Set(info.AccessKey, mainTorrent)
				newSet.Set(info.AccessKey, mainTorrent)
			}
		}

		var updatedPaths []string
		newSet.IterCb(func(_ string, torrent *Torrent) {
			dav, html := t.buildTorrentResponses(torrent)
			t.AssignedDirectoryCb(torrent, func(directory string) {
				torrents, _ := t.DirectoryMap.Get(directory)
				torrents.Set(torrent.AccessKey, torrent)
				// FirstTorrent is nil when the previous state was EmptyState();
				// guard to avoid a nil dereference on the first refresh after reset
				if t.latestState.FirstTorrent != nil && torrent.LatestAdded > t.latestState.FirstTorrent.Added {
					updatedPaths = append(updatedPaths, fmt.Sprintf("%s/%s", directory, torrent.AccessKey))
				}
				// torrent responses
				cacheKey := directory + "/" + torrent.AccessKey
				newHtml := strings.ReplaceAll(html, "$dir", directory)
				t.ResponseCache.Set(cacheKey+".html", &newHtml, 1)
				newDav := strings.ReplaceAll(dav, "$dir", directory)
				t.ResponseCache.Set(cacheKey+".dav", &newDav, 1)
			})
		})

		// delete torrents that no longer exist
		oldAccessKeys := oldTorrents.Keys()
		for _, oldAccessKey := range oldAccessKeys {
			if _, ok := newSet.Get(oldAccessKey); !ok {
				t.DirectoryMap.IterCb(func(_ string, torrents cmap.ConcurrentMap[string, *Torrent]) {
					torrents.Remove(oldAccessKey)
				})
				t.log.Infof("Deleted torrent: %s\n", oldAccessKey)
			}
		}

		t.updateDirectoryResponsesCache()
		t.log.Infof("Compiled into %d torrents, %d were missing info", oldTorrents.Count(), noInfoCount)
		t.SetNewLatestState(t.getCurrentState())

		if t.Config.EnableRepair() {
			t.log.Info("Checking for torrents to repair")
			t.repairAll()
			t.log.Info("Finished checking for torrents to repair")
		} else {
			t.log.Info("Repair is disabled, skipping repair check")
		}

		_ = t.workerPool.Submit(func() { OnLibraryUpdateHook(updatedPaths, t.Config, t.log) })
		t.log.Info("Finished refreshing torrents")
	}
}

// getMoreInfo gets original name, size and files for a torrent. It consults
// the in-memory info cache, then the on-disk file cache, and only then the API.
// Returns nil when the info cannot be fetched.
func (t *TorrentManager) getMoreInfo(rdTorrent realdebrid.Torrent) *Torrent {
	infoCache, _ := t.DirectoryMap.Get(INT_INFO_CACHE)
	if infoCache.Has(rdTorrent.ID) {
		tor, _ := infoCache.Get(rdTorrent.ID)
		return tor
	}

	var info *realdebrid.TorrentInfo
	var err error

	// file cache: only trust it when the link set still matches the live torrent
	torrentFromFile := t.readTorrentFromFile(rdTorrent.ID)
	if torrentFromFile != nil && len(torrentFromFile.ID) > 0 &&
		len(torrentFromFile.Links) > 0 &&
		len(torrentFromFile.Links) == len(rdTorrent.Links) &&
		torrentFromFile.Links[0] == rdTorrent.Links[0] {
		info = torrentFromFile
		info.Progress = rdTorrent.Progress
	} else {
		torrentFromFile = nil
	}

	if info == nil {
		info, err = t.Api.GetTorrentInfo(rdTorrent.ID)
		if err != nil {
			t.log.Warnf("Cannot get info for id=%s: %v\n", rdTorrent.ID, err)
			return nil
		}
	}

	// SelectedFiles is a subset of Files with only the selected ones
	// it also has a Link field, which can be empty
	// if it is empty, it means the file is no longer available
	// Files+Links together are the same as SelectedFiles
	var selectedFiles []*File
	// if some Links are empty, we need to repair it
	for _, file := range info.Files {
		if file.Selected == 0 {
			continue
		}
		selectedFiles = append(selectedFiles, &File{
			File:  file,
			Added: info.Added,
			Ended: info.Ended,
			Link:  "", // no link yet
		})
	}
	if len(selectedFiles) > len(info.Links) && info.Progress == 100 {
		t.log.Warnf("Torrent id=%s is partly expired, it has %d selected files but only %d links", info.ID, len(selectedFiles), len(info.Links))
	} else if len(selectedFiles) == len(info.Links) {
		// all links are still intact! good!
		// (the original code had a stray i++ here; the range clause already
		// advances i, so it was a no-op and has been removed)
		for i, file := range selectedFiles {
			file.Link = info.Links[i]
		}
	}

	torrent := Torrent{
		AccessKey:   t.getName(info.Name, info.OriginalName),
		LatestAdded: info.Added,
		Instances:   []realdebrid.TorrentInfo{*info},
	}
	torrent.SelectedFiles = cmap.New[*File]()
	for _, file := range selectedFiles {
		torrent.SelectedFiles.Set(filepath.Base(file.Path), file)
	}
	if len(selectedFiles) > 0 && torrentFromFile == nil {
		t.writeTorrentToFile(info) // only when there are selected files, else it's useless
	}
	infoCache.Set(rdTorrent.ID, &torrent)
	return &torrent
}

// getName decides which display name a torrent gets, depending on the
// retain_rd_torrent_name / retain_folder_name_extension config flags.
func (t *TorrentManager) getName(name, originalName string) string {
	if t.Config.EnableRetainRDTorrentName() {
		return name
	}
	if t.Config.EnableRetainFolderNameExtension() && strings.Contains(name, originalName) {
		return name
	}
	// drop a known video extension from the original name
	ret := strings.TrimSuffix(originalName, ".mp4")
	ret = strings.TrimSuffix(ret, ".mkv")
	return ret
}

// writeTorrentToFile persists torrent info to data/<id>.bin (gob-encoded,
// tagged with the current cache format version).
func (t *TorrentManager) writeTorrentToFile(torrent *realdebrid.TorrentInfo) error {
	filePath := "data/" + torrent.ID + ".bin"
	file, err := os.Create(filePath)
	if err != nil {
		return fmt.Errorf("failed creating file: %w", err)
	}
	defer file.Close()

	w := bufio.NewWriter(file)
	torrent.Version = t.requiredVersion
	if err := gob.NewEncoder(w).Encode(torrent); err != nil {
		return fmt.Errorf("failed encoding torrent: %w", err)
	}
	// flush explicitly so a short write surfaces as an error instead of being
	// silently dropped by a deferred Flush
	if err := w.Flush(); err != nil {
		return fmt.Errorf("failed flushing torrent file: %w", err)
	}
	return nil
}

// readTorrentFromFile loads cached torrent info from data/<id>.bin.
// Any failure (missing file, decode error, stale version) yields nil.
func (t *TorrentManager) readTorrentFromFile(torrentID string) *realdebrid.TorrentInfo {
	filePath := "data/" + torrentID + ".bin"
	file, err := os.Open(filePath)
	if err != nil {
		return nil // missing or unreadable cache file: just re-fetch from the API
	}
	defer file.Close()

	var torrent realdebrid.TorrentInfo
	if err := gob.NewDecoder(bufio.NewReader(file)).Decode(&torrent); err != nil {
		return nil
	}
	if torrent.Version != t.requiredVersion {
		return nil
	}
	return &torrent
}

// organizeChaos unrestricts the given links concurrently and matches each
// resulting download back to a selected file by filename. Unmatched but
// streamable downloads are appended as synthetic files; an unmatched
// non-streamable download marks the torrent as chaotic (RD is returning
// something other than the requested selection, e.g. a rar).
func (t *TorrentManager) organizeChaos(links []string, selectedFiles []*File) ([]*File, bool) {
	type Result struct {
		Response *realdebrid.Download
	}
	resultsChan := make(chan Result, len(links))
	var wg sync.WaitGroup
	for _, link := range links {
		wg.Add(1)
		link := link // redeclare to avoid closure on loop variable
		// Use the existing worker pool to submit tasks
		_ = t.workerPool.Submit(func() {
			defer wg.Done()
			if t.DownloadCache.Has(link) {
				download, _ := t.DownloadCache.Get(link)
				resultsChan <- Result{Response: download}
				return
			}
			resp := t.Api.UnrestrictUntilOk(link, t.Config.ShouldServeFromRclone())
			resultsChan <- Result{Response: resp}
		})
	}
	wg.Wait()
	close(resultsChan)

	isChaotic := false
	for result := range resultsChan {
		if result.Response == nil {
			continue
		}
		found := false
		for _, file := range selectedFiles {
			if strings.Contains(file.Path, result.Response.Filename) {
				file.Link = result.Response.Link
				found = true
			}
		}
		if !found {
			if result.Response.Streamable == 1 {
				now := time.Now().Format(time.RFC3339)
				selectedFiles = append(selectedFiles, &File{
					File: realdebrid.File{
						ID:       math.MaxInt32,
						Path:     result.Response.Filename,
						Bytes:    result.Response.Filesize,
						Selected: 1,
					},
					Added: now,
					Ended: now,
					Link:  result.Response.Link,
				})
			} else {
				isChaotic = true
			}
		}
	}
	return selectedFiles, isChaotic
}

// repairAll scans every torrent for files flagged "repair" and repairs them,
// and deletes torrents whose files are all flagged "unselect".
func (t *TorrentManager) repairAll() {
	proceed := t.canCapacityHandle() // blocks for approx 45 minutes if active torrents are full
	if !proceed {
		t.log.Error("Reached the max number of active torrents, cannot start repair")
		return
	}
	var toDelete []string
	allTorrents, _ := t.DirectoryMap.Get(INT_ALL)
	allTorrents.IterCb(func(_ string, torrent *Torrent) {
		if torrent.AnyInProgress() {
			t.log.Debugf("Skipping %s for repairs because it is in progress", torrent.AccessKey)
			return
		}
		forRepair := false
		unselected := 0
		torrent.SelectedFiles.IterCb(func(_ string, file *File) {
			if file.Link == "repair" && !forRepair {
				file.Link = "repairing"
				t.log.Debugf("Found a file to repair for torrent %s", torrent.AccessKey)
				forRepair = true
			}
			if file.Link == "unselect" {
				unselected++
			}
		})
		if forRepair {
			t.log.Infof("Repairing %s", torrent.AccessKey)
			t.Repair(torrent.AccessKey)
		}
		if unselected == torrent.SelectedFiles.Count() && unselected > 0 {
			t.log.Infof("Deleting %s", torrent.AccessKey)
			toDelete = append(toDelete, torrent.AccessKey)
		}
	})
	for _, accessKey := range toDelete {
		t.Delete(accessKey)
	}
}

// Delete removes a torrent (all its Real-Debrid instances) and purges it from
// every directory map.
func (t *TorrentManager) Delete(accessKey string) {
	infoCache, _ := t.DirectoryMap.Get(INT_INFO_CACHE)
	t.log.Infof("Deleting torrent %s", accessKey)
	allTorrents, _ := t.DirectoryMap.Get(INT_ALL)
	if torrent, ok := allTorrents.Get(accessKey); ok {
		for _, instance := range torrent.Instances {
			infoCache.Remove(instance.ID)
			t.Api.DeleteTorrent(instance.ID)
		}
	}
	t.DirectoryMap.IterCb(func(_ string, torrents cmap.ConcurrentMap[string, *Torrent]) {
		if _, ok := torrents.Get(accessKey); ok {
			torrents.Remove(accessKey)
		}
	})
}

// Repair tries to restore the missing links of a torrent, first by
// re-evaluating all of its links, then by re-inserting the torrent.
func (t *TorrentManager) Repair(accessKey string) {
	if !t.Config.EnableRepair() {
		t.log.Warn("Repair is disabled; if you do not have other zurg instances running, you should enable repair")
		return
	}
	allTorrents, _ := t.DirectoryMap.Get(INT_ALL)
	torrent, _ := allTorrents.Get(accessKey)
	if torrent == nil {
		t.log.Warnf("Cannot find torrent %s anymore so we are not repairing it", accessKey)
		return
	}
	if torrent.AnyInProgress() {
		t.log.Infof("Torrent %s is in progress, cannot repair", torrent.AccessKey)
		return
	}

	// make the file messy
	t.log.Infof("Evaluating whole torrent to find the correct files for torrent: %s", torrent.AccessKey)
	var selectedFiles []*File
	var isChaotic bool
	var links []string
	streamableCount := 0
	torrent.SelectedFiles.IterCb(func(_ string, file *File) {
		if utils.IsStreamable(file.Path) {
			streamableCount++
		}
		fileCopy := &File{
			File:  file.File,
			Added: file.Added,
			Ended: file.Ended,
			Link:  file.Link,
		}
		selectedFiles = append(selectedFiles, fileCopy)
		if strings.HasPrefix(fileCopy.Link, "http") {
			links = append(links, fileCopy.Link)
		}
		fileCopy.Link = "" // empty the links = chaos!
	})

	// chaotic file means RD will not output the desired file selection
	// e.g. even if we select just a single mkv, it will output a rar
	selectedFiles, isChaotic = t.organizeChaos(links, selectedFiles)
	if isChaotic {
		t.log.Warnf("Torrent %s is always returning an unplayable rar file (it will no longer show up in your directories, zurg suggests you delete it)", torrent.AccessKey)
		t.DirectoryMap.IterCb(func(_ string, torrents cmap.ConcurrentMap[string, *Torrent]) {
			torrents.Remove(torrent.AccessKey)
		})
		t.ScheduleForRefresh()
		return
	} else if streamableCount == 1 {
		t.log.Warnf("Torrent %s only file has expired (it will no longer show up in your directories, zurg suggests you delete it)", torrent.AccessKey)
		t.log.Debugf("You can try fixing it yourself magnet:?xt=urn:btih:%s", torrent.Instances[0].Hash)
		t.DirectoryMap.IterCb(func(_ string, torrents cmap.ConcurrentMap[string, *Torrent]) {
			torrents.Remove(torrent.AccessKey)
		})
		t.ScheduleForRefresh()
		return
	}

	// write back the links organizeChaos recovered
	for _, newFile := range selectedFiles {
		if file, exists := torrent.SelectedFiles.Get(filepath.Base(newFile.Path)); exists {
			file.Link = newFile.Link
		} else {
			torrent.SelectedFiles.Set(filepath.Base(newFile.Path), newFile)
		}
	}

	proceed := t.canCapacityHandle() // blocks for approx 45 minutes if active torrents are full
	if !proceed {
		t.log.Error("Reached the max number of active torrents, cannot continue with the repair")
		return
	}

	// first solution: add the same selection, maybe it can be fixed by reinsertion?
	if t.reinsertTorrent(torrent, "") {
		t.log.Infof("Successfully downloaded torrent %s to repair it", torrent.AccessKey)
		return
	}

	// if all the selected files are missing but there are other streamable files
	var missingFiles []File
	torrent.SelectedFiles.IterCb(func(_ string, file *File) {
		if !strings.HasPrefix(file.Link, "http") {
			missingFiles = append(missingFiles, *file)
		}
	})

	// if we download a single file, it will be named differently
	// so we need to download 1 extra file to preserve the name
	// this is only relevant if we enable retain_rd_torrent_name
	if len(missingFiles) == 1 && streamableCount > 1 {
		// add the first file link encountered with a prefix of http
		for _, file := range torrent.SelectedFiles.Items() {
			if strings.HasPrefix(file.Link, "http") {
				missingFiles = append(missingFiles, *file)
				break
			}
		}
	}

	if len(missingFiles) > 0 {
		t.log.Infof("Redownloading in multiple batches the %d missing files for torrent %s", len(missingFiles), torrent.AccessKey)
		// if not, last resort: add only the missing files but do it in 2 batches
		half := len(missingFiles) / 2
		missingFiles1 := strings.Join(getFileIDs(missingFiles[:half]), ",")
		missingFiles2 := strings.Join(getFileIDs(missingFiles[half:]), ",")
		if missingFiles1 != "" {
			t.reinsertTorrent(torrent, missingFiles1)
		}
		if missingFiles2 != "" {
			t.reinsertTorrent(torrent, missingFiles2)
		}
	} else {
		t.log.Warnf("Torrent %s has no missing files to repair", torrent.AccessKey)
	}
}

// reinsertTorrent re-adds the torrent's magnet and selects missingFiles (a
// comma-separated list of file IDs; when empty, the files without an http
// link are selected). Returns true when the reinsertion looks successful.
func (t *TorrentManager) reinsertTorrent(torrent *Torrent, missingFiles string) bool {
	// if missingFiles is not provided, missing files means missing links
	if missingFiles == "" {
		var ids []string
		torrent.SelectedFiles.IterCb(func(_ string, file *File) {
			if !strings.HasPrefix(file.Link, "http") {
				ids = append(ids, fmt.Sprintf("%d", file.ID))
			}
		})
		if len(ids) == 0 {
			return false
		}
		missingFiles = strings.Join(ids, ",")
	}

	// redownload torrent
	resp, err := t.Api.AddMagnetHash(torrent.Instances[0].Hash)
	if err != nil {
		t.log.Warnf("Cannot redownload torrent: %v", err)
		return false
	}
	time.Sleep(1 * time.Second)

	// select files
	newTorrentID := resp.ID
	err = t.Api.SelectTorrentFiles(newTorrentID, missingFiles)
	if err != nil {
		t.log.Warnf("Cannot start redownloading: %v", err)
		t.Api.DeleteTorrent(newTorrentID)
		return false
	}
	time.Sleep(10 * time.Second)

	// see if the torrent is ready
	info, err := t.Api.GetTorrentInfo(newTorrentID)
	if err != nil {
		t.log.Warnf("Cannot get info on redownloaded torrent id=%s : %v", newTorrentID, err)
		t.Api.DeleteTorrent(newTorrentID)
		return false
	}
	if info.Status == "magnet_error" || info.Status == "error" || info.Status == "virus" || info.Status == "dead" {
		t.log.Warnf("The redownloaded torrent id=%s is in error state: %s", newTorrentID, info.Status)
		t.Api.DeleteTorrent(newTorrentID)
		return false
	}
	if info.Progress != 100 {
		t.log.Infof("Torrent id=%s is not cached anymore so we have to wait until completion (this should fix the issue already)", info.ID)
		return true
	}
	missingCount := len(strings.Split(missingFiles, ","))
	if len(info.Links) != missingCount {
		t.log.Infof("It did not fix the issue for id=%s, only got %d files but we need %d, undoing", info.ID, len(info.Links), missingCount)
		t.Api.DeleteTorrent(newTorrentID)
		return false
	}
	t.log.Infof("Repair successful id=%s", newTorrentID)
	return true
}

// backoffDelay returns an exponential backoff (base * 2^retry) capped at max.
func backoffDelay(retry int, base, max time.Duration) time.Duration {
	delay := time.Duration(math.Pow(2, float64(retry))) * base
	if delay > max {
		delay = max
	}
	return delay
}

// canCapacityHandle waits (with exponential backoff, up to ~45 minutes) until
// Real-Debrid has capacity for another active torrent. Returns false when the
// retry budget is exhausted.
func (t *TorrentManager) canCapacityHandle() bool {
	// max waiting time is 45 minutes
	const maxRetries = 50
	const baseDelay = 1 * time.Second
	const maxDelay = 60 * time.Second
	retryCount := 0
	for {
		count, err := t.Api.GetActiveTorrentCount()
		if err != nil {
			t.log.Warnf("Cannot get active downloads count: %v", err)
			if retryCount >= maxRetries {
				t.log.Error("Max retries reached. Exiting.")
				return false
			}
			time.Sleep(backoffDelay(retryCount, baseDelay, maxDelay))
			retryCount++
			continue
		}
		if count.DownloadingCount < count.MaxNumberOfTorrents {
			return true
		}
		delay := backoffDelay(retryCount, baseDelay, maxDelay)
		t.log.Infof("We have reached the max number of active torrents, waiting for %s before retrying", delay)
		if retryCount >= maxRetries {
			t.log.Error("Max retries reached, exiting")
			return false
		}
		time.Sleep(delay)
		retryCount++
	}
}

// updateDirectoryResponsesCache rebuilds the cached directory-level WebDAV and
// HTML listings for every directory (torrents still in progress are skipped).
//
// NOTE(review): the HTML/XML string literals in this function were garbled in
// the source under review (markup stripped by extraction); they have been
// reconstructed as minimal valid markup with the same fmt verbs and argument
// order — confirm against the upstream zurg repository.
func (t *TorrentManager) updateDirectoryResponsesCache() {
	t.DirectoryMap.IterCb(func(directory string, torrents cmap.ConcurrentMap[string, *Torrent]) {
		allKeys := torrents.Keys()
		sort.Strings(allKeys)
		davRet := ""
		htmlRet := ""
		for _, accessKey := range allKeys {
			if tor, ok := torrents.Get(accessKey); ok {
				if tor.AnyInProgress() {
					continue
				}
				davRet += dav.Directory(tor.AccessKey, tor.LatestAdded)
				htmlRet += fmt.Sprintf(`<li><a href="/%s/%s/">%s</a></li>`, directory, tor.AccessKey, tor.AccessKey)
			}
		}
		cacheKey := directory
		davRet = `<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:">` +
			dav.BaseDirectory(directory, "") + dav.BaseDirectory(directory, "") +
			davRet + `</D:multistatus>`
		t.ResponseCache.Set(cacheKey+".dav", &davRet, 1)
		htmlRet = `<html><body><ul>` + htmlRet
		t.ResponseCache.Set(cacheKey+".html", &htmlRet, 1)
	})
}

// buildTorrentResponses renders the WebDAV and HTML listings for a single
// torrent. "$dir" is used as a placeholder and substituted with the actual
// directory name by the callers.
//
// NOTE(review): as in updateDirectoryResponsesCache, the markup literals here
// were garbled in the source under review and have been reconstructed —
// confirm against the upstream zurg repository.
func (t *TorrentManager) buildTorrentResponses(tor *Torrent) (string, string) {
	davRet := `<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:">` +
		dav.BaseDirectory(filepath.Join("$dir", tor.AccessKey), tor.LatestAdded)
	htmlRet := `<html><body><ol>`
	filenames := tor.SelectedFiles.Keys()
	sort.Strings(filenames)
	for _, filename := range filenames {
		file, _ := tor.SelectedFiles.Get(filename)
		if file == nil || !strings.HasPrefix(file.Link, "http") {
			// will be caught by torrent manager's repairAll
			// just skip it for now
			continue
		}
		davRet += dav.File(filename, file.Bytes, file.Ended)
		filePath := filepath.Join("$dir", tor.AccessKey, url.PathEscape(filename))
		htmlRet += fmt.Sprintf(`<li><a href="/%s">%s</a></li>`, filePath, filename)
	}
	davRet += `</D:multistatus>`
	return davRet, htmlRet
}

// AssignedDirectoryCb invokes cb with every configured directory whose
// conditions this torrent meets (at most once per directory group).
func (t *TorrentManager) AssignedDirectoryCb(tor *Torrent, cb func(string)) {
	var torrentIDs []string
	for _, instance := range tor.Instances {
		torrentIDs = append(torrentIDs, instance.ID)
	}
	// get filenames needed for directory conditions
	filenames := tor.SelectedFiles.Keys()
	// Map torrents to directories
	switch t.Config.GetVersion() {
	case "v1":
		configV1 := t.Config.(*config.ZurgConfigV1)
		for _, directories := range configV1.GetGroupMap() {
			for _, directory := range directories {
				if t.Config.MeetsConditions(directory, tor.AccessKey, torrentIDs, filenames) {
					cb(directory)
					break
				}
			}
		}
	}
}