Retry function handles fair usage limit

This commit is contained in:
Ben Adrian Sarmiento
2024-07-07 23:15:15 +02:00
parent 1373bbb975
commit f48f352816

View File

@@ -27,7 +27,7 @@ type HTTPClient struct {
maxRetries int
timeoutSecs int
rateLimitSleepSecs int
backoff func(attempt int) time.Duration
backoff func(int, int) time.Duration
dnsCache cmap.ConcurrentMap[string, string]
hosts []string
log *logutil.Logger
@@ -65,7 +65,7 @@ func NewHTTPClient(
client: &http.Client{},
maxRetries: maxRetries,
timeoutSecs: timeoutSecs,
rateLimitSleepSecs: 4,
rateLimitSleepSecs: 2,
backoff: backoffFunc,
dnsCache: cmap.New[string](),
hosts: hosts,
@@ -160,6 +160,7 @@ func (r *HTTPClient) Do(req *http.Request) (*http.Response, error) {
if resp != nil && resp.StatusCode >= http.StatusBadRequest {
body, _ := io.ReadAll(resp.Body)
if req.Host == "api.real-debrid.com" {
// api servers
if body != nil {
var errResp ApiErrorResponse
jsonErr := json.Unmarshal(body, &errResp)
@@ -173,28 +174,20 @@ func (r *HTTPClient) Do(req *http.Request) (*http.Response, error) {
}
} else {
// download servers
errResp := DownloadErrorResponse{
err = &DownloadErrorResponse{
Message: resp.Header.Get("X-Error"),
Code: resp.StatusCode,
}
err = &errResp
}
}
incr := r.shouldRetry(req, resp, err, r.rateLimitSleepSecs)
if incr > 0 {
attempt += incr
if attempt > r.maxRetries {
err = fmt.Errorf("max retries exceeded: %w", err)
break
}
time.Sleep(r.backoff(attempt))
} else if incr == 0 {
time.Sleep(10 * time.Millisecond)
} else {
// don't retry anymore
incr := r.shouldRetry(req, resp, err, attempt, r.rateLimitSleepSecs)
if incr == -1 {
break
} else if incr == 0 {
continue
}
attempt += incr
}
return resp, err
}
@@ -247,77 +240,71 @@ func (r *HTTPClient) proxyDialer(proxyURL *url.URL) (proxy.Dialer, error) {
return nil, fmt.Errorf("unsupported proxy scheme: %s", proxyURL.Scheme)
}
func (r *HTTPClient) shouldRetry(req *http.Request, resp *http.Response, err error, rateLimitSleep int) int {
if strings.HasSuffix(req.URL.Path, "torrents/addMagnet") {
return -1 // don't retry to prevent duplicate torrents
// shouldRetry returns a number indicating whether the request should be retried
// -1: don't retry
// 0: retry indefinitely
// 1: retry until maxRetries
func (r *HTTPClient) shouldRetry(req *http.Request, resp *http.Response, err error, attempts, rateLimitSleep int) int {
if attempts >= r.maxRetries {
return -1
}
// assume that all addMagnet requests are always successful;
// don't retry to prevent duplicate torrents
if req.Host == "api.real-debrid.com" && strings.HasSuffix(req.URL.Path, "torrents/addMagnet") {
return -1
}
if apiErr, ok := err.(*ApiErrorResponse); ok {
switch apiErr.Code {
case -1: // Internal error
return 1
case 5: // Slow down (retry infinitely), default: 4 secs
time.Sleep(time.Duration(rateLimitSleep) * time.Second)
return 0
case 6: // Resource unreachable
return 1
case 17: // Hoster in maintenance
return 1
case 18: // Hoster limit reached
return 1
case 25: // Service unavailable
return 1
case 34: // Too many requests (retry infinitely), default: 4 secs
time.Sleep(time.Duration(rateLimitSleep) * time.Second)
case 5: // Slow down (retry infinitely)
case 34: // Too many requests (retry infinitely)
secs := r.backoff(attempts, rateLimitSleep)
r.log.Warnf("API rate limit reached, retrying in %d seconds", secs/time.Second)
time.Sleep(secs)
return 0
case 36: // Fair Usage Limit
time.Sleep(time.Duration(rateLimitSleep) * time.Second)
secs := r.backoff(attempts, rateLimitSleep)
r.log.Warnf("Fair usage limit reached, retrying in %d seconds", secs/time.Second)
time.Sleep(secs)
return 0
case -1: // Internal error
return 1
default:
return -1 // don't retry
return -1
}
} else if downloadErr, ok := err.(*DownloadErrorResponse); ok {
switch downloadErr.Message {
case "bytes_limit_reached": // 503
return -1
case "invalid_download_code": // 404
time.Sleep(time.Duration(rateLimitSleep) * time.Second)
secs := r.backoff(attempts, rateLimitSleep)
r.log.Warnf("Invalid download code, retrying in %d seconds", secs/time.Second)
time.Sleep(r.backoff(attempts, rateLimitSleep))
return 1
default:
return 1 // retry
}
}
if err != nil && strings.Contains(err.Error(), "timeout") {
return 1
}
// successful requests
if resp != nil {
if resp.StatusCode == http.StatusTooManyRequests {
// Too many requests: retry infinitely, default: 4 secs
time.Sleep(time.Duration(rateLimitSleep) * time.Second)
return 0
}
if resp.StatusCode >= http.StatusBadRequest && resp.StatusCode < http.StatusInternalServerError {
// other client errors: retry
return 1
}
if resp.StatusCode >= http.StatusInternalServerError {
// server errors: don't retry
return -1
}
okResponseCode := resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent
// if the request has a Range header but the server doesn't respond with a Content-Range header
hasRangeHeader := req.Header.Get("Range") != "" && !strings.HasPrefix(req.Header.Get("Range"), "bytes=0-")
if hasRangeHeader && okResponseCode && resp.Header.Get("Content-Range") == "" {
if okResponseCode && hasRangeHeader && resp.Header.Get("Content-Range") == "" {
time.Sleep(10 * time.Millisecond)
return 0
}
return -1 // don't retry
return -1
}
return 1
}
func backoffFunc(attempt int) time.Duration {
func backoffFunc(attempt, base int) time.Duration {
maxDuration := 60
backoff := int(math.Pow(2, float64(attempt)))
backoff := int(math.Pow(float64(base), float64(attempt+1)))
if backoff > maxDuration {
backoff = maxDuration
}