
Commit 0560d0f

Add 504 to the list of responses we retry on
This is "gateway timeout" which we do see from Docker Hub from time to time (probably due to some HTTP middleware, redeploys, etc).
tianon committed Jan 15, 2025
1 parent e7b1446 commit 0560d0f
Showing 1 changed file with 3 additions and 2 deletions.
registry/rate-limits.go: 3 additions & 2 deletions
@@ -2,6 +2,7 @@ package registry
 
 import (
 	"net/http"
+	"slices"
 	"time"
 
 	"golang.org/x/time/rate"
@@ -24,7 +25,7 @@ func (d *rateLimitedRetryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
 	// cap request retries at once per second
 	requestRetryLimiter = rate.NewLimiter(rate.Every(time.Second), 1)
 
-	// if we see 3x (503 or 502 or 500) during retry, we should bail
+	// if we see 50x three times during retry, we should bail
 	maxTry50X = 3
 
 	ctx = req.Context()
@@ -54,7 +55,7 @@ func (d *rateLimitedRetryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
 	}
 
 	// certain status codes should result in a few auto-retries (especially with the automatic retry delay this injects), but up to a limit so we don't contribute to the "thundering herd" too much in a serious outage
-	if (res.StatusCode == 503 || res.StatusCode == 502 || res.StatusCode == 500) && maxTry50X > 1 {
+	if maxTry50X > 1 && slices.Contains([]int{500, 502, 503, 504}, res.StatusCode) {
 		maxTry50X--
 		doRetry = true
 		// no need to eat up the rate limiter tokens as we do for 429 because this is not a rate limiting error (and we have the "requestRetryLimiter" that separately limits our retries of *this* request)
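For reference, the new status-code check behaves like the following standalone sketch (the retryable50X helper name is hypothetical, for illustration only; in the committed code the check is inline in RoundTrip). slices.Contains is from the standard library and requires Go 1.21+:

package main

import (
	"fmt"
	"slices"
)

// retryable50X reports whether a response status code is one of the
// transient server errors worth an automatic retry (hypothetical helper
// name, not from the commit).
func retryable50X(statusCode int) bool {
	return slices.Contains([]int{500, 502, 503, 504}, statusCode)
}

func main() {
	for _, code := range []int{200, 429, 500, 504} {
		fmt.Printf("%d retryable: %v\n", code, retryable50X(code))
	}
}

Note that in the committed condition, checking maxTry50X > 1 first means the && short-circuits and the status-code lookup is skipped entirely once the retry budget is spent.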
