revert: fixed window ratelimiting #2116
```diff
@@ -20,9 +20,8 @@ func (s *service) Mitigate(ctx context.Context, req *ratelimitv1.MitigateRequest
 	duration := time.Duration(req.Duration) * time.Millisecond
 	bucket, _ := s.getBucket(bucketKey{req.Identifier, req.Limit, duration})
 	bucket.Lock()
-	defer bucket.Unlock()
-
 	bucket.windows[req.Window.GetSequence()] = req.Window
+	bucket.Unlock()
 
 	return &ratelimitv1.MitigateResponse{}, nil
 }
```
```diff
@@ -51,12 +50,14 @@ func (s *service) broadcastMitigation(req mitigateWindowRequest) {
 		return
 	}
 	for _, peer := range peers {
-		_, err := peer.client.Mitigate(ctx, connect.NewRequest(&ratelimitv1.MitigateRequest{
-			Identifier: req.identifier,
-			Limit:      req.limit,
-			Duration:   req.duration.Milliseconds(),
-			Window:     req.window,
-		}))
+		_, err := s.mitigateCircuitBreaker.Do(ctx, func(innerCtx context.Context) (*connect.Response[ratelimitv1.MitigateResponse], error) {
+			return peer.client.Mitigate(innerCtx, connect.NewRequest(&ratelimitv1.MitigateRequest{
+				Identifier: req.identifier,
+				Limit:      req.limit,
+				Duration:   req.duration.Milliseconds(),
+				Window:     req.window,
+			}))
+		})
 		if err != nil {
 			s.logger.Err(err).Msg("failed to call mitigate")
 		} else {
```
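The hunk above routes each peer RPC through `s.mitigateCircuitBreaker.Do`, so repeated failures against one peer fail fast instead of stalling the whole broadcast loop. As a rough illustration of the pattern only (this is not the PR's implementation; `maxFailures`, `cooldown`, and `ErrOpen` are invented for the sketch), a breaker with the same `Do` shape might look like:

```go
package circuitbreaker

import (
	"context"
	"errors"
	"sync"
	"time"
)

// ErrOpen is returned while the breaker refuses to attempt calls.
var ErrOpen = errors.New("circuit breaker is open")

type CircuitBreaker[R any] struct {
	mu          sync.Mutex
	failures    int
	maxFailures int           // consecutive failures before tripping (assumed)
	openedAt    time.Time     // when the breaker last tripped
	cooldown    time.Duration // how long to fail fast after tripping (assumed)
}

func New[R any](maxFailures int, cooldown time.Duration) *CircuitBreaker[R] {
	return &CircuitBreaker[R]{maxFailures: maxFailures, cooldown: cooldown}
}

// Do runs fn unless the breaker is open. While open, calls fail fast with
// ErrOpen; once the cooldown elapses, one trial call is let through.
func (cb *CircuitBreaker[R]) Do(ctx context.Context, fn func(context.Context) (R, error)) (R, error) {
	var zero R

	cb.mu.Lock()
	open := cb.failures >= cb.maxFailures && time.Since(cb.openedAt) < cb.cooldown
	cb.mu.Unlock()
	if open {
		return zero, ErrOpen // don't hammer an unhealthy peer
	}

	res, err := fn(ctx)

	cb.mu.Lock()
	defer cb.mu.Unlock()
	if err != nil {
		cb.failures++
		if cb.failures >= cb.maxFailures {
			cb.openedAt = time.Now() // (re)trip the breaker
		}
		return zero, err
	}
	cb.failures = 0 // any success closes the breaker
	return res, nil
}
```

With this shape, `s.mitigateCircuitBreaker` would be constructed as something like `New[*connect.Response[ratelimitv1.MitigateResponse]](5, time.Minute)`; the thresholds here are pure guesses.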
Review comment: Consider using a context with timeout to prevent hanging calls. `broadcastMitigation` currently uses `context.Background()`, which carries no deadline, so a call to an unresponsive peer can block indefinitely. Apply this diff to use a context with timeout:

```diff
 func (s *service) broadcastMitigation(req mitigateWindowRequest) {
-	ctx := context.Background()
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
 	node, err := s.cluster.FindNode(bucketKey{req.identifier, req.limit, req.duration}.toString())
 	if err != nil {
 		s.logger.Err(err).Msg("failed to find node")
 		return
 	}
```

Ensure that the context with timeout is used in the circuit breaker call:

```diff
 	for _, peer := range peers {
 		_, err := s.mitigateCircuitBreaker.Do(ctx, func(innerCtx context.Context) (*connect.Response[ratelimitv1.MitigateResponse], error) {
-			return peer.client.Mitigate(innerCtx, connect.NewRequest(&ratelimitv1.MitigateRequest{
+			return peer.client.Mitigate(ctx, connect.NewRequest(&ratelimitv1.MitigateRequest{
 				Identifier: req.identifier,
 				Limit:      req.limit,
 				Duration:   req.duration.Milliseconds(),
 				Window:     req.window,
 			}))
 		})
```
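For reference, here is a standalone sketch (assumed names, not from the PR) of the mechanism the suggestion relies on: `context.WithTimeout` arms `ctx.Done()`, so a call that would otherwise block forever returns `context.DeadlineExceeded` once the deadline passes:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// slowMitigate stands in for peer.client.Mitigate against an unresponsive
// peer: it returns only when the (late) reply arrives or the context ends.
func slowMitigate(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Second): // simulated peer that answers too late
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Without the timeout this call would block for the full 10 seconds
	// (or forever, for a truly dead peer). With it, we regain control at 5s.
	if err := slowMitigate(ctx); err != nil {
		fmt.Println("failed to call mitigate:", err) // context deadline exceeded
	}
}
```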
Review comment: Consider using `defer` for unlocking to ensure the lock is always released. Replacing `defer bucket.Unlock()` with an explicit `bucket.Unlock()` may lead to the lock not being released if a panic occurs between the lock and unlock calls. Using `defer` ensures that the lock is always released, even in the event of an error or panic. Apply this diff to revert to using `defer` (committable suggestion).
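The committable suggestion itself is collapsed in the page capture; going by the first hunk above, reverting to `defer` would presumably look like this (a reconstruction, not the reviewer's verbatim patch):

```diff
 	bucket.Lock()
-	bucket.windows[req.Window.GetSequence()] = req.Window
-	bucket.Unlock()
+	defer bucket.Unlock()
+
+	bucket.windows[req.Window.GetSequence()] = req.Window
 
 	return &ratelimitv1.MitigateResponse{}, nil
```

The failure mode the reviewer describes can be shown in isolation. This demo is hypothetical and not from the PR; it contrasts an explicit unlock, which is skipped when the critical section panics, with a deferred unlock, which runs during panic unwinding:

```go
package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex

// update stands in for the code between Lock and Unlock; here it always panics.
func update() {
	panic("boom")
}

// explicitUnlock mirrors the PR's pattern: the panic skips mu.Unlock(),
// leaving the mutex locked forever.
func explicitUnlock() {
	defer func() { recover() }() // contain the panic for the demo
	mu.Lock()
	update()
	mu.Unlock() // never reached
}

// deferredUnlock mirrors the reviewer's suggestion: the deferred Unlock
// runs while the panic unwinds, so the mutex is released.
func deferredUnlock() {
	defer func() { recover() }()
	mu.Lock()
	defer mu.Unlock()
	update()
}

func main() {
	deferredUnlock()
	fmt.Println("after deferredUnlock, TryLock:", mu.TryLock()) // true: lock released
	mu.Unlock()

	explicitUnlock()
	fmt.Println("after explicitUnlock, TryLock:", mu.TryLock()) // false: mutex leaked
}
```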