middleware_retry.go (forked from digitalocean/go-workers2)
package workers

import (
	"context"
	"fmt"
	"math"
	"math/rand"
	"time"
)

// RetriesExhaustedFunc gets executed when retry attempts have been exhausted.
type RetriesExhaustedFunc func(queue string, message *Msg, err error)
const (
	// DefaultRetryMax is the default maximum number of retries for a job.
	DefaultRetryMax = 25

	// RetryTimeFormat is the default format for retry timestamps.
	RetryTimeFormat = "2006-01-02 15:04:05 MST"
)
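// retryProcessError handles a failed job. If the message has retries enabled
// and has not yet exhausted its retry limit, it is scheduled back onto the
// retry set with an exponential backoff delay; a failure to enqueue the retry
// leaves the message unacknowledged so it is not lost. Once retries are
// exhausted, the manager's retries-exhausted handlers are invoked instead.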
func retryProcessError(queue string, mgr *Manager, message *Msg, err error) error {
	if !retry(message) {
		return err
	}

	if retryCount(message) < retryMax(message) {
		message.Set("queue", queue)
		message.Set("error_message", fmt.Sprintf("%v", err))
		retryCount := incrementRetry(message)

		waitDuration := durationToSecondsWithNanoPrecision(
			time.Duration(
				secondsToDelay(retryCount),
			) * time.Second,
		)

		err = mgr.opts.store.EnqueueRetriedMessage(context.Background(), nowToSecondsWithNanoPrecision()+waitDuration, message.ToJson())

		// If we can't add the job to the retry queue,
		// then we shouldn't acknowledge the job, otherwise
		// it'll disappear into the void.
		if err != nil {
			message.ack = false
		}
	} else {
		for _, retriesExhaustedHandler := range mgr.retriesExhaustedHandlers {
			retriesExhaustedHandler(queue, message, err)
		}
	}

	return err
}
// RetryMiddleware is middleware that retries failed jobs. It also recovers
// panics from the job and converts them to errors so they follow the same
// retry path.
func RetryMiddleware(queue string, mgr *Manager, next JobFunc) JobFunc {
	return func(message *Msg) (err error) {
		defer func() {
			if e := recover(); e != nil {
				var ok bool
				if err, ok = e.(error); !ok {
					err = fmt.Errorf("%v", e)
				}

				if err != nil {
					err = retryProcessError(queue, mgr, message, err)
				}
			}
		}()

		err = next(message)
		if err != nil {
			err = retryProcessError(queue, mgr, message, err)
		}

		return
	}
}
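// retry reports whether the message has opted in to retries via its "retry"
// field; an absent or non-boolean value disables retries.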
func retry(message *Msg) bool {
	retry := false
	if param, err := message.Get("retry").Bool(); err == nil {
		retry = param
	}

	return retry
}
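// retryCount returns the message's current "retry_count", or 0 if it is not set.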
func retryCount(message *Msg) int {
	count, _ := message.Get("retry_count").Int()
	return count
}
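// retryMax returns the message's "retry_max" when it is present and
// non-negative, falling back to DefaultRetryMax otherwise.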
func retryMax(message *Msg) int {
	max := DefaultRetryMax
	if messageRetryMax, err := message.Get("retry_max").Int(); err == nil && messageRetryMax >= 0 {
		max = messageRetryMax
	}

	return max
}
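// incrementRetry bumps the message's "retry_count" and stamps "failed_at" on
// the first failure or "retried_at" on subsequent failures, returning the new
// count.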
func incrementRetry(message *Msg) (retryCount int) {
	retryCount = 0

	if count, err := message.Get("retry_count").Int(); err != nil {
		message.Set("failed_at", time.Now().UTC().Format(RetryTimeFormat))
	} else {
		message.Set("retried_at", time.Now().UTC().Format(RetryTimeFormat))
		retryCount = count + 1
	}

	message.Set("retry_count", retryCount)

	return
}
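// secondsToDelay computes the retry backoff in seconds as count^4 + 15 plus a
// random jitter of up to 30*(count+1) seconds, similar to Sidekiq's default
// retry schedule: the first retry waits roughly 15-45 seconds, while count 10
// already waits just under three hours.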
func secondsToDelay(count int) int {
	power := math.Pow(float64(count), 4)
	return int(power) + 15 + (rand.Intn(30) * (count + 1))
}