Skip to content

Commit

Permalink
Change log level of retryable errors to WARN
Browse files Browse the repository at this point in the history
  • Loading branch information
vitalif committed Oct 17, 2023
1 parent 89bb9b5 commit 3f9be08
Show file tree
Hide file tree
Showing 4 changed files with 35 additions and 26 deletions.
30 changes: 20 additions & 10 deletions internal/backend_s3.go
Original file line number Diff line number Diff line change
Expand Up @@ -516,8 +516,9 @@ func (s *S3Backend) getRequestId(r *request.Request) string {
func (s *S3Backend) HeadBlob(req *HeadBlobInput) (resp *HeadBlobOutput, err error) {
s.readBackoff(func(attempt int) error {
resp, err = s.tryHeadBlob(req)
if err != nil && shouldRetry(err) {
s3Log.Errorf("Error getting metadata of %v (attempt %v): %v\n", req.Key, attempt, err)
if err != nil {
fn := errorOrWarnf(s3Log, err)
fn("Error getting metadata of %v (attempt %v): %v\n", req.Key, attempt, err)
}
return err
})
Expand Down Expand Up @@ -558,8 +559,9 @@ func (s *S3Backend) tryHeadBlob(param *HeadBlobInput) (*HeadBlobOutput, error) {
func (s *S3Backend) ListBlobs(req *ListBlobsInput) (resp *ListBlobsOutput, err error) {
s.readBackoff(func(attempt int) error {
resp, err = s.tryListBlobs(req)
if err != nil && shouldRetry(err) {
s3Log.Errorf("Error listing objects with prefix=%v delimiter=%v start-after=%v max-keys=%v (attempt %v): %v\n",
if err != nil {
fn := errorOrWarnf(s3Log, err)
fn("Error listing objects with prefix=%v delimiter=%v start-after=%v max-keys=%v (attempt %v): %v\n",
NilStr(req.Prefix), NilStr(req.Delimiter), NilStr(req.StartAfter), NilUInt32(req.MaxKeys), attempt, err)
}
return err
Expand Down Expand Up @@ -682,7 +684,7 @@ func (s *S3Backend) mpuCopyPart(from string, to string, mpuId string, bytes stri

resp, err := s.UploadPartCopy(params)
if err != nil {
s3Log.Errorf("UploadPartCopy %v = %v", params, err)
s3Log.Warnf("UploadPartCopy %v = %v", params, err)
*errout = err
return
}
Expand Down Expand Up @@ -799,7 +801,7 @@ func (s *S3Backend) copyObjectMultipart(size int64, from string, to string, mpuI
req, _ := s.CompleteMultipartUploadRequest(params)
err = req.Send()
if err != nil {
s3Log.Errorf("Complete MPU %v = %v", params, err)
s3Log.Warnf("Complete MPU %v = %v", params, err)
} else {
requestId = s.getRequestId(req)
}
Expand Down Expand Up @@ -886,7 +888,7 @@ func (s *S3Backend) CopyBlob(param *CopyBlobInput) (*CopyBlobOutput, error) {
req.Config.HTTPClient.Timeout = 15 * time.Minute
err := req.Send()
if err != nil {
s3Log.Errorf("CopyObject %v = %v", params, err)
s3Log.Warnf("CopyObject %v = %v", params, err)
return nil, err
}

Expand All @@ -899,6 +901,13 @@ func shouldRetry(err error) bool {
err != syscall.EACCES && err != syscall.ENOTSUP && err != syscall.ERANGE
}

// errorOrWarnf selects the logging function matching err's severity:
// non-retryable errors are reported at ERROR level, while retryable
// ones (per shouldRetry) are only worth a WARN since a later attempt
// may succeed. The returned func has the usual Printf-style signature.
func errorOrWarnf(logger *cfg.LogHandle, err error) func(format string, args ...interface{}) {
	if !shouldRetry(err) {
		return logger.Errorf
	}
	return logger.Warnf
}

// FIXME: Add similar write backoff (now it's handled by file/dir code)
func (s *S3Backend) readBackoff(try func(attempt int) error) (err error) {
interval := s.flags.ReadRetryInterval
Expand Down Expand Up @@ -927,8 +936,9 @@ func (s *S3Backend) readBackoff(try func(attempt int) error) (err error) {
func (s *S3Backend) GetBlob(req *GetBlobInput) (resp *GetBlobOutput, err error) {
s.readBackoff(func(attempt int) error {
resp, err = s.tryGetBlob(req)
if err != nil && shouldRetry(err) {
log.Errorf("Error reading %v +%v of %v (attempt %v): %v", req.Start, req.Count, req.Key, attempt, err)
if err != nil {
fn := errorOrWarnf(s3Log, err)
fn("Error reading %v +%v of %v (attempt %v): %v", req.Start, req.Count, req.Key, attempt, err)
}
return err
})
Expand Down Expand Up @@ -1070,7 +1080,7 @@ func (s *S3Backend) MultipartBlobBegin(param *MultipartBlobBeginInput) (*Multipa

resp, err := s.CreateMultipartUpload(&mpu)
if err != nil {
s3Log.Errorf("CreateMultipartUpload %v = %v", param.Key, err)
s3Log.Warnf("CreateMultipartUpload %v = %v", param.Key, err)
return nil, err
}

Expand Down
7 changes: 3 additions & 4 deletions internal/dir.go
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,6 @@ func (parent *Inode) listObjectsSlurp(inode *Inode, startAfter string, sealEnd b
resp, err := cloud.ListBlobs(params)
if err != nil {
parent.fs.completeInflightListing(myList)
s3Log.Errorf("ListObjects %v = %v", params, err)
return
}
s3Log.Debug(resp)
Expand Down Expand Up @@ -1152,7 +1151,7 @@ func (inode *Inode) SendDelete() {
}
inode.recordFlushError(err)
if err != nil {
log.Errorf("Failed to delete object %v: %v", key, err)
log.Warnf("Failed to delete object %v: %v", key, err)
inode.mu.Unlock()
inode.fs.WakeupFlusher()
return
Expand Down Expand Up @@ -1414,7 +1413,7 @@ func (dir *Inode) SendMkDir() {
dir.IsFlushing -= dir.fs.flags.MaxParallelParts
dir.recordFlushError(err)
if err != nil {
log.Errorf("Failed to create directory object %v: %v", key, err)
log.Warnf("Failed to create directory object %v: %v", key, err)
dir.fs.WakeupFlusher()
return
}
Expand Down Expand Up @@ -1729,7 +1728,7 @@ func renameInCache(fromInode *Inode, newParent *Inode, to string) {
err = os.Rename(oldFileName, newFileName)
}
if err != nil {
log.Errorf("Error renaming %v to %v: %v", oldFileName, newFileName, err)
log.Warnf("Error renaming %v to %v: %v", oldFileName, newFileName, err)
if fromInode.DiskCacheFD != nil {
fromInode.DiskCacheFD.Close()
fromInode.DiskCacheFD = nil
Expand Down
20 changes: 10 additions & 10 deletions internal/file.go
Original file line number Diff line number Diff line change
Expand Up @@ -1373,7 +1373,7 @@ func (inode *Inode) SendUpload() bool {
oldParent.mu.Unlock()
}
} else {
log.Debugf("Failed to copy %v to %v (rename): %v", from, key, err)
log.Warnf("Failed to copy %v to %v (rename): %v", from, key, err)
inode.mu.Lock()
inode.recordFlushError(err)
if inode.Parent == oldParent && inode.Name == oldName {
Expand Down Expand Up @@ -1502,7 +1502,7 @@ func (inode *Inode) SendUpload() bool {
s3Log.Warnf("Conflict detected (inode %v): File %v is deleted or resized remotely, discarding local changes", inode.Id, inode.FullName())
inode.resetCache()
}
log.Errorf("Error flushing metadata using COPY for %v: %v", key, err)
log.Warnf("Error flushing metadata using COPY for %v: %v", key, err)
} else if inode.CacheState == ST_MODIFIED && !inode.isStillDirty() {
inode.SetCacheState(ST_CACHED)
inode.SetAttrTime(time.Now())
Expand Down Expand Up @@ -1551,7 +1551,7 @@ func (inode *Inode) SendUpload() bool {
inode.mu.Lock()
inode.recordFlushError(err)
if err != nil {
log.Errorf("Failed to initiate multipart upload for %v: %v", key, err)
log.Warnf("Failed to initiate multipart upload for %v: %v", key, err)
} else {
log.Debugf("Started multi-part upload of object %v", key)
inode.mpu = resp
Expand Down Expand Up @@ -1709,7 +1709,7 @@ func (inode *Inode) resetCache() {
go func(mpu *MultipartBlobCommitInput) {
_, abortErr := cloud.MultipartBlobAbort(mpu)
if abortErr != nil {
log.Errorf("Failed to abort multi-part upload of object %v: %v", key, abortErr)
log.Warnf("Failed to abort multi-part upload of object %v: %v", key, abortErr)
}
}(inode.mpu)
inode.mpu = nil
Expand Down Expand Up @@ -1776,7 +1776,7 @@ func (inode *Inode) FlushSmallObject() {
go func(mpu *MultipartBlobCommitInput) {
_, abortErr := cloud.MultipartBlobAbort(mpu)
if abortErr != nil {
log.Errorf("Failed to abort multi-part upload of object %v: %v", key, abortErr)
log.Warnf("Failed to abort multi-part upload of object %v: %v", key, abortErr)
}
}(inode.mpu)
inode.mpu = nil
Expand All @@ -1789,7 +1789,7 @@ func (inode *Inode) FlushSmallObject() {

inode.recordFlushError(err)
if err != nil {
log.Errorf("Failed to flush small file %v: %v", key, err)
log.Warnf("Failed to flush small file %v: %v", key, err)
if params.Metadata != nil {
inode.userMetadataDirty = 2
}
Expand Down Expand Up @@ -1890,7 +1890,7 @@ func (inode *Inode) copyUnmodifiedParts(numParts uint64) (err error) {
Size: size,
})
if requestErr != nil {
log.Errorf("Failed to copy unmodified range %v-%v MB of object %v: %v",
log.Warnf("Failed to copy unmodified range %v-%v MB of object %v: %v",
offset/1024/1024, (offset+size+1024*1024-1)/1024/1024, key, requestErr)
err = requestErr
} else {
Expand Down Expand Up @@ -1943,7 +1943,7 @@ func (inode *Inode) FlushPart(part uint64) {
return
}
if err != nil {
log.Errorf("Failed to load part %v of object %v to flush it: %v", part, key, err)
log.Warnf("Failed to load part %v of object %v to flush it: %v", part, key, err)
return
}
// File size may have been changed again
Expand Down Expand Up @@ -1981,7 +1981,7 @@ func (inode *Inode) FlushPart(part uint64) {
}
inode.recordFlushError(err)
if err != nil {
log.Errorf("Failed to flush part %v of object %v: %v", part, key, err)
log.Warnf("Failed to flush part %v of object %v: %v", part, key, err)
} else {
if inode.mpu != nil {
// It could become nil if the file was deleted remotely in the meantime
Expand Down Expand Up @@ -2044,7 +2044,7 @@ func (inode *Inode) completeMultipart() {
if inode.CacheState == ST_CREATED || inode.CacheState == ST_MODIFIED {
inode.recordFlushError(err)
if err != nil {
log.Errorf("Failed to finalize multi-part upload of object %v: %v", key, err)
log.Warnf("Failed to finalize multi-part upload of object %v: %v", key, err)
if inode.mpu.Metadata != nil {
inode.userMetadataDirty = 2
}
Expand Down
4 changes: 2 additions & 2 deletions internal/goofys.go
Original file line number Diff line number Diff line change
Expand Up @@ -1134,14 +1134,14 @@ func mapAwsError(err error) error {
if err != nil {
return err
} else {
s3Log.Errorf("http=%v %v s3=%v request=%v\n",
s3Log.Warnf("http=%v %v s3=%v request=%v\n",
reqErr.StatusCode(), reqErr.Message(),
awsErr.Code(), reqErr.RequestID())
return reqErr
}
} else {
// Generic AWS Error with Code, Message, and original error (if any)
s3Log.Errorf("code=%v msg=%v, err=%v\n", awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
s3Log.Warnf("code=%v msg=%v, err=%v\n", awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
return awsErr
}
} else {
Expand Down

0 comments on commit 3f9be08

Please sign in to comment.