meta/redis: support new backup (#5356)
Signed-off-by: jiefenghuang <[email protected]>
Co-authored-by: Davies Liu <[email protected]>
jiefenghuang and davies authored Dec 10, 2024
1 parent f5fcaae commit 0b1a218
Showing 7 changed files with 939 additions and 14 deletions.
1 change: 0 additions & 1 deletion pkg/meta/backup.go
@@ -52,7 +52,6 @@ const (
segTypeStat
segTypeQuota
segTypeParent // for redis/tkv only
- segTypeMix // for redis/tkv only
segTypeMax
)

6 changes: 3 additions & 3 deletions pkg/meta/load_dump_test.go
@@ -403,7 +403,7 @@ func TestLoadDumpV2(t *testing.T) {
engines := map[string][]string{
"sqlite3": {"sqlite3://dev.db", "sqlite3://dev2.db"},
// "mysql": {"mysql://root:@/dev", "mysql://root:@/dev2"},
// "redis": {"redis://127.0.0.1:6379/2", "redis://127.0.0.1:6379/3"},
"redis": {"redis://127.0.0.1:6379/2", "redis://127.0.0.1:6379/3"},
// "tikv": {"tikv://127.0.0.1:2379/jfs-load-dump-1", "tikv://127.0.0.1:2379/jfs-load-dump-2"},
}

@@ -547,7 +547,7 @@ func BenchmarkLoadDumpV2(b *testing.B) {
defer fp.Close()
b.ResetTimer()
- if err = m.DumpMetaV2(Background, fp, &DumpOption{CoNum: 10}); err != nil {
+ if err = m.DumpMetaV2(Background, fp, &DumpOption{Threads: 10}); err != nil {
b.Fatalf("dump meta: %s", err)
}
fp.Sync()
@@ -565,7 +565,7 @@ func BenchmarkLoadDumpV2(b *testing.B) {
defer fp.Close()
b.ResetTimer()
- if err = m.LoadMetaV2(Background, fp, &LoadOption{CoNum: 10}); err != nil {
+ if err = m.LoadMetaV2(Background, fp, &LoadOption{Threads: 10}); err != nil {
b.Fatalf("load meta: %s", err)
}
})
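
Aside from re-enabling the redis engine in the test matrix, the changes in this file track the rename of the concurrency field on DumpOption and LoadOption from CoNum to Threads. Below is a minimal sketch of a dump-and-reload round trip using the renamed field; the helper name dumpAndReload and the file path are illustrative, and it assumes an initialized Meta client plus the package's Background context, exactly as in the benchmark above.

package meta

import (
	"io"
	"os"
)

// dumpAndReload is a sketch (not part of this commit): it dumps metadata in
// the V2 binary format with 10 worker threads and immediately loads it back,
// using the renamed Threads field (formerly CoNum).
func dumpAndReload(m Meta, path string) error {
	fp, err := os.Create(path)
	if err != nil {
		return err
	}
	defer fp.Close()

	// Dump with 10 concurrent workers (was &DumpOption{CoNum: 10}).
	if err = m.DumpMetaV2(Background, fp, &DumpOption{Threads: 10}); err != nil {
		return err
	}
	// Rewind the file and load it back with the same concurrency.
	if _, err = fp.Seek(0, io.SeekStart); err != nil {
		return err
	}
	return m.LoadMetaV2(Background, fp, &LoadOption{Threads: 10})
}
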
19 changes: 13 additions & 6 deletions pkg/meta/redis.go
@@ -65,14 +65,15 @@ import (
Removed files: delfiles -> [$inode:$length -> seconds]
detached nodes: detachedNodes -> [$inode -> seconds]
- Slices refs: k$sliceId_$size -> refcount
+ Slices refs: sliceRef -> {k$sliceId_$size -> refcount}
Dir data length: dirDataLength -> { $inode -> length }
Dir used space: dirUsedSpace -> { $inode -> usedSpace }
Dir used inodes: dirUsedInodes -> { $inode -> usedInodes }
Quota: dirQuota -> { $inode -> {maxSpace, maxInodes} }
Quota used space: dirQuotaUsedSpace -> { $inode -> usedSpace }
Quota used inodes: dirQuotaUsedInodes -> { $inode -> usedInodes }
Acl: acl -> { $acl_id -> acl }
Redis features:
Sorted Set: 1.2+
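
The schema change above moves slice reference counts out of one string key per slice (k$sliceId_$size) and into a single hash named sliceRef, with the same k$sliceId_$size string reused as the hash field. A standalone go-redis (v9) sketch of the before/after access pattern follows; the address, prefix, slice ID, and size are illustrative values, not code from this commit.

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

// Sketch only: shows the old and new key layout for slice refcounts.
func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	prefix := ""                                 // stands in for m.prefix
	field := fmt.Sprintf("k%d_%d", 12345, 4<<20) // k$sliceId_$size

	// Old layout: one string key per slice.
	//   GET {prefix}k$sliceId_$size
	oldRef, _ := rdb.Get(ctx, prefix+field).Int64()

	// New layout: one hash (sliceRef) holds every slice refcount as a field.
	//   HGET {prefix}sliceRef k$sliceId_$size
	newRef, _ := rdb.HGet(ctx, prefix+"sliceRef", field).Int64()

	fmt.Println(oldRef, newRef) // both print 0 when the key/field is absent (redis.Nil)
}
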
@@ -386,7 +387,7 @@ func (m *redisMeta) doNewSession(sinfo []byte, update bool) error {
}

func (m *redisMeta) getCounter(name string) (int64, error) {
- v, err := m.rdb.Get(Background, m.prefix+name).Int64()
+ v, err := m.rdb.Get(Background, m.counterKey(name)).Int64()
if err == redis.Nil {
err = nil
}
@@ -397,15 +398,14 @@ func (m *redisMeta) incrCounter(name string, value int64) (int64, error) {
if m.conf.ReadOnly {
return 0, syscall.EROFS
}
+ key := m.counterKey(name)
if name == "nextInode" || name == "nextChunk" {
// for nextinode, nextchunk
// the current one is already used
- v, err := m.rdb.IncrBy(Background, m.prefix+strings.ToLower(name), value).Result()
+ v, err := m.rdb.IncrBy(Background, key, value).Result()
return v + 1, err
- } else if name == "nextSession" {
- name = "nextsession"
}
- return m.rdb.IncrBy(Background, m.prefix+name, value).Result()
+ return m.rdb.IncrBy(Background, key, value).Result()
}

func (m *redisMeta) setIfSmall(name string, value, diff int64) (bool, error) {
@@ -607,6 +607,13 @@ func (m *redisMeta) nextTrashKey() string {
return m.prefix + "nextTrash"
}

+ func (m *redisMeta) counterKey(name string) string {
+ if name == "nextInode" || name == "nextChunk" || name == "nextSession" {
+ name = strings.ToLower(name)
+ }
+ return m.prefix + name
+ }

func (m *redisMeta) dirDataLengthKey() string {
return m.prefix + "dirDataLength"
}
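
The new counterKey helper centralizes the casing quirk that getCounter and incrCounter previously handled inline: the legacy counters nextInode, nextChunk, and nextSession live under lowercase key names, while every other counter keeps its name as-is. A standalone sketch of the mapping follows; the jfs: prefix and the usedSpace example are illustrative, and the real method hangs off *redisMeta and uses m.prefix.

package main

import (
	"fmt"
	"strings"
)

// counterKey mirrors the helper added in redis.go: legacy counters are
// stored under lowercase names, everything else keeps its original casing.
func counterKey(prefix, name string) string {
	if name == "nextInode" || name == "nextChunk" || name == "nextSession" {
		name = strings.ToLower(name)
	}
	return prefix + name
}

func main() {
	fmt.Println(counterKey("jfs:", "nextInode")) // jfs:nextinode (normalized)
	fmt.Println(counterKey("jfs:", "nextChunk")) // jfs:nextchunk (normalized)
	fmt.Println(counterKey("jfs:", "usedSpace")) // jfs:usedSpace (unchanged)
}
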