From 004cdf67492e22df14602709d05a73e226efa95f Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Thu, 13 Apr 2023 14:55:45 +0800 Subject: [PATCH] storage: refine the way to implement BlobIoChunk Backport the new implementation of BlobIoChunk from master into v2.1. Fixes: https://github.com/dragonflyoss/image-service/issues/1198 Signed-off-by: Jiang Liu --- rafs/src/fs.rs | 7 ++-- rafs/src/metadata/cached_v5.rs | 19 +++++++---- rafs/src/metadata/direct_v5.rs | 10 ++++-- rafs/src/metadata/direct_v6.rs | 59 ++++++++++++++++++++++++--------- rafs/src/metadata/layout/v5.rs | 2 +- rafs/src/metadata/md_v5.rs | 8 +++-- rafs/src/metadata/md_v6.rs | 3 +- rafs/src/metadata/mod.rs | 25 +++++++++----- rafs/src/mock/mock_inode.rs | 10 ++++-- storage/src/cache/cachedfile.rs | 49 ++++++--------------------- storage/src/cache/dummycache.rs | 4 +++ storage/src/cache/mod.rs | 3 ++ storage/src/device.rs | 48 ++++++++++----------------- 13 files changed, 137 insertions(+), 110 deletions(-) diff --git a/rafs/src/fs.rs b/rafs/src/fs.rs index 43087306e63..a35b0452bb7 100644 --- a/rafs/src/fs.rs +++ b/rafs/src/fs.rs @@ -554,7 +554,7 @@ impl Rafs { // - prefetch listed passed in by user // - or file prefetch list in metadata let inodes = prefetch_files.map(|files| Self::convert_file_list(&files, &sb)); - let res = sb.prefetch_files(&mut reader, root_ino, inodes, &|desc| { + let res = sb.prefetch_files(&device, &mut reader, root_ino, inodes, &|desc| { if desc.bi_size > 0 { device.prefetch(&[desc], &[]).unwrap_or_else(|e| { warn!("Prefetch error, {:?}", e); @@ -603,7 +603,7 @@ impl Rafs { } } else { let root = vec![root_ino]; - let res = sb.prefetch_files(&mut reader, root_ino, Some(root), &|desc| { + let res = sb.prefetch_files(&device, &mut reader, root_ino, Some(root), &|desc| { if desc.bi_size > 0 { device.prefetch(&[desc], &[]).unwrap_or_else(|e| { warn!("Prefetch error, {:?}", e); @@ -761,7 +761,7 @@ impl FileSystem for Rafs { let real_size = cmp::min(size as u64, inode_size - offset); let 
mut result = 0; - let mut descs = inode.alloc_bio_vecs(offset, real_size as usize, true)?; + let mut descs = inode.alloc_bio_vecs(&self.device, offset, real_size as usize, true)?; debug_assert!(!descs.is_empty() && !descs[0].bi_vec.is_empty()); // Try to amplify user io for Rafs v5, to improve performance. @@ -775,6 +775,7 @@ impl FileSystem for Rafs { if actual_size < self.amplify_io as u64 { let window_size = self.amplify_io as u64 - actual_size; self.sb.amplify_io( + &self.device, self.amplify_io, &mut descs, &inode, diff --git a/rafs/src/metadata/cached_v5.rs b/rafs/src/metadata/cached_v5.rs index d9c5ff24d44..9024ae9fa92 100644 --- a/rafs/src/metadata/cached_v5.rs +++ b/rafs/src/metadata/cached_v5.rs @@ -23,7 +23,7 @@ use fuse_backend_rs::api::filesystem::Entry; use nydus_utils::digest::Algorithm; use nydus_utils::{digest::RafsDigest, ByteSize}; use storage::device::v5::BlobV5ChunkInfo; -use storage::device::{BlobChunkFlags, BlobChunkInfo, BlobInfo}; +use storage::device::{BlobChunkFlags, BlobChunkInfo, BlobDevice, BlobInfo}; use crate::metadata::layout::v5::{ rafsv5_alloc_bio_vecs, rafsv5_validate_digest, RafsV5BlobTable, RafsV5ChunkInfo, RafsV5Inode, @@ -598,7 +598,13 @@ impl RafsInode for CachedInodeV5 { Ok(0) } - fn alloc_bio_vecs(&self, offset: u64, size: usize, user_io: bool) -> Result> { + fn alloc_bio_vecs( + &self, + _device: &BlobDevice, + offset: u64, + size: usize, + user_io: bool, + ) -> Result> { rafsv5_alloc_bio_vecs(self, offset, size, user_io) } @@ -774,7 +780,7 @@ mod cached_tests { use std::sync::Arc; use nydus_utils::ByteSize; - use storage::device::BlobFeatures; + use storage::device::{BlobDevice, BlobFeatures}; use crate::metadata::cached_v5::{CachedInodeV5, CachedSuperBlockV5}; use crate::metadata::layout::v5::{ @@ -952,7 +958,8 @@ mod cached_tests { ); let mut cached_inode = CachedInodeV5::new(blob_table, meta.clone()); cached_inode.load(&meta, &mut reader).unwrap(); - let descs = cached_inode.alloc_bio_vecs(0, 100, true).unwrap(); + 
let device = BlobDevice::default(); + let descs = cached_inode.alloc_bio_vecs(&device, 0, 100, true).unwrap(); let desc1 = &descs[0]; assert_eq!(desc1.bi_size, 100); assert_eq!(desc1.bi_vec.len(), 1); @@ -960,7 +967,7 @@ mod cached_tests { assert_eq!(desc1.bi_vec[0].blob.blob_id(), "123333"); let descs = cached_inode - .alloc_bio_vecs(1024 * 1024 - 100, 200, true) + .alloc_bio_vecs(&device, 1024 * 1024 - 100, 200, true) .unwrap(); let desc2 = &descs[0]; assert_eq!(desc2.bi_size, 200); @@ -971,7 +978,7 @@ mod cached_tests { assert_eq!(desc2.bi_vec[1].size, 100); let descs = cached_inode - .alloc_bio_vecs(1024 * 1024 + 8192, 1024 * 1024 * 4, true) + .alloc_bio_vecs(&device, 1024 * 1024 + 8192, 1024 * 1024 * 4, true) .unwrap(); let desc3 = &descs[0]; assert_eq!(desc3.bi_size, 1024 * 1024 * 2); diff --git a/rafs/src/metadata/direct_v5.rs b/rafs/src/metadata/direct_v5.rs index d787abea84e..548fa2b89bb 100644 --- a/rafs/src/metadata/direct_v5.rs +++ b/rafs/src/metadata/direct_v5.rs @@ -32,7 +32,7 @@ use std::sync::Arc; use arc_swap::{ArcSwap, Guard}; use nydus_utils::digest::{Algorithm, RafsDigest}; use storage::device::v5::BlobV5ChunkInfo; -use storage::device::{BlobChunkFlags, BlobChunkInfo, BlobInfo, BlobIoVec}; +use storage::device::{BlobChunkFlags, BlobChunkInfo, BlobDevice, BlobInfo, BlobIoVec}; use storage::utils::readahead; use crate::metadata::layout::v5::{ @@ -834,7 +834,13 @@ impl RafsInode for OndiskInodeWrapper { Ok(0) } - fn alloc_bio_vecs(&self, offset: u64, size: usize, user_io: bool) -> Result> { + fn alloc_bio_vecs( + &self, + _device: &BlobDevice, + offset: u64, + size: usize, + user_io: bool, + ) -> Result> { rafsv5_alloc_bio_vecs(self, offset, size, user_io) } diff --git a/rafs/src/metadata/direct_v6.rs b/rafs/src/metadata/direct_v6.rs index 20e78d61c7e..c9fb6c98022 100644 --- a/rafs/src/metadata/direct_v6.rs +++ b/rafs/src/metadata/direct_v6.rs @@ -59,8 +59,7 @@ use nydus_utils::{ div_round_up, round_up, }; use storage::device::{ - 
v5::BlobV5ChunkInfo, BlobChunkFlags, BlobChunkInfo, BlobInfo, BlobIoChunk, BlobIoDesc, - BlobIoVec, + v5::BlobV5ChunkInfo, BlobChunkFlags, BlobChunkInfo, BlobDevice, BlobInfo, BlobIoDesc, BlobIoVec, }; use storage::utils::readahead; @@ -570,23 +569,33 @@ impl OndiskInodeWrapper { fn make_chunk_io( &self, + state: &Guard>, + device: &BlobDevice, chunk_addr: &RafsV6InodeChunkAddr, content_offset: u32, content_len: u32, user_io: bool, - ) -> BlobIoDesc { - let state = self.mapping.state.load(); - let blob_table = &state.blob_table.entries; - - // As ondisk blobs table contains bootstrap as the first blob device - // while `blob_table` doesn't, it is subtracted 1. - let blob_index = chunk_addr.blob_index() - 1; + ) -> Option { + let blob_index = chunk_addr.blob_index(); + let blob_index = if blob_index == 0 { + u32::MAX + } else { + blob_index as u32 - 1 + }; let chunk_index = chunk_addr.blob_comp_index(); - let io_chunk = BlobIoChunk::Address(blob_index as u32, chunk_index); - - let blob = blob_table[blob_index as usize].clone(); - BlobIoDesc::new(blob, io_chunk, content_offset, content_len, user_io) + match state.blob_table.get(blob_index) { + Err(e) => { + warn!( + "failed to get blob with index {} for chunk address {:?}, {}", + blob_index, chunk_addr, e + ); + None + } + Ok(blob) => device + .create_io_chunk(blob.blob_index(), chunk_index) + .map(|v| BlobIoDesc::new(blob, v, content_offset, content_len, user_io)), + } } fn chunk_size(&self) -> u32 { @@ -1238,7 +1247,14 @@ impl RafsInode for OndiskInodeWrapper { Ok(0) } - fn alloc_bio_vecs(&self, offset: u64, size: usize, user_io: bool) -> Result> { + fn alloc_bio_vecs( + &self, + device: &BlobDevice, + offset: u64, + size: usize, + user_io: bool, + ) -> Result> { + let state = self.mapping.state.load(); let chunk_size = self.chunk_size(); let head_chunk_index = offset / chunk_size as u64; @@ -1258,7 +1274,16 @@ impl RafsInode for OndiskInodeWrapper { // Safe to unwrap because chunks is not empty to reach here. 
let first_chunk_addr = chunks.first().unwrap(); - let desc = self.make_chunk_io(first_chunk_addr, content_offset, content_len, user_io); + let desc = self + .make_chunk_io( + &state, + device, + first_chunk_addr, + content_offset, + content_len, + user_io, + ) + .ok_or_else(|| einval!("failed to get chunk information"))?; let mut descs = BlobIoVec::new(); descs.bi_vec.push(desc); @@ -1269,7 +1294,9 @@ impl RafsInode for OndiskInodeWrapper { // Handle the rest of chunks since they shares the same content length = 0. for c in chunks.iter().skip(1) { content_len = std::cmp::min(chunk_size, left); - let desc = self.make_chunk_io(c, 0, content_len, user_io); + let desc = self + .make_chunk_io(&state, device, c, 0, content_len, user_io) + .ok_or_else(|| einval!("failed to get chunk information"))?; if desc.blob.blob_index() != descs.bi_vec[0].blob.blob_index() { trace!( diff --git a/rafs/src/metadata/layout/v5.rs b/rafs/src/metadata/layout/v5.rs index 78610cd315a..234cf4f7836 100644 --- a/rafs/src/metadata/layout/v5.rs +++ b/rafs/src/metadata/layout/v5.rs @@ -1319,7 +1319,7 @@ fn add_chunk_to_bio_desc( let bio = BlobIoDesc::new( blob, - BlobIoChunk::Base(io_chunk), + BlobIoChunk(io_chunk), chunk_start as u32, (chunk_end - chunk_start) as u32, user_io, diff --git a/rafs/src/metadata/md_v5.rs b/rafs/src/metadata/md_v5.rs index 427ae245167..5b01295bf07 100644 --- a/rafs/src/metadata/md_v5.rs +++ b/rafs/src/metadata/md_v5.rs @@ -87,6 +87,7 @@ impl RafsSuper { pub(crate) fn prefetch_data_v5( &self, + device: &BlobDevice, r: &mut RafsIoReader, root_ino: Inode, fetcher: F, @@ -123,7 +124,7 @@ impl RafsSuper { found_root_inode = true; } debug!("hint prefetch inode {}", ino); - self.prefetch_data(ino as u64, &mut state, &mut hardlinks, &fetcher) + self.prefetch_data(ino as u64, device, &mut state, &mut hardlinks, &fetcher) .map_err(|e| RafsError::Prefetch(e.to_string()))?; } for (_id, mut desc) in state.drain() { @@ -172,6 +173,7 @@ impl RafsSuper { // expect that those chunks 
are likely to be continuous with user IO's chunks. pub(crate) fn amplify_io ( &self, + device: &BlobDevice, max_size: u32, descs: &mut [BlobIoVec], inode: &Arc<dyn RafsInode>, @@ -190,7 +192,7 @@ impl RafsSuper { if window_base < inode_size { let size = inode_size - window_base; let sz = std::cmp::min(size, window_size); - let amplified_io_vec = inode.alloc_bio_vecs(window_base, sz as usize, false)?; + let amplified_io_vec = inode.alloc_bio_vecs(device, window_base, sz as usize, false)?; debug_assert!(!amplified_io_vec.is_empty() && !amplified_io_vec[0].bi_vec.is_empty()); // caller should ensure that `window_base` won't overlap last chunk of user IO. Self::merge_chunks_io(last_desc, &amplified_io_vec); @@ -216,7 +218,7 @@ impl RafsSuper { } let sz = std::cmp::min(window_size, next_size); - let amplified_io_vec = ni.alloc_bio_vecs(0, sz as usize, false)?; + let amplified_io_vec = ni.alloc_bio_vecs(device, 0, sz as usize, false)?; debug_assert!( !amplified_io_vec.is_empty() && !amplified_io_vec[0].bi_vec.is_empty() ); diff --git a/rafs/src/metadata/md_v6.rs b/rafs/src/metadata/md_v6.rs index 10e4aca4595..a02fa52c495 100644 --- a/rafs/src/metadata/md_v6.rs +++ b/rafs/src/metadata/md_v6.rs @@ -102,6 +102,7 @@ impl RafsSuper { pub(crate) fn prefetch_data_v6<F>( &self, + device: &BlobDevice, r: &mut RafsIoReader, root_ino: Inode, fetcher: F, @@ -139,7 +140,7 @@ impl RafsSuper { found_root_inode = true; } debug!("hint prefetch inode {}", ino); - self.prefetch_data(ino as u64, &mut state, &mut hardlinks, &fetcher) + self.prefetch_data(ino as u64, device, &mut state, &mut hardlinks, &fetcher) .map_err(|e| RafsError::Prefetch(e.to_string()))?; } // The left chunks whose size is smaller than 4MB will be fetched here. 
diff --git a/rafs/src/metadata/mod.rs b/rafs/src/metadata/mod.rs index a123f18b1e6..4a98ee7b050 100644 --- a/rafs/src/metadata/mod.rs +++ b/rafs/src/metadata/mod.rs @@ -24,7 +24,7 @@ use fuse_backend_rs::api::filesystem::Entry; use nydus_utils::compress; use nydus_utils::digest::{self, RafsDigest}; use serde::Serialize; -use storage::device::{BlobChunkInfo, BlobInfo, BlobIoMerge, BlobIoVec}; +use storage::device::{BlobChunkInfo, BlobDevice, BlobInfo, BlobIoMerge, BlobIoVec}; use self::layout::v5::RafsV5PrefetchTable; use self::layout::v6::RafsV6PrefetchTable; @@ -211,7 +211,13 @@ pub trait RafsInode: Any { ) -> Result; /// Allocate blob io vectors to read file data in range [offset, offset + size). - fn alloc_bio_vecs(&self, offset: u64, size: usize, user_io: bool) -> Result>; + fn alloc_bio_vecs( + &self, + device: &BlobDevice, + offset: u64, + size: usize, + user_io: bool, + ) -> Result>; fn as_any(&self) -> &dyn Any; @@ -651,6 +657,7 @@ impl RafsSuper { /// Return Ok(true) means root inode is found during performing prefetching and all files should be prefetched. pub fn prefetch_files( &self, + device: &BlobDevice, r: &mut RafsIoReader, root_ino: Inode, files: Option>, @@ -662,7 +669,7 @@ impl RafsSuper { let mut hardlinks: HashSet = HashSet::new(); let mut state = BlobIoMerge::default(); for f_ino in files { - self.prefetch_data(f_ino, &mut state, &mut hardlinks, fetcher) + self.prefetch_data(f_ino, device, &mut state, &mut hardlinks, fetcher) .map_err(|e| RafsError::Prefetch(e.to_string()))?; } for (_id, mut desc) in state.drain() { @@ -671,9 +678,9 @@ impl RafsSuper { // Flush the pending prefetch requests. 
Ok(false) } else if self.meta.is_v5() { - self.prefetch_data_v5(r, root_ino, fetcher) + self.prefetch_data_v5(device, r, root_ino, fetcher) } else if self.meta.is_v6() { - self.prefetch_data_v6(r, root_ino, fetcher) + self.prefetch_data_v6(device, r, root_ino, fetcher) } else { Err(RafsError::Prefetch( "Unknown filesystem version, prefetch disabled".to_string(), @@ -684,6 +691,7 @@ impl RafsSuper { #[inline] fn prefetch_inode( inode: &Arc, + device: &BlobDevice, state: &mut BlobIoMerge, hardlinks: &mut HashSet, prefetcher: F, @@ -700,7 +708,7 @@ impl RafsSuper { } } - let descs = inode.alloc_bio_vecs(0, inode.size() as usize, false)?; + let descs = inode.alloc_bio_vecs(device, 0, inode.size() as usize, false)?; for desc in descs { state.append(desc); prefetcher(state); @@ -712,6 +720,7 @@ impl RafsSuper { fn prefetch_data( &self, ino: u64, + device: &BlobDevice, state: &mut BlobIoMerge, hardlinks: &mut HashSet, fetcher: F, @@ -741,7 +750,7 @@ impl RafsSuper { let mut descendants = Vec::new(); let _ = inode.collect_descendants_inodes(&mut descendants)?; for i in descendants.iter() { - Self::prefetch_inode(i, state, hardlinks, try_prefetch)?; + Self::prefetch_inode(i, device, state, hardlinks, try_prefetch)?; } } else if !inode.is_empty_size() && inode.is_reg() { // An empty regular file will also be packed into nydus image, @@ -749,7 +758,7 @@ impl RafsSuper { // Moreover, for rafs v5, symlink has size of zero but non-zero size // for symlink size. For rafs v6, symlink size is also represented by i_size. // So we have to restrain the condition here. 
- Self::prefetch_inode(&inode, state, hardlinks, try_prefetch)?; + Self::prefetch_inode(&inode, device, state, hardlinks, try_prefetch)?; } Ok(()) diff --git a/rafs/src/mock/mock_inode.rs b/rafs/src/mock/mock_inode.rs index 16278e9caf4..8a4c35b8397 100644 --- a/rafs/src/mock/mock_inode.rs +++ b/rafs/src/mock/mock_inode.rs @@ -14,7 +14,7 @@ use fuse_backend_rs::abi::fuse_abi; use fuse_backend_rs::api::filesystem::Entry; use nydus_utils::{digest::RafsDigest, ByteSize}; -use storage::device::{BlobChunkInfo, BlobInfo, BlobIoVec}; +use storage::device::{BlobChunkInfo, BlobDevice, BlobInfo, BlobIoVec}; use super::mock_chunk::MockChunkInfo; use super::mock_super::CHUNK_SIZE; @@ -237,7 +237,13 @@ impl RafsInode for MockInode { Ok(0) } - fn alloc_bio_vecs(&self, offset: u64, size: usize, user_io: bool) -> Result> { + fn alloc_bio_vecs( + &self, + _device: &BlobDevice, + offset: u64, + size: usize, + user_io: bool, + ) -> Result> { rafsv5_alloc_bio_vecs(self, offset, size, user_io) } diff --git a/storage/src/cache/cachedfile.rs b/storage/src/cache/cachedfile.rs index ce844aa7679..96125beb804 100644 --- a/storage/src/cache/cachedfile.rs +++ b/storage/src/cache/cachedfile.rs @@ -30,8 +30,8 @@ use crate::cache::state::ChunkMap; use crate::cache::worker::{AsyncPrefetchConfig, AsyncPrefetchMessage, AsyncWorkerMgr}; use crate::cache::{BlobCache, BlobIoMergeState}; use crate::device::{ - BlobChunkInfo, BlobInfo, BlobIoChunk, BlobIoDesc, BlobIoRange, BlobIoSegment, BlobIoTag, - BlobIoVec, BlobObject, BlobPrefetchRequest, + BlobChunkInfo, BlobInfo, BlobIoDesc, BlobIoRange, BlobIoSegment, BlobIoTag, BlobIoVec, + BlobObject, BlobPrefetchRequest, }; use crate::meta::{BlobMetaChunk, BlobMetaInfo}; use crate::utils::{alloc_buf, copyv, readv, MemSliceCursor}; @@ -185,6 +185,13 @@ impl BlobCache for FileCacheEntry { &self.chunk_map } + fn get_chunk_info(&self, chunk_index: u32) -> Option> { + self.meta + .as_ref() + .and_then(|v| v.get_blob_meta()) + .map(|v| BlobMetaChunk::new(chunk_index 
as usize, &v.state)) + } + fn get_blob_object(&self) -> Option<&dyn BlobObject> { if self.is_get_blob_object_supported { Some(self) @@ -200,27 +207,8 @@ impl BlobCache for FileCacheEntry { bios: &[BlobIoDesc], ) -> StorageResult { let mut bios = bios.to_vec(); - let mut fail = false; - bios.iter_mut().for_each(|b| { - if let BlobIoChunk::Address(_blob_index, chunk_index) = b.chunkinfo { - if let Some(meta) = self.meta.as_ref() { - if let Some(bm) = meta.get_blob_meta() { - let cki = BlobMetaChunk::new(chunk_index as usize, &bm.state); - // TODO: Improve the type conversion - b.chunkinfo = BlobIoChunk::Base(cki); - } else { - warn!("failed to get blob.meta for prefetch"); - fail = true; - } - } - } - }); - if fail { - bios = vec![]; - } else { - bios.sort_by_key(|entry| entry.chunkinfo.compressed_offset()); - self.metrics.prefetch_unmerged_chunks.add(bios.len() as u64); - } + bios.sort_by_key(|entry| entry.chunkinfo.compressed_offset()); + self.metrics.prefetch_unmerged_chunks.add(bios.len() as u64); // Handle blob prefetch request first, it may help performance. for req in prefetches { @@ -371,21 +359,6 @@ impl BlobCache for FileCacheEntry { self.metrics.total.inc(); self.workers.consume_prefetch_budget(iovec.bi_size); - if let Some(meta) = self.meta.as_ref() { - if let Some(bm) = meta.get_blob_meta() { - // Convert `BlocIoChunk::Address` to `BlobIoChunk::Base` since rafs v6 has no chunks' meta - // in bootstrap. 
- for b in iovec.bi_vec.iter_mut() { - if let BlobIoChunk::Address(_blob_index, chunk_index) = b.chunkinfo { - b.chunkinfo = - BlobIoChunk::Base(BlobMetaChunk::new(chunk_index as usize, &bm.state)); - } - } - } else { - return Err(einval!("failed to get blob.meta for read")); - } - } - if iovec.bi_vec.is_empty() { Ok(0) } else if iovec.bi_vec.len() == 1 { diff --git a/storage/src/cache/dummycache.rs b/storage/src/cache/dummycache.rs index 90a73c70528..b573c8f7851 100644 --- a/storage/src/cache/dummycache.rs +++ b/storage/src/cache/dummycache.rs @@ -80,6 +80,10 @@ impl BlobCache for DummyCache { &self.chunk_map } + fn get_chunk_info(&self, _chunk_index: u32) -> Option<Arc<dyn BlobChunkInfo>> { + None + } + fn prefetch( &self, _blob_cache: Arc<dyn BlobCache>, diff --git a/storage/src/cache/mod.rs b/storage/src/cache/mod.rs index 6e5e5fb5f6f..8261d2df7ec 100644 --- a/storage/src/cache/mod.rs +++ b/storage/src/cache/mod.rs @@ -148,6 +148,9 @@ pub trait BlobCache: Send + Sync { /// Get the underlying `ChunkMap` object. fn get_chunk_map(&self) -> &Arc<dyn ChunkMap>; + /// Get the `BlobChunkInfo` object corresponding to `chunk_index`. + fn get_chunk_info(&self, chunk_index: u32) -> Option<Arc<dyn BlobChunkInfo>>; + /// Get a `BlobObject` instance to directly access uncompressed blob file. fn get_blob_object(&self) -> Option<&dyn BlobObject> { None diff --git a/storage/src/device.rs b/storage/src/device.rs index afda6a55298..4e711c7aff3 100644 --- a/storage/src/device.rs +++ b/storage/src/device.rs @@ -409,34 +409,22 @@ pub trait BlobChunkInfo: Any + Sync + Send { /// This helps to feed unified IO description to storage subsystem from both rafs v6 and v5 since /// rafs v6 have a different ChunkInfo definition on bootstrap. #[derive(Clone)] -pub enum BlobIoChunk { - // For rafs v6 to pass chunk info to storage module. 
- // (blob_index, chunk_index) since it can't load chunks info from bootstrap - Address(u32, u32), - Base(Arc<dyn BlobChunkInfo>), -} +pub struct BlobIoChunk(pub Arc<dyn BlobChunkInfo>); impl BlobIoChunk { /// Convert a [BlobIoChunk] to a reference to [BlobChunkInfo] trait object. pub fn as_base(&self) -> &(dyn BlobChunkInfo) { - match self { - BlobIoChunk::Base(v) => v.as_ref(), - _ => panic!("Chunk is not fully loaded"), - } + self.0.as_ref() } pub fn inner(&self) -> Arc<dyn BlobChunkInfo> { - match self { - BlobIoChunk::Base(v) => v.clone(), - // TODO: Don't panic? - _ => panic!("Chunk is not fully loaded"), - } + self.0.clone() } } impl From<Arc<dyn BlobChunkInfo>> for BlobIoChunk { fn from(v: Arc<dyn BlobChunkInfo>) -> Self { - BlobIoChunk::Base(v) + BlobIoChunk(v) } } @@ -446,13 +434,7 @@ impl BlobChunkInfo for BlobIoChunk { } fn id(&self) -> u32 { - // BlobIoChunk::Address is a medium type to pass chunk IO description - // for rafs v6. It can't implement BlobChunkInfo and calling `as_base` - // causes panic. So this is a workaround to avoid panic. - match self { - Self::Address(_, index) => *index, - _ => self.as_base().id(), - } + self.0.id() } fn blob_index(&self) -> u32 { @@ -460,13 +442,7 @@ impl BlobChunkInfo for BlobIoChunk { } fn compressed_offset(&self) -> u64 { - // BlobIoChunk::Address is a medium type to pass chunk IO description - // for rafs v6. It can't implement BlobChunkInfo and calling `as_base` - // causes panic. So this is a workaround to avoid panic. - match self { - Self::Address(_, _) => 0, - _ => self.as_base().compressed_offset(), - } + self.0.compressed_offset() } fn compressed_size(&self) -> u32 { @@ -864,6 +840,7 @@ pub trait BlobObject: AsRawFd { /// /// All blob Io requests are actually served by the underlying [BlobCache] object. A new method /// [update()]() is added to switch the storage backend on demand. +#[derive(Default)] pub struct BlobDevice { //meta: ArcSwap>, blobs: ArcSwap<Vec<Arc<dyn BlobCache>>>, @@ -1019,6 +996,17 @@ impl BlobDevice { true } + /// RAFS V6: create a `BlobIoChunk` for chunk with index `chunk_index`. 
+ pub fn create_io_chunk(&self, blob_index: u32, chunk_index: u32) -> Option<BlobIoChunk> { + if (blob_index as usize) < self.blob_count { + let state = self.blobs.load(); + let blob = &state[blob_index as usize]; + blob.get_chunk_info(chunk_index).map(|v| v.into()) + } else { + None + } + } + fn get_blob_by_iovec(&self, iovec: &BlobIoVec) -> Option<Arc<dyn BlobCache>> { if let Some(blob_index) = iovec.get_target_blob_index() { if (blob_index as usize) < self.blob_count {