chore: add more traces
WenyXu committed Dec 12, 2024
1 parent bab36b9 commit 2317985
Showing 2 changed files with 15 additions and 0 deletions.
src/mito2/src/cache/index.rs (7 additions, 0 deletions)
@@ -18,6 +18,8 @@ use std::sync::Arc;
 use api::v1::index::InvertedIndexMetas;
 use async_trait::async_trait;
 use common_base::BitVec;
+use common_telemetry::debug;
+use common_telemetry::tracing::{self};
 use index::inverted_index::error::DecodeFstSnafu;
 use index::inverted_index::format::reader::InvertedIndexReader;
 use index::inverted_index::FstMap;
@@ -57,6 +59,7 @@
 {
     /// Gets given range of index data from cache, and loads from source if the file
     /// is not already cached.
+    #[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
     async fn get_or_load(
         &mut self,
         offset: u64,
@@ -95,6 +98,10 @@ where
             }
         }
         if !cache_miss_range.is_empty() {
+            debug!(
+                "Fetching cache miss range: {:?}, offset: {}, size: {}",
+                cache_miss_range, offset, size
+            );
             let pages = self.inner.read_vec(&cache_miss_range).await?;
             for (i, page) in cache_miss_idx.into_iter().zip(pages.into_iter()) {
                 let page = Arc::new(page);
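Note: the additions above follow a common tracing pattern: `#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]` wraps each call to `get_or_load` in a DEBUG-level span without recording its arguments, and the `debug!` event inside the body is emitted within that span. The following is a minimal, self-contained sketch of the same pattern using the upstream `tracing`, `tracing-subscriber`, and `tokio` crates directly (it is assumed here that `common_telemetry::tracing` and `common_telemetry::debug` re-export them); the function below is a hypothetical stand-in, not the real `get_or_load`.

use tracing::{debug, Level};

// Hypothetical stand-in for the instrumented method: `skip_all` keeps the
// (potentially large) arguments out of the span's fields, while the span
// itself still records timing for each call at DEBUG level.
#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
async fn get_or_load(offset: u64, size: u32) -> Vec<u8> {
    // Events fired inside the function are attached to the surrounding span.
    debug!("fetching range, offset: {}, size: {}", offset, size);
    vec![0; size as usize]
}

#[tokio::main]
async fn main() {
    // Without a subscriber at DEBUG level, the span and event are silently dropped.
    tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();
    let _ = get_or_load(0, 16).await;
}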
src/mito2/src/sst/parquet/helper.rs (8 additions, 0 deletions)
@@ -16,6 +16,7 @@ use std::ops::Range;
 use std::sync::Arc;
 
 use bytes::Bytes;
+use common_telemetry::{debug, tracing};
 use object_store::ObjectStore;
 use parquet::basic::ColumnOrder;
 use parquet::file::metadata::{FileMetaData, ParquetMetaData, RowGroupMetaData};
@@ -95,11 +96,18 @@ const MERGE_GAP: usize = 512 * 1024;
 ///
 /// * `FETCH_PARALLELISM` - The number of concurrent fetch operations.
 /// * `MERGE_GAP` - The maximum gap size (in bytes) to merge small byte ranges for optimized fetching.
+#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
 pub async fn fetch_byte_ranges(
     file_path: &str,
     object_store: ObjectStore,
     ranges: &[Range<u64>],
 ) -> object_store::Result<Vec<Bytes>> {
+    debug!(
+        "fetch_byte_ranges: {:?}, file_path: {}, scheme: {:?}",
+        ranges,
+        file_path,
+        object_store.info().scheme(),
+    );
     Ok(object_store
         .reader_with(file_path)
         .concurrent(FETCH_PARALLELISM)
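Note: `object_store::ObjectStore` in this crate wraps an OpenDAL operator (an assumption here, suggested by `object_store.info().scheme()` in the diff), so the new debug line reports which backend scheme serves the read alongside the requested byte ranges. Below is a hypothetical, simplified sketch against `opendal` directly: it logs the same fields as the new `debug!` call and then reads each range individually, whereas the real helper merges ranges within `MERGE_GAP` and fetches them with `FETCH_PARALLELISM` concurrent reads; exact OpenDAL method names can vary by version.

use std::ops::Range;

use opendal::{services::Memory, Operator};
use tracing::{debug, Level};

// Hypothetical, simplified stand-in for `fetch_byte_ranges`: it emits the same
// kind of debug event as the real helper, then reads each range one by one.
#[tracing::instrument(level = tracing::Level::DEBUG, skip_all)]
async fn fetch_byte_ranges(
    file_path: &str,
    object_store: Operator,
    ranges: &[Range<u64>],
) -> opendal::Result<Vec<usize>> {
    debug!(
        "fetch_byte_ranges: {:?}, file_path: {}, scheme: {:?}",
        ranges,
        file_path,
        object_store.info().scheme(),
    );
    let mut lens = Vec::with_capacity(ranges.len());
    for r in ranges {
        // `read_with(..).range(..)` issues a ranged read against the backend.
        let buf = object_store.read_with(file_path).range(r.clone()).await?;
        lens.push(buf.len());
    }
    Ok(lens)
}

#[tokio::main]
async fn main() -> opendal::Result<()> {
    // Install a DEBUG-level subscriber so the span and event are visible.
    tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();

    let op = Operator::new(Memory::default())?.finish();
    op.write("data.parquet", vec![0u8; 1024]).await?;

    // The instrumented call prints the ranges, the path, and the backend
    // scheme (`memory` here) before reading.
    let lens = fetch_byte_ranges("data.parquet", op, &[0..128, 512..600]).await?;
    println!("{lens:?}");
    Ok(())
}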
