Skip to content

Commit

Permalink
fixup: refactor the backfill optimization
Browse files Browse the repository at this point in the history
Simplify calculating addr and size.

Getting rid of a few ifs shouldn't hurt performance, either!

Signed-off-by: Michal Mielewczyk <[email protected]>
  • Loading branch information
mmichal10 committed Sep 23, 2024
1 parent 2855326 commit 1972ee1
Showing 1 changed file with 38 additions and 37 deletions.
75 changes: 38 additions & 37 deletions src/engine/engine_bf.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,11 +68,21 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
}
}

/*
 * Helper macros for walking the request's core-line -> cache-line mapping.
 *
 * NOTE(review): the double-underscore names live in the namespace reserved
 * for the implementation (C11 7.1.3); they are kept unchanged here so
 * existing call sites keep working, but a rename (e.g. OCF_ prefix) would
 * be safer long term.
 */

/* True when the cache lines mapped at entries @__i and @__j do not occupy
 * consecutive collision indexes, i.e. they cannot be coalesced into one
 * contiguous cache I/O. Fully parenthesized so the expansion composes
 * safely with any surrounding expression (e.g. under `!`). */
#define __entries_not_adjacent(__req, __i, __j) \
	((__req)->map[(__i)].coll_idx + 1 != (__req)->map[(__j)].coll_idx)

/* True when @__i indexes the last core line of the request. */
#define __is_the_last_chunk(__req, __i) \
	((__i) == ((__req)->core_line_count - 1))

/* Evaluates to @__skip for the last chunk of the request and to 0 for any
 * other chunk - a branchless way of trimming the trailing partial line. */
#define __skip_on_the_last_entry(__skip, __req, __i) \
	((__skip) * (int)__is_the_last_chunk(__req, __i))

static int _ocf_backfill_do(struct ocf_request *req)
{
ocf_cache_t cache = req->cache;
uint64_t addr, bytes, total_bytes = 0, addr_next = 0;
uint64_t seek, skip;
uint64_t metadata_offset = cache->device->metadata_offset;
ocf_cache_line_size_t cache_line_size = ocf_line_size(cache);
uint64_t addr, bytes, total_bytes = 0;
uint64_t seek, skip, last_chunk_size;
uint32_t i;

req->data = req->cp_data;
Expand All @@ -84,9 +94,9 @@ static int _ocf_backfill_do(struct ocf_request *req)
req->cache_forward_end = _ocf_backfill_complete;

if (ocf_engine_is_sequential(req)) {
addr = cache->device->metadata_offset;
addr += req->map[0].coll_idx * ocf_line_size(cache);
addr += req->addr % ocf_line_size(cache);
addr = metadata_offset;
addr += req->map[0].coll_idx * cache_line_size;
addr += req->addr % cache_line_size;

ocf_core_stats_cache_block_update(req->core, req->part_id,
OCF_WRITE, req->bytes);
Expand All @@ -96,49 +106,40 @@ static int _ocf_backfill_do(struct ocf_request *req)
return 0;
}

seek = req->addr % cache_line_size;
last_chunk_size = (req->addr + req->bytes) % cache_line_size;
skip = (cache_line_size - last_chunk_size) % cache_line_size;

ocf_req_forward_cache_get(req);
for (i = 0; i < req->core_line_count; i++) {
if (addr_next) {
addr = addr_next;
} else {
addr = req->map[i].coll_idx;
addr *= ocf_line_size(cache);
addr += cache->device->metadata_offset;
}
bytes = ocf_line_size(cache);

if (i == 0) {
seek = req->addr % ocf_line_size(cache);
addr += seek;
bytes -= seek;
}

if (req->map[i].status == LOOKUP_HIT) {
/* This is the 1st cache line in the interval,
* and it's a hit. Don't write it to the cache.
*/
addr_next = 0;
total_bytes += bytes;
/* This is the 1st cache line in the interval, and it's
a hit. Don't write it to the cache */
total_bytes += cache_line_size;
total_bytes -= seek;
/* Seek should be taken into account for the first chunk
only */
seek = 0;
continue;
}

for (; i < (req->core_line_count - 1); i++) {
addr_next = req->map[i + 1].coll_idx;
addr_next *= ocf_line_size(cache);
addr_next += cache->device->metadata_offset;
addr = metadata_offset;
addr += req->map[i].coll_idx * cache_line_size;

if (addr_next != (addr + bytes))
bytes = cache_line_size;
for (; i < (req->core_line_count - 1); i++) {
if (__entries_not_adjacent(req, i, i + 1))
break;

bytes += ocf_line_size(cache);
bytes += cache_line_size;
}

if (i == (req->core_line_count - 1)) {
skip = (ocf_line_size(cache) -
((req->addr + req->bytes) %
ocf_line_size(cache))) % ocf_line_size(cache);
bytes -= skip;
}
/* Seek should be taken into account for the first chunk only */
addr += seek;
bytes -= seek;
seek = 0;

bytes -= __skip_on_the_last_entry(skip, req, i);

bytes = OCF_MIN(bytes, req->bytes - total_bytes);

Expand Down

0 comments on commit 1972ee1

Please sign in to comment.