diff --git a/example/simple/src/main.c b/example/simple/src/main.c index eae974584..0f989497a 100644 --- a/example/simple/src/main.c +++ b/example/simple/src/main.c @@ -1,6 +1,7 @@ /* * Copyright(c) 2019-2022 Intel Corporation * Copyright(c) 2024 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -315,7 +316,7 @@ void perform_workload(ocf_core_t core) } strcpy(data1->ptr, "This is some test data"); /* Prepare and submit write IO to the core */ - submit_io(core, data1, 0, 512, OCF_WRITE, complete_write); + submit_io(core, data1, 0, 4096, OCF_WRITE, complete_write); /* After write completes, complete_write() callback will be called. */ /* @@ -330,7 +331,7 @@ void perform_workload(ocf_core_t core) return; } /* Prepare and submit read IO to the core */ - submit_io(core, data2, 0, 512, OCF_READ, complete_read); + submit_io(core, data2, 0, 4096, OCF_READ, complete_read); /* After read completes, complete_read() callback will be called, * where we print our example data to stdout. */ diff --git a/inc/ocf_def.h b/inc/ocf_def.h index 929561f7b..008e7151c 100644 --- a/inc/ocf_def.h +++ b/inc/ocf_def.h @@ -9,6 +9,7 @@ #ifndef __OCF_DEF_H__ #define __OCF_DEF_H__ +#include "ocf_env.h" #include "ocf_cfg.h" /** * @file @@ -58,10 +59,17 @@ /** * @name OCF cores definitions */ +/** + * Number of core id bits + */ +#define OCF_CORE_ID_BITS 14 /** * Maximum numbers of cores per cache instance + * Must be smaller than (1 << OCF_CORE_ID_BITS) to leave space + * for invalid OCF_CORE_ID_INVALID. 
*/ -#define OCF_CORE_MAX OCF_CONFIG_MAX_CORES +#define OCF_CORE_NUM OCF_CONFIG_MAX_CORES +_Static_assert(OCF_CORE_NUM < (1 << OCF_CORE_ID_BITS)); /** * Minimum value of a valid core ID */ @@ -69,11 +77,11 @@ /** * Maximum value of a valid core ID */ -#define OCF_CORE_ID_MAX (OCF_CORE_MAX - 1) +#define OCF_CORE_ID_MAX (OCF_CORE_NUM - 1) /** * Invalid value of core id */ -#define OCF_CORE_ID_INVALID OCF_CORE_MAX +#define OCF_CORE_ID_INVALID OCF_CORE_NUM /** * Size of core name */ diff --git a/inc/ocf_metadata.h b/inc/ocf_metadata.h index dd3c282ca..2e99e8cb7 100644 --- a/inc/ocf_metadata.h +++ b/inc/ocf_metadata.h @@ -1,5 +1,7 @@ /* * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2023 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -93,7 +95,7 @@ void ocf_metadata_probe(ocf_ctx_t ctx, ocf_volume_t volume, ocf_metadata_probe_end_t cmpl, void *priv); /** - * @brief Check if sectors in cache line before given address are invalid + * @brief Check if blocks in cache line before given address are invalid * * It might be used by volume which supports * atomic writes - (write of data and metadata in one buffer) @@ -101,13 +103,13 @@ void ocf_metadata_probe(ocf_ctx_t ctx, ocf_volume_t volume, * @param[in] cache OCF cache instance * @param[in] addr Sector address in bytes * - * @retval 0 Not all sectors before given address are invalid - * @retval Non-zero Number of sectors before given address + * @retval 0 Not all blocks before given address are invalid + * @retval Non-zero Number of blocks before given address */ int ocf_metadata_check_invalid_before(ocf_cache_t cache, uint64_t addr); /** - * @brief Check if sectors in cache line after given end address are invalid + * @brief Check if blocks in cache line after given end address are invalid * * It might be used by volume which supports * atomic writes - (write of data and metadata in one buffer) @@ -116,8 +118,8 @@ int 
ocf_metadata_check_invalid_before(ocf_cache_t cache, uint64_t addr); * @param[in] addr Sector address in bytes * @param[in] bytes IO size in bytes * - * @retval 0 Not all sectors after given end address are invalid - * @retval Non-zero Number of sectors after given end address + * @retval 0 Not all blocks after given end address are invalid + * @retval Non-zero Number of blocks after given end address */ int ocf_metadata_check_invalid_after(ocf_cache_t cache, uint64_t addr, uint32_t bytes); diff --git a/src/cleaning/acp.c b/src/cleaning/acp.c index e5a9af23c..13c0836d7 100644 --- a/src/cleaning/acp.c +++ b/src/cleaning/acp.c @@ -109,10 +109,10 @@ struct acp_context { env_rwsem chunks_lock; /* number of chunks per core */ - uint64_t num_chunks[OCF_CORE_MAX]; + uint64_t num_chunks[OCF_CORE_NUM]; /* per core array of all chunks */ - struct acp_chunk_info *chunk_info[OCF_CORE_MAX]; + struct acp_chunk_info *chunk_info[OCF_CORE_NUM]; struct acp_bucket bucket_info[ACP_MAX_BUCKETS]; @@ -302,7 +302,7 @@ struct ocf_acp_populate_context { ocf_cache_t cache; struct { - uint16_t *chunk[OCF_CORE_MAX]; + uint16_t *chunk[OCF_CORE_NUM]; struct { struct list_head chunk_list; } bucket[ACP_MAX_BUCKETS]; @@ -334,7 +334,7 @@ static int ocf_acp_populate_handle(ocf_parallelize_t parallelize, OCF_COND_RESCHED_DEFAULT(step); - if (core_id == OCF_CORE_MAX) + if (core_id == OCF_CORE_NUM) continue; if (!metadata_test_dirty(cache, cline)) { diff --git a/src/cleaning/alru.c b/src/cleaning/alru.c index 219b493f4..a12f9938c 100644 --- a/src/cleaning/alru.c +++ b/src/cleaning/alru.c @@ -480,7 +480,7 @@ static int ocf_alru_populate_handle(ocf_parallelize_t parallelize, OCF_COND_RESCHED_DEFAULT(step); - if (core_id == OCF_CORE_MAX) + if (core_id == OCF_CORE_NUM) continue; if (!metadata_test_dirty(cache, cline)) { diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index faf013860..c3f5d4dc9 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -31,9 +31,8 @@ 
void ocf_engine_error(struct ocf_request *req, if (ocf_cache_log_rl(cache)) { ocf_core_log(req->core, log_err, - "%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg, - BYTES_TO_SECTORS(req->addr), - req->bytes); + "%s addr: %" ENV_PRIu64 ", bytes: %u\n", msg, + req->addr, req->bytes); } } @@ -135,12 +134,12 @@ void ocf_engine_patch_req_info(struct ocf_cache *cache, static void ocf_engine_update_req_info(struct ocf_cache *cache, struct ocf_request *req, uint32_t idx) { - uint8_t start_sector = 0; - uint8_t end_sector = ocf_line_end_sector(cache); + uint8_t start_block = 0; + uint8_t end_block = ocf_line_end_block(cache); struct ocf_map_info *entry = &(req->map[idx]); - start_sector = ocf_map_line_start_sector(req, idx); - end_sector = ocf_map_line_end_sector(req, idx); + start_block = ocf_map_line_start_block(req, idx); + end_block = ocf_map_line_end_block(req, idx); ENV_BUG_ON(entry->status != LOOKUP_HIT && entry->status != LOOKUP_MISS && @@ -150,7 +149,7 @@ static void ocf_engine_update_req_info(struct ocf_cache *cache, /* Handle return value */ if (entry->status == LOOKUP_HIT) { if (metadata_test_valid_sec(cache, entry->coll_idx, - start_sector, end_sector)) { + start_block, end_block)) { req->info.hit_no++; } else { req->info.invalid_no++; @@ -163,7 +162,7 @@ static void ocf_engine_update_req_info(struct ocf_cache *cache, /* Check if cache line is fully dirty */ if (metadata_test_dirty_all_sec(cache, entry->coll_idx, - start_sector, end_sector)) + start_block, end_block)) req->info.dirty_all++; } } @@ -360,7 +359,7 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache, entry->coll_idx); set_cache_line_invalid_no_flush(cache, 0, - ocf_line_end_sector(cache), + ocf_line_end_block(cache), entry->coll_idx); ocf_metadata_end_collision_shared_access(cache, diff --git a/src/engine/engine_common.h b/src/engine/engine_common.h index 4474a2203..eeb8ce492 100644 --- a/src/engine/engine_common.h +++ b/src/engine/engine_common.h @@ -1,6 +1,7 @@ /* * Copyright(c) 
2012-2022 Intel Corporation * Copyright(c) 2024 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -150,8 +151,8 @@ static inline uint32_t ocf_engine_io_count(struct ocf_request *req) static inline bool ocf_engine_map_all_sec_dirty(struct ocf_request *req, uint32_t line) { - uint8_t start = ocf_map_line_start_sector(req, line); - uint8_t end = ocf_map_line_end_sector(req, line); + uint8_t start = ocf_map_line_start_block(req, line); + uint8_t end = ocf_map_line_end_block(req, line); if (req->map[line].status != LOOKUP_HIT) return false; @@ -163,8 +164,8 @@ bool ocf_engine_map_all_sec_dirty(struct ocf_request *req, uint32_t line) static inline bool ocf_engine_map_all_sec_clean(struct ocf_request *req, uint32_t line) { - uint8_t start = ocf_map_line_start_sector(req, line); - uint8_t end = ocf_map_line_end_sector(req, line); + uint8_t start = ocf_map_line_start_block(req, line); + uint8_t end = ocf_map_line_end_block(req, line); if (req->map[line].status != LOOKUP_HIT) return false; @@ -181,8 +182,8 @@ bool ocf_engine_map_all_sec_clean(struct ocf_request *req, uint32_t line) static inline bool ocf_engine_map_all_sec_valid(struct ocf_request *req, uint32_t line) { - uint8_t start = ocf_map_line_start_sector(req, line); - uint8_t end = ocf_map_line_end_sector(req, line); + uint8_t start = ocf_map_line_start_block(req, line); + uint8_t end = ocf_map_line_end_block(req, line); if (req->map[line].status != LOOKUP_HIT) return false; diff --git a/src/engine/engine_wi.c b/src/engine/engine_wi.c index a72c59247..ff836fd2a 100644 --- a/src/engine/engine_wi.c +++ b/src/engine/engine_wi.c @@ -1,6 +1,7 @@ /* * Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2024 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -32,8 +33,8 @@ static int _ocf_write_wi_next_pass(struct ocf_request *req) only if concurrent I/O had inserted target LBAs to cache after this request did traversation. 
These LBAs might have been written by this request behind the concurrent I/O's back, - resulting in making these sectors effectively invalid. - In this case we must update these sectors metadata to + resulting in making these blocks effectively invalid. + In this case we must update these blocks metadata to reflect this. However we won't know about this after we traverse the request again - hence calling ocf_write_wi again with req->wi_second_pass set to indicate that this diff --git a/src/engine/engine_wo.c b/src/engine/engine_wo.c index 16a93625c..9d771cd2c 100644 --- a/src/engine/engine_wo.c +++ b/src/engine/engine_wo.c @@ -1,6 +1,7 @@ /* * Copyright(c) 2019-2022 Intel Corporation * Copyright(c) 2024 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -68,8 +69,8 @@ static int ocf_read_wo_cache_do(struct ocf_request *req) for (line = 0; line < req->core_line_count; ++line) { entry = &req->map[line]; - s = ocf_map_line_start_sector(req, line); - e = ocf_map_line_end_sector(req, line); + s = ocf_map_line_start_block(req, line); + e = ocf_map_line_end_block(req, line); ocf_hb_cline_prot_lock_rd(&cache->metadata.lock, req->lock_idx, entry->core_id, @@ -85,20 +86,20 @@ static int ocf_read_wo_cache_do(struct ocf_request *req) io = false; } - /* try to seek directly to the last sector */ + /* try to seek directly to the last block */ if (entry->status == LOOKUP_MISS) { - /* all sectors invalid */ + /* all blocks invalid */ i = e + 1; - increment = SECTORS_TO_BYTES(e - s + 1); + increment = BLOCKS_TO_BYTES(e - s + 1); valid = false; } else if (ocf_engine_map_all_sec_valid(req, line)) { - /* all sectors valid */ + /* all blocks valid */ i = e + 1; - increment = SECTORS_TO_BYTES(e - s + 1); + increment = BLOCKS_TO_BYTES(e - s + 1); valid = true; } else { - /* need to iterate through CL sector by sector */ + /* need to iterate through CL block by block */ i = s; } @@ -109,7 +110,7 @@ static int ocf_read_wo_cache_do(struct 
ocf_request *req) increment = 0; do { ++i; - increment += SECTORS_TO_BYTES(1); + increment += BLOCKS_TO_BYTES(1); } while (i <= e && metadata_test_valid_one( cache, entry->coll_idx, i) == valid); diff --git a/src/engine/engine_zero.c b/src/engine/engine_zero.c index 9647e86b8..0a74216f3 100644 --- a/src/engine/engine_zero.c +++ b/src/engine/engine_zero.c @@ -1,6 +1,7 @@ /* * Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2024 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -72,19 +73,19 @@ static inline void ocf_zero_map_info(struct ocf_request *req) continue; start_bit = 0; - end_bit = ocf_line_end_sector(cache); + end_bit = ocf_line_end_block(cache); if (map_idx == 0) { /* First */ - start_bit = (BYTES_TO_SECTORS(req->addr) - % ocf_line_sectors(cache)); + start_bit = BYTES_TO_BLOCKS_ROUND_DOWN(req->addr) + % ocf_line_blocks(cache); } if (map_idx == (count - 1)) { /* Last */ - end_bit = (BYTES_TO_SECTORS(req->addr + - req->bytes - 1) % - ocf_line_sectors(cache)); + end_bit = BYTES_TO_BLOCKS_ROUND_DOWN(req->addr + + req->bytes - 1) % + ocf_line_blocks(cache); } ocf_metadata_flush_mark(cache, req, map_idx, INVALID, diff --git a/src/metadata/metadata.c b/src/metadata/metadata.c index 8662c1c9c..2770dddb8 100644 --- a/src/metadata/metadata.c +++ b/src/metadata/metadata.c @@ -52,8 +52,28 @@ enum { static inline size_t ocf_metadata_status_sizeof(ocf_cache_line_size_t line_size) { - /* Number of bytes required to mark cache line status */ - size_t size = BYTES_TO_SECTORS(line_size) / 8; + size_t size; + + switch (line_size) { + case ocf_cache_line_size_4: +#ifdef OCF_BLOCK_SIZE_4K + /* + * We only need one valid and one dirty per line. + * Use bitfields from struct ocf_metadata_map. 
+ */ + size = 0; + break; +#endif + case ocf_cache_line_size_8: + case ocf_cache_line_size_16: + case ocf_cache_line_size_32: + case ocf_cache_line_size_64: + /* Number of bytes required to mark cache line status */ + size = OCF_DIV_ROUND_UP(BYTES_TO_BLOCKS(line_size), 8); + break; + default: + ENV_BUG(); + } /* Number of types of status (valid, dirty, etc...) */ size *= ocf_metadata_status_type_max; @@ -99,13 +119,13 @@ static ocf_cache_line_t ocf_metadata_get_entries( return OCF_NUM_PARTITIONS; case metadata_segment_core_config: - return OCF_CORE_MAX; + return OCF_CORE_NUM; case metadata_segment_core_runtime: - return OCF_CORE_MAX; + return OCF_CORE_NUM; case metadata_segment_core_uuid: - return OCF_CORE_MAX; + return OCF_CORE_NUM; default: break; @@ -459,7 +479,7 @@ static inline void ocf_metadata_config_init(ocf_cache_t cache, size_t size) OCF_DEBUG_PARAM(cache, "Cache line size = %lu, bits count = %llu, " "status size = %lu", - size, ocf_line_sectors(cache), + size, ocf_line_blocks(cache), ocf_metadata_status_sizeof(size)); } @@ -828,7 +848,7 @@ static inline void _ocf_init_collision_entry(struct ocf_cache *cache, ocf_metadata_set_collision_info(cache, idx, invalid_idx, invalid_idx); ocf_metadata_set_core_info(cache, idx, - OCF_CORE_MAX, ULONG_MAX); + OCF_CORE_NUM, ULONG_MAX); metadata_init_status_bits(cache, idx); } @@ -1178,7 +1198,7 @@ void ocf_metadata_flush_mark(struct ocf_cache *cache, * line persistent in case of recovery */ - /* Collision table to get mapping cache line to HDD sector*/ + /* Collision table to get mapping cache line to HDD block */ ocf_metadata_raw_flush_mark(cache, &(ctrl->raw_desc[metadata_segment_collision]), req, map_idx, to_state, start, stop); @@ -1385,8 +1405,8 @@ static int ocf_metadata_load_atomic_metadata_drain(void *priv, for (i = 0; i < sector_no; i++) { ctx_data_rd_check(cache->owner, &meta, data, sizeof(meta)); - line = (sector_addr + i) / ocf_line_sectors(cache); - pos = (sector_addr + i) % ocf_line_sectors(cache); + line 
= (sector_addr + i) / ocf_line_blocks(cache); + pos = (sector_addr + i) % ocf_line_blocks(cache); core_seq_no = meta.core_seq_no; core_line = meta.core_line; @@ -1562,50 +1582,100 @@ void ocf_metadata_set_hash(struct ocf_cache *cache, ocf_cache_line_t index, #include "metadata_bit.h" +#ifdef OCF_BLOCK_SIZE_4K #define _ocf_metadata_funcs_5arg(what) \ bool ocf_metadata_##what(struct ocf_cache *cache, \ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \ { \ switch (cache->metadata.line_size) { \ - case ocf_cache_line_size_4: \ - return _ocf_metadata_##what##_u8(cache, line, start, stop, all); \ - case ocf_cache_line_size_8: \ - return _ocf_metadata_##what##_u16(cache, line, start, stop, all); \ - case ocf_cache_line_size_16: \ - return _ocf_metadata_##what##_u32(cache, line, start, stop, all); \ - case ocf_cache_line_size_32: \ - return _ocf_metadata_##what##_u64(cache, line, start, stop, all); \ - case ocf_cache_line_size_64: \ - return _ocf_metadata_##what##_u128(cache, line, start, stop, all); \ - case ocf_cache_line_size_none: \ - default: \ - ENV_BUG_ON(1); \ - return false; \ + case ocf_cache_line_size_4: \ + return _ocf_metadata_##what(cache, line, start, stop, all); \ + case ocf_cache_line_size_8: \ + case ocf_cache_line_size_16: \ + case ocf_cache_line_size_32: \ + return _ocf_metadata_##what##_u8(cache, line, \ + start, stop, all); \ + case ocf_cache_line_size_64: \ + return _ocf_metadata_##what##_u16(cache, line, \ + start, stop, all); \ + case ocf_cache_line_size_none: \ + default: \ + ENV_BUG_ON(1); \ + return false; \ } \ -} \ - +} +#else +#define _ocf_metadata_funcs_5arg(what) \ +bool ocf_metadata_##what(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \ +{ \ + switch (cache->metadata.line_size) { \ + case ocf_cache_line_size_4: \ + return _ocf_metadata_##what##_u8(cache, line, \ + start, stop, all); \ + case ocf_cache_line_size_8: \ + return _ocf_metadata_##what##_u16(cache, line, \ + start, stop, 
all); \ + case ocf_cache_line_size_16: \ + return _ocf_metadata_##what##_u32(cache, line, \ + start, stop, all); \ + case ocf_cache_line_size_32: \ + return _ocf_metadata_##what##_u64(cache, line, \ + start, stop, all); \ + case ocf_cache_line_size_64: \ + return _ocf_metadata_##what##_u128(cache, line, \ + start, stop, all); \ + case ocf_cache_line_size_none: \ + default: \ + ENV_BUG_ON(1); \ + return false; \ + } \ +} +#endif +#ifdef OCF_BLOCK_SIZE_4K +#define _ocf_metadata_funcs_4arg(what) \ +bool ocf_metadata_##what(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + switch (cache->metadata.line_size) { \ + case ocf_cache_line_size_4: \ + return _ocf_metadata_##what(cache, line, start, stop); \ + case ocf_cache_line_size_8: \ + case ocf_cache_line_size_16: \ + case ocf_cache_line_size_32: \ + return _ocf_metadata_##what##_u8(cache, line, start, stop); \ + case ocf_cache_line_size_64: \ + return _ocf_metadata_##what##_u16(cache, line, start, stop); \ + case ocf_cache_line_size_none: \ + default: \ + ENV_BUG_ON(1); \ + return false; \ + } \ +} +#else #define _ocf_metadata_funcs_4arg(what) \ bool ocf_metadata_##what(struct ocf_cache *cache, \ - ocf_cache_line_t line, uint8_t start, uint8_t stop) \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ { \ switch (cache->metadata.line_size) { \ - case ocf_cache_line_size_4: \ - return _ocf_metadata_##what##_u8(cache, line, start, stop); \ - case ocf_cache_line_size_8: \ - return _ocf_metadata_##what##_u16(cache, line, start, stop); \ - case ocf_cache_line_size_16: \ - return _ocf_metadata_##what##_u32(cache, line, start, stop); \ - case ocf_cache_line_size_32: \ - return _ocf_metadata_##what##_u64(cache, line, start, stop); \ - case ocf_cache_line_size_64: \ - return _ocf_metadata_##what##_u128(cache, line, start, stop); \ - case ocf_cache_line_size_none: \ - default: \ - ENV_BUG_ON(1); \ - return false; \ + case ocf_cache_line_size_4: \ + return 
_ocf_metadata_##what##_u8(cache, line, start, stop); \ + case ocf_cache_line_size_8: \ + return _ocf_metadata_##what##_u16(cache, line, start, stop); \ + case ocf_cache_line_size_16: \ + return _ocf_metadata_##what##_u32(cache, line, start, stop); \ + case ocf_cache_line_size_32: \ + return _ocf_metadata_##what##_u64(cache, line, start, stop); \ + case ocf_cache_line_size_64: \ + return _ocf_metadata_##what##_u128(cache, line, start, stop); \ + case ocf_cache_line_size_none: \ + default: \ + ENV_BUG_ON(1); \ + return false; \ } \ -} \ +} +#endif #define _ocf_metadata_funcs(what) \ _ocf_metadata_funcs_5arg(test_##what) \ @@ -1618,76 +1688,143 @@ bool ocf_metadata_##what(struct ocf_cache *cache, \ _ocf_metadata_funcs(dirty) _ocf_metadata_funcs(valid) +#ifdef OCF_BLOCK_SIZE_4K +bool ocf_metadata_clear_valid_if_clean(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + switch (cache->metadata.line_size) { + case ocf_cache_line_size_4: + return _ocf_metadata_clear_valid_if_clean(cache, + line, start, stop); + case ocf_cache_line_size_8: + case ocf_cache_line_size_16: + case ocf_cache_line_size_32: + return _ocf_metadata_clear_valid_if_clean_u8(cache, + line, start, stop); + case ocf_cache_line_size_64: + return _ocf_metadata_clear_valid_if_clean_u16(cache, + line, start, stop); + case ocf_cache_line_size_none: + default: + ENV_BUG_ON(1); + return false; + } +} +#else bool ocf_metadata_clear_valid_if_clean(struct ocf_cache *cache, - ocf_cache_line_t line, uint8_t start, uint8_t stop) + ocf_cache_line_t line, uint8_t start, uint8_t stop) { switch (cache->metadata.line_size) { - case ocf_cache_line_size_4: - return _ocf_metadata_clear_valid_if_clean_u8(cache, - line, start, stop); - case ocf_cache_line_size_8: - return _ocf_metadata_clear_valid_if_clean_u16(cache, - line, start, stop); - case ocf_cache_line_size_16: - return _ocf_metadata_clear_valid_if_clean_u32(cache, - line, start, stop); - case ocf_cache_line_size_32: - return 
_ocf_metadata_clear_valid_if_clean_u64(cache, - line, start, stop); - case ocf_cache_line_size_64: - return _ocf_metadata_clear_valid_if_clean_u128(cache, - line, start, stop); - case ocf_cache_line_size_none: - default: - ENV_BUG_ON(1); - return false; + case ocf_cache_line_size_4: + return _ocf_metadata_clear_valid_if_clean_u8(cache, + line, start, stop); + case ocf_cache_line_size_8: + return _ocf_metadata_clear_valid_if_clean_u16(cache, + line, start, stop); + case ocf_cache_line_size_16: + return _ocf_metadata_clear_valid_if_clean_u32(cache, + line, start, stop); + case ocf_cache_line_size_32: + return _ocf_metadata_clear_valid_if_clean_u64(cache, + line, start, stop); + case ocf_cache_line_size_64: + return _ocf_metadata_clear_valid_if_clean_u128(cache, + line, start, stop); + case ocf_cache_line_size_none: + default: + ENV_BUG_ON(1); + return false; } } +#endif +#ifdef OCF_BLOCK_SIZE_4K void ocf_metadata_clear_dirty_if_invalid(struct ocf_cache *cache, - ocf_cache_line_t line, uint8_t start, uint8_t stop) + ocf_cache_line_t line, uint8_t start, uint8_t stop) { switch (cache->metadata.line_size) { - case ocf_cache_line_size_4: - return _ocf_metadata_clear_dirty_if_invalid_u8(cache, - line, start, stop); - case ocf_cache_line_size_8: - return _ocf_metadata_clear_dirty_if_invalid_u16(cache, - line, start, stop); - case ocf_cache_line_size_16: - return _ocf_metadata_clear_dirty_if_invalid_u32(cache, - line, start, stop); - case ocf_cache_line_size_32: - return _ocf_metadata_clear_dirty_if_invalid_u64(cache, - line, start, stop); - case ocf_cache_line_size_64: - return _ocf_metadata_clear_dirty_if_invalid_u128(cache, - line, start, stop); - case ocf_cache_line_size_none: - default: - ENV_BUG(); + case ocf_cache_line_size_4: + return _ocf_metadata_clear_dirty_if_invalid(cache, + line, start, stop); + case ocf_cache_line_size_8: + case ocf_cache_line_size_16: + case ocf_cache_line_size_32: + return _ocf_metadata_clear_dirty_if_invalid_u8(cache, + line, start, stop); 
+ case ocf_cache_line_size_64: + return _ocf_metadata_clear_dirty_if_invalid_u16(cache, + line, start, stop); + case ocf_cache_line_size_none: + default: + ENV_BUG(); } } +#else +void ocf_metadata_clear_dirty_if_invalid(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + switch (cache->metadata.line_size) { + case ocf_cache_line_size_4: + return _ocf_metadata_clear_dirty_if_invalid_u8(cache, + line, start, stop); + case ocf_cache_line_size_8: + return _ocf_metadata_clear_dirty_if_invalid_u16(cache, + line, start, stop); + case ocf_cache_line_size_16: + return _ocf_metadata_clear_dirty_if_invalid_u32(cache, + line, start, stop); + case ocf_cache_line_size_32: + return _ocf_metadata_clear_dirty_if_invalid_u64(cache, + line, start, stop); + case ocf_cache_line_size_64: + return _ocf_metadata_clear_dirty_if_invalid_u128(cache, + line, start, stop); + case ocf_cache_line_size_none: + default: + ENV_BUG(); + } +} +#endif +#ifdef OCF_BLOCK_SIZE_4K +bool ocf_metadata_check(struct ocf_cache *cache, ocf_cache_line_t line) +{ + switch (cache->metadata.line_size) { + case ocf_cache_line_size_4: + return _ocf_metadata_check(cache, line); + case ocf_cache_line_size_8: + case ocf_cache_line_size_16: + case ocf_cache_line_size_32: + return _ocf_metadata_check_u8(cache, line); + case ocf_cache_line_size_64: + return _ocf_metadata_check_u16(cache, line); + case ocf_cache_line_size_none: + default: + ENV_BUG_ON(1); + return false; + } +} +#else bool ocf_metadata_check(struct ocf_cache *cache, ocf_cache_line_t line) { switch (cache->metadata.line_size) { - case ocf_cache_line_size_4: - return _ocf_metadata_check_u8(cache, line); - case ocf_cache_line_size_8: - return _ocf_metadata_check_u16(cache, line); - case ocf_cache_line_size_16: - return _ocf_metadata_check_u32(cache, line); - case ocf_cache_line_size_32: - return _ocf_metadata_check_u64(cache, line); - case ocf_cache_line_size_64: - return _ocf_metadata_check_u128(cache, line); - case 
ocf_cache_line_size_none: - default: - ENV_BUG_ON(1); - return false; + case ocf_cache_line_size_4: + return _ocf_metadata_check_u8(cache, line); + case ocf_cache_line_size_8: + return _ocf_metadata_check_u16(cache, line); + case ocf_cache_line_size_16: + return _ocf_metadata_check_u32(cache, line); + case ocf_cache_line_size_32: + return _ocf_metadata_check_u64(cache, line); + case ocf_cache_line_size_64: + return _ocf_metadata_check_u128(cache, line); + case ocf_cache_line_size_none: + default: + ENV_BUG_ON(1); + return false; } } +#endif int ocf_metadata_init(struct ocf_cache *cache, ocf_cache_line_size_t cache_line_size) diff --git a/src/metadata/metadata_bit.h b/src/metadata/metadata_bit.h index 59f56624d..5c7c5471e 100644 --- a/src/metadata/metadata_bit.h +++ b/src/metadata/metadata_bit.h @@ -1,5 +1,7 @@ /* * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2023 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -51,6 +53,194 @@ struct ocf_metadata_map_##type { \ type dirty; \ } __attribute__((packed)) +#ifdef OCF_BLOCK_SIZE_4K +#define ocf_metadata_bit_func_no_type(what) \ +static bool _ocf_metadata_test_##what(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \ +{ \ + struct ocf_metadata_ctrl *ctrl = \ + (struct ocf_metadata_ctrl *) cache->metadata.priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + const struct ocf_metadata_map *map = raw->mem_pool; \ +\ + ENV_BUG_ON(start != stop); \ +\ + _raw_bug_on(raw, line); \ +\ + if (map[line]._##what) { \ + return true; \ + } else { \ + return false; \ + } \ +} \ +\ +static bool _ocf_metadata_test_out_##what(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + return false; \ +} \ +\ +static bool _ocf_metadata_clear_##what(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + struct ocf_metadata_ctrl 
*ctrl = \ + (struct ocf_metadata_ctrl *) cache->metadata.priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map *map = raw->mem_pool; \ +\ + ENV_BUG_ON(start != stop); \ +\ + _raw_bug_on(raw, line); \ +\ + map[line]._##what = 0; \ +\ + return false; \ +} \ +\ +static bool _ocf_metadata_set_##what(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + bool result; \ +\ + struct ocf_metadata_ctrl *ctrl = \ + (struct ocf_metadata_ctrl *) cache->metadata.priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map *map = raw->mem_pool; \ +\ + ENV_BUG_ON(start != stop); \ + _raw_bug_on(raw, line); \ +\ + result = map[line]._##what ? true : false; \ +\ + map[line]._##what = 1; \ +\ + return result; \ +} \ +\ +static bool _ocf_metadata_test_and_set_##what( \ + struct ocf_cache *cache, ocf_cache_line_t line, \ + uint8_t start, uint8_t stop, bool all) \ +{ \ + bool test; \ + struct ocf_metadata_ctrl *ctrl = \ + (struct ocf_metadata_ctrl *) cache->metadata.priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map *map = raw->mem_pool; \ +\ + ENV_BUG_ON(start != stop); \ +\ + _raw_bug_on(raw, line); \ +\ + if (map[line]._##what) { \ + test = true; \ + } else { \ + test = false; \ + } \ +\ + map[line]._##what = 1; \ + return test; \ +} \ +\ +static bool _ocf_metadata_test_and_clear_##what( \ + struct ocf_cache *cache, ocf_cache_line_t line, \ + uint8_t start, uint8_t stop, bool all) \ +{ \ + bool test; \ + struct ocf_metadata_ctrl *ctrl = \ + (struct ocf_metadata_ctrl *) cache->metadata.priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map *map = raw->mem_pool; \ +\ + ENV_BUG_ON(start != stop); \ +\ + _raw_bug_on(raw, line); \ +\ + if (map[line]._##what) { \ + test = true; \ + } 
else { \ + test = false; \ + } \ +\ + map[line]._##what = 0; \ + return test; \ +} + +#define ocf_metadata_bit_func_basic_no_type() \ +static bool _ocf_metadata_clear_valid_if_clean(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + struct ocf_metadata_ctrl *ctrl = \ + (struct ocf_metadata_ctrl *) cache->metadata.priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map *map = raw->mem_pool; \ +\ + ENV_BUG_ON(start != stop); \ +\ + _raw_bug_on(raw, line); \ +\ + map[line]._valid = (!map[line]._dirty) ? 0 : map[line]._valid; \ +\ + if (map[line]._valid) { \ + return true; \ + } else { \ + return false; \ + } \ +} \ +\ +static void _ocf_metadata_clear_dirty_if_invalid(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + struct ocf_metadata_ctrl *ctrl = \ + (struct ocf_metadata_ctrl *) cache->metadata.priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map *map = raw->mem_pool; \ +\ + ENV_BUG_ON(start != stop); \ +\ + _raw_bug_on(raw, line); \ +\ + map[line]._dirty = (!map[line]._valid) ? 
0 : map[line]._dirty; \ +} \ +\ +/* true if no incorrect combination of status bits */ \ +static bool _ocf_metadata_check(struct ocf_cache *cache, \ + ocf_cache_line_t line) \ +{ \ + struct ocf_metadata_ctrl *ctrl = \ + (struct ocf_metadata_ctrl *) cache->metadata.priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map *map = raw->mem_pool; \ +\ + _raw_bug_on(raw, line); \ +\ + return (map[line]._dirty & (!map[line]._valid)) == 0; \ +} +#endif + #define ocf_metadata_bit_func(what, type) \ static bool _ocf_metadata_test_##what##_##type(struct ocf_cache *cache, \ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \ @@ -219,7 +409,7 @@ static bool _ocf_metadata_test_and_clear_##what##_##type( \ \ map[line].what &= ~mask; \ return test; \ -} \ +} #define ocf_metadata_bit_func_basic(type) \ static bool _ocf_metadata_clear_valid_if_clean_##type(struct ocf_cache *cache, \ @@ -279,16 +469,28 @@ static bool _ocf_metadata_check_##type(struct ocf_cache *cache, \ _raw_bug_on(raw, line); \ \ return (map[line].dirty & (~map[line].valid)) == 0; \ -} \ +} + +#ifdef OCF_BLOCK_SIZE_4K +#define ocf_metadata_bit_funcs_no_type() \ +ocf_metadata_bit_func_no_type(dirty); \ +ocf_metadata_bit_func_no_type(valid); \ +ocf_metadata_bit_func_basic_no_type() +#endif #define ocf_metadata_bit_funcs(type) \ ocf_metadata_bit_struct(type); \ ocf_metadata_bit_func(dirty, type); \ ocf_metadata_bit_func(valid, type); \ -ocf_metadata_bit_func_basic(type); \ +ocf_metadata_bit_func_basic(type) +#ifdef OCF_BLOCK_SIZE_4K +ocf_metadata_bit_funcs_no_type(); +#endif ocf_metadata_bit_funcs(u8); ocf_metadata_bit_funcs(u16); +#ifndef OCF_BLOCK_SIZE_4K ocf_metadata_bit_funcs(u32); ocf_metadata_bit_funcs(u64); ocf_metadata_bit_funcs(u128); +#endif diff --git a/src/metadata/metadata_cache_line.h b/src/metadata/metadata_cache_line.h index 0fbfd9936..83a41e97b 100644 --- a/src/metadata/metadata_cache_line.h +++ 
b/src/metadata/metadata_cache_line.h @@ -18,14 +18,14 @@ static inline ocf_cache_line_t ocf_line_count(struct ocf_cache *cache) return cache->conf_meta->cachelines; } -static inline uint64_t ocf_line_sectors(struct ocf_cache *cache) +static inline uint64_t ocf_line_blocks(struct ocf_cache *cache) { - return BYTES_TO_SECTORS(cache->metadata.line_size); + return BYTES_TO_BLOCKS_ROUND_DOWN(cache->metadata.line_size); } -static inline uint64_t ocf_line_end_sector(struct ocf_cache *cache) +static inline uint64_t ocf_line_end_block(struct ocf_cache *cache) { - return ocf_line_sectors(cache) - 1; + return ocf_line_blocks(cache) - 1; } #endif /* __METADATA_CACHE_LINE_H__ */ diff --git a/src/metadata/metadata_collision.c b/src/metadata/metadata_collision.c index 5ee86cec3..7ef7ffd8f 100644 --- a/src/metadata/metadata_collision.c +++ b/src/metadata/metadata_collision.c @@ -133,7 +133,7 @@ void ocf_metadata_remove_from_collision(struct ocf_cache *cache, ocf_cache_line_t line, ocf_part_id_t part_id) { ocf_core_id_t core_id; - uint64_t core_sector; + uint64_t core_line; ocf_cache_line_t hash_father; ocf_cache_line_t prev_line, next_line; ocf_cache_line_t line_entries = cache->device->collision_table_entries; @@ -151,12 +151,12 @@ void ocf_metadata_remove_from_collision(struct ocf_cache *cache, if (next_line != line_entries) ocf_metadata_set_collision_prev(cache, next_line, prev_line); - ocf_metadata_get_core_info(cache, line, &core_id, &core_sector); + ocf_metadata_get_core_info(cache, line, &core_id, &core_line); /* Update hash table, because if it was pointing to the given node it * must now point to the given's node next */ - hash_father = ocf_metadata_hash_func(cache, core_sector, core_id); + hash_father = ocf_metadata_hash_func(cache, core_line, core_id); ENV_BUG_ON(!(hash_father < hash_entries)); if (ocf_metadata_get_hash(cache, hash_father) == line) @@ -166,7 +166,7 @@ void ocf_metadata_remove_from_collision(struct ocf_cache *cache, line_entries, line_entries); 
ocf_metadata_set_core_info(cache, line, - OCF_CORE_MAX, ULLONG_MAX); + OCF_CORE_NUM, ULLONG_MAX); } /* must be called under global metadata read(shared) lock */ diff --git a/src/metadata/metadata_collision.h b/src/metadata/metadata_collision.h index 5502b4ac8..edc12a080 100644 --- a/src/metadata/metadata_collision.h +++ b/src/metadata/metadata_collision.h @@ -8,6 +8,8 @@ #ifndef __METADATA_COLLISION_H__ #define __METADATA_COLLISION_H__ +#include "../ocf_def_priv.h" + /** * @brief Metadata map structure */ @@ -27,9 +29,17 @@ struct ocf_metadata_map { uint64_t core_line; /*!< Core line addres on cache mapped by this strcture */ - uint16_t core_id; + uint16_t core_id : OCF_CORE_ID_BITS; /*!< ID of core where is assigned this cache line*/ +#ifdef OCF_BLOCK_SIZE_4K + uint16_t _valid : 1; + /*!< valid bit for 4K cache line */ + + uint16_t _dirty : 1; + /*!< dirty bit for 4K cache line */ +#endif + uint8_t status[]; /*!< Entry status structure e.g. valid, dirty...*/ } __attribute__((packed)); diff --git a/src/metadata/metadata_core.c b/src/metadata/metadata_core.c index a2701d675..795f3c3bf 100644 --- a/src/metadata/metadata_core.c +++ b/src/metadata/metadata_core.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2020-2021 Intel Corporation + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -12,7 +13,7 @@ void ocf_metadata_get_core_info(struct ocf_cache *cache, ocf_cache_line_t line, ocf_core_id_t *core_id, - uint64_t *core_sector) + uint64_t *core_line) { const struct ocf_metadata_map *collision; struct ocf_metadata_ctrl *ctrl = @@ -25,13 +26,13 @@ void ocf_metadata_get_core_info(struct ocf_cache *cache, if (core_id) *core_id = collision->core_id; - if (core_sector) - *core_sector = collision->core_line; + if (core_line) + *core_line = collision->core_line; } void ocf_metadata_set_core_info(struct ocf_cache *cache, ocf_cache_line_t line, ocf_core_id_t core_id, - uint64_t core_sector) + uint64_t core_line) { struct ocf_metadata_map *collision; struct 
ocf_metadata_ctrl *ctrl = @@ -42,7 +43,7 @@ void ocf_metadata_set_core_info(struct ocf_cache *cache, if (collision) { collision->core_id = core_id; - collision->core_line = core_sector; + collision->core_line = core_line; } else { ocf_metadata_error(cache); } @@ -62,7 +63,7 @@ ocf_core_id_t ocf_metadata_get_core_id(struct ocf_cache *cache, return collision->core_id; ocf_metadata_error(cache); - return OCF_CORE_MAX; + return OCF_CORE_NUM; } struct ocf_metadata_uuid *ocf_metadata_get_core_uuid( diff --git a/src/metadata/metadata_core.h b/src/metadata/metadata_core.h index 11186b30f..9776a9021 100644 --- a/src/metadata/metadata_core.h +++ b/src/metadata/metadata_core.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -10,11 +11,11 @@ void ocf_metadata_get_core_info(struct ocf_cache *cache, ocf_cache_line_t line, ocf_core_id_t *core_id, - uint64_t *core_sector); + uint64_t *core_line); void ocf_metadata_set_core_info(struct ocf_cache *cache, ocf_cache_line_t line, ocf_core_id_t core_id, - uint64_t core_sector); + uint64_t core_line); ocf_core_id_t ocf_metadata_get_core_id( struct ocf_cache *cache, ocf_cache_line_t line); diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index ccb8cab1e..7145d5c6b 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -131,7 +131,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv, { struct metadata_io_read_i_atomic_context *context; uint64_t io_sectors_count = cache->device->collision_table_entries * - ocf_line_sectors(cache); + ocf_line_blocks(cache); struct ocf_request *req; OCF_DEBUG_TRACE(cache); diff --git a/src/metadata/metadata_misc.c b/src/metadata/metadata_misc.c index ef60083b5..e5906acc7 100644 --- a/src/metadata/metadata_misc.c +++ b/src/metadata/metadata_misc.c @@ -26,7 +26,7 @@ void ocf_metadata_sparse_cache_line(struct ocf_cache *cache, { 
ocf_metadata_start_collision_shared_access(cache, cache_line); - set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache), + set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_block(cache), cache_line); /* @@ -65,7 +65,7 @@ int ocf_metadata_detach_cline_range(ocf_cache_t cache, ocf_cache_line_t begin, ocf_metadata_start_collision_shared_access(cache, cline); - set_cache_line_unavailable(cache, 0, ocf_line_end_sector(cache), + set_cache_line_unavailable(cache, 0, ocf_line_end_block(cache), cline); /* diff --git a/src/metadata/metadata_raw_atomic.c b/src/metadata/metadata_raw_atomic.c index dd6b9b516..eb7eb8621 100644 --- a/src/metadata/metadata_raw_atomic.c +++ b/src/metadata/metadata_raw_atomic.c @@ -1,6 +1,7 @@ /* * Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2024 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -90,9 +91,9 @@ static void _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache, start_addr *= ocf_line_size(cache); start_addr += cache->device->metadata_offset; - start_addr += SECTORS_TO_BYTES(map->start_flush); - len = SECTORS_TO_BYTES(map->stop_flush - map->start_flush); - len += SECTORS_TO_BYTES(1); + start_addr += BLOCKS_TO_BYTES(map->start_flush); + len = BLOCKS_TO_BYTES(map->stop_flush - map->start_flush); + len += BLOCKS_TO_BYTES(1); _raw_atomic_io_discard_do(req, start_addr, len); } @@ -157,7 +158,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req, } } else if (i == (line_no - 1)) { /* Last */ - if (map->stop_flush != ocf_line_end_sector(cache)) { + if (map->stop_flush != ocf_line_end_block(cache)) { _raw_atomic_flush_do_asynch_sec(cache, req, i); } else { _raw_atomic_add_page(cache, clines_tab, diff --git a/src/metadata/metadata_status.h b/src/metadata/metadata_status.h index 1356b7c50..4eb31c280 100644 --- a/src/metadata/metadata_status.h +++ b/src/metadata/metadata_status.h @@ -1,6 +1,7 @@ /* * Copyright(c) 2012-2021 Intel 
Corporation * Copyright(c) 2025 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -47,8 +48,8 @@ void ocf_metadata_clear_dirty_if_invalid(struct ocf_cache *cache, static inline void metadata_init_status_bits(struct ocf_cache *cache, ocf_cache_line_t line) { - ocf_metadata_clear_dirty(cache, line, 0, ocf_line_end_sector(cache)); - ocf_metadata_clear_valid(cache, line, 0, ocf_line_end_sector(cache)); + ocf_metadata_clear_dirty(cache, line, 0, ocf_line_end_block(cache)); + ocf_metadata_clear_valid(cache, line, 0, ocf_line_end_block(cache)); } static inline bool metadata_test_dirty_all(struct ocf_cache *cache, @@ -57,7 +58,7 @@ static inline bool metadata_test_dirty_all(struct ocf_cache *cache, bool test; test = ocf_metadata_test_dirty(cache, line, 0, - ocf_line_end_sector(cache), true); + ocf_line_end_block(cache), true); return test; } @@ -68,7 +69,7 @@ static inline bool metadata_test_dirty(struct ocf_cache *cache, bool test; test = ocf_metadata_test_dirty(cache, line, 0, - ocf_line_end_sector(cache), false); + ocf_line_end_block(cache), false); return test; } @@ -76,27 +77,27 @@ static inline bool metadata_test_dirty(struct ocf_cache *cache, static inline void metadata_set_dirty(struct ocf_cache *cache, ocf_cache_line_t line) { - ocf_metadata_set_dirty(cache, line, 0, ocf_line_end_sector(cache)); + ocf_metadata_set_dirty(cache, line, 0, ocf_line_end_block(cache)); } static inline void metadata_clear_dirty(struct ocf_cache *cache, ocf_cache_line_t line) { - ocf_metadata_clear_dirty(cache, line, 0, ocf_line_end_sector(cache)); + ocf_metadata_clear_dirty(cache, line, 0, ocf_line_end_block(cache)); } static inline bool metadata_test_and_clear_dirty( struct ocf_cache *cache, ocf_cache_line_t line) { return ocf_metadata_test_and_clear_dirty(cache, line, 0, - ocf_line_end_sector(cache), false); + ocf_line_end_block(cache), false); } static inline bool metadata_test_and_set_dirty(struct ocf_cache *cache, ocf_cache_line_t 
line) { return ocf_metadata_test_and_set_dirty(cache, line, 0, - ocf_line_end_sector(cache), false); + ocf_line_end_block(cache), false); } /******************************************************************************* @@ -164,7 +165,7 @@ static inline bool metadata_test_and_clear_dirty_sec( /* * Marks given cache line's bits as clean * - * @return true if any cache line's sector was dirty and became clean + * @return true if any cache line's block was dirty and became clean * @return false for other cases */ static inline bool metadata_clear_dirty_sec_changed( @@ -184,7 +185,7 @@ static inline bool metadata_clear_dirty_sec_changed( /* * Marks given cache line's bits as dirty * - * @return true if any cache line's sector became dirty + * @return true if any cache line's block became dirty * @return false for other cases */ static inline bool metadata_set_dirty_sec_changed( @@ -209,54 +210,54 @@ static inline bool metadata_test_valid_any(struct ocf_cache *cache, ocf_cache_line_t line) { return ocf_metadata_test_valid(cache, line, 0, - ocf_line_end_sector(cache), false); + ocf_line_end_block(cache), false); } static inline bool metadata_test_valid(struct ocf_cache *cache, ocf_cache_line_t line) { return ocf_metadata_test_valid(cache, line, 0, - ocf_line_end_sector(cache), true); + ocf_line_end_block(cache), true); } static inline void metadata_set_valid(struct ocf_cache *cache, ocf_cache_line_t line) { - ocf_metadata_set_valid(cache, line, 0, ocf_line_end_sector(cache)); + ocf_metadata_set_valid(cache, line, 0, ocf_line_end_block(cache)); } static inline void metadata_clear_valid(struct ocf_cache *cache, ocf_cache_line_t line) { - ocf_metadata_clear_valid(cache, line, 0, ocf_line_end_sector(cache)); + ocf_metadata_clear_valid(cache, line, 0, ocf_line_end_block(cache)); } static inline bool metadata_clear_valid_if_clean(struct ocf_cache *cache, ocf_cache_line_t line) { return ocf_metadata_clear_valid_if_clean(cache, line, 0, - ocf_line_end_sector(cache)); + 
ocf_line_end_block(cache)); } static inline bool metadata_test_and_clear_valid( struct ocf_cache *cache, ocf_cache_line_t line) { return ocf_metadata_test_and_clear_valid(cache, line, 0, - ocf_line_end_sector(cache), true); + ocf_line_end_block(cache), true); } static inline bool metadata_test_and_set_valid(struct ocf_cache *cache, ocf_cache_line_t line) { return ocf_metadata_test_and_set_valid(cache, line, 0, - ocf_line_end_sector(cache), true); + ocf_line_end_block(cache), true); } static inline void metadata_clear_dirty_if_invalid(struct ocf_cache *cache, ocf_cache_line_t line) { ocf_metadata_clear_dirty_if_invalid(cache, line, 0, - ocf_line_end_sector(cache)); + ocf_line_end_block(cache)); } /******************************************************************************* @@ -317,11 +318,11 @@ static inline void metadata_set_valid_sec_one(struct ocf_cache *cache, ocf_metadata_set_valid(cache, line, pos, pos); } /* - * Marks given cache line's sectors as invalid + * Marks given cache line's blocks as invalid * - * @return true if line was valid and became invalid (all sectors invalid) + * @return true if line was valid and became invalid (all blocks invalid) * @return false if line was invalid and remains invalid or - * if line was valid and still has valid sectors + * if line was valid and still has valid blocks */ static inline bool metadata_clear_valid_sec_changed( struct ocf_cache *cache, ocf_cache_line_t line, @@ -330,7 +331,7 @@ static inline bool metadata_clear_valid_sec_changed( bool line_was_valid, _line_remains_valid; line_was_valid = ocf_metadata_test_valid(cache, line, 0, - ocf_line_end_sector(cache), false); + ocf_line_end_block(cache), false); _line_remains_valid = ocf_metadata_clear_valid(cache, line, start, stop); diff --git a/src/metadata/metadata_superblock.c b/src/metadata/metadata_superblock.c index 2ddf88f28..1e28f02a2 100644 --- a/src/metadata/metadata_superblock.c +++ b/src/metadata/metadata_superblock.c @@ -165,7 +165,7 @@ int 
ocf_metadata_validate_superblock(ocf_ctx_t ctx, return -OCF_ERR_INVAL; } - if (superblock->core_count > OCF_CORE_MAX) { + if (superblock->core_count > OCF_CORE_NUM) { ocf_log_invalid_superblock("core count"); return -OCF_ERR_INVAL; } @@ -227,7 +227,7 @@ static void _ocf_metadata_validate_core_config(ocf_pipeline_t pipeline, superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config); core_config = METADATA_MEM_POOL(ctrl, metadata_segment_core_config); - for (core_id = 0; core_id < OCF_CORE_MAX; core_id++) { + for (core_id = 0; core_id < OCF_CORE_NUM; core_id++) { valid_in_bitmap = env_bit_test(core_id, superblock->valid_core_bitmap); diff --git a/src/metadata/metadata_superblock.h b/src/metadata/metadata_superblock.h index cc84aadc6..6c2934c82 100644 --- a/src/metadata/metadata_superblock.h +++ b/src/metadata/metadata_superblock.h @@ -44,7 +44,7 @@ struct ocf_superblock_config { ocf_cache_line_size_t line_size; uint32_t core_count; - unsigned long valid_core_bitmap[OCF_DIV_ROUND_UP_STATIC(OCF_CORE_MAX, + unsigned long valid_core_bitmap[OCF_DIV_ROUND_UP_STATIC(OCF_CORE_NUM, sizeof(unsigned long) * 8)]; bool cleaner_disabled; diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index dfc2042b3..54c503d84 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -534,12 +534,12 @@ struct ocf_mngt_rebuild_metadata_context { struct { env_atomic lines; - } core[OCF_CORE_MAX]; + } core[OCF_CORE_NUM]; struct { struct { uint32_t lines; - } core[OCF_CORE_MAX]; + } core[OCF_CORE_NUM]; } shard[OCF_MNGT_REBUILD_METADATA_SHARDS_CNT]; env_atomic free_lines; @@ -551,7 +551,7 @@ struct ocf_mngt_rebuild_metadata_context { static void ocf_mngt_cline_reset_metadata(ocf_cache_t cache, ocf_cache_line_t cline, uint32_t lru_list) { - ocf_metadata_set_core_info(cache, cline, OCF_CORE_MAX, ULLONG_MAX); + ocf_metadata_set_core_info(cache, cline, OCF_CORE_NUM, ULLONG_MAX); metadata_init_status_bits(cache, cline); ocf_metadata_set_partition_id(cache, cline, 
PARTITION_FREELIST); @@ -609,7 +609,7 @@ static int ocf_mngt_rebuild_metadata_handle(ocf_parallelize_t parallelize, ocf_metadata_get_core_info(cache, cline, &core_id, &core_line); if (!ocf_metadata_check(cache, cline) || - core_id > OCF_CORE_MAX) { + core_id > OCF_CORE_NUM) { ocf_cache_log(cache, log_err, "Inconsistent mapping " "detected in on-disk metadata - " "refusing to recover cache.\n"); @@ -617,7 +617,7 @@ static int ocf_mngt_rebuild_metadata_handle(ocf_parallelize_t parallelize, } any_valid = metadata_clear_valid_if_clean(cache, cline); - if (!any_valid || core_id == OCF_CORE_MAX) { + if (!any_valid || core_id == OCF_CORE_NUM) { /* Reset metadata for not mapped or clean cache line */ ocf_mngt_cline_reset_metadata(cache, cline, shard_id); free_lines++; @@ -1053,7 +1053,13 @@ static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params *param param->flags.cache_locked = true; +#ifdef OCF_BLOCK_SIZE_4K + /* In 4k mode OCF can't handle unaligned I/O, so PT is enforced */ + cache->pt_unaligned_io = true; +#else cache->pt_unaligned_io = cfg->pt_unaligned_io; +#endif + cache->use_submit_io_fast = cfg->use_submit_io_fast; cache->metadata.is_volatile = cfg->metadata_volatile; diff --git a/src/mngt/ocf_mngt_core.c b/src/mngt/ocf_mngt_core.c index a5819e749..d36ff9e50 100644 --- a/src/mngt/ocf_mngt_core.c +++ b/src/mngt/ocf_mngt_core.c @@ -182,23 +182,23 @@ static unsigned long _ffz(unsigned long word) static unsigned long _ocf_mngt_find_first_free_core(const unsigned long *bitmap) { unsigned long i; - unsigned long ret = OCF_CORE_MAX; + unsigned long ret = OCF_CORE_NUM; /* check core 0 availability */ bool zero_core_free = !(*bitmap & 0x1UL); /* check if any core id is free except 0 */ - for (i = 0; i * sizeof(unsigned long) * 8 < OCF_CORE_MAX; i++) { + for (i = 0; i * sizeof(unsigned long) * 8 < OCF_CORE_NUM; i++) { unsigned long long ignore_mask = (i == 0) ? 
1UL : 0UL; if (~(bitmap[i] | ignore_mask)) { - ret = OCF_MIN(OCF_CORE_MAX, i * sizeof(unsigned long) * + ret = OCF_MIN(OCF_CORE_NUM, i * sizeof(unsigned long) * 8 + _ffz(bitmap[i] | ignore_mask)); break; } } /* return 0 only if no other core is free */ - if (ret == OCF_CORE_MAX && zero_core_free) + if (ret == OCF_CORE_NUM && zero_core_free) return 0; return ret; diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c index 31dd4cefc..2e4cff5be 100644 --- a/src/mngt/ocf_mngt_flush.c +++ b/src/mngt/ocf_mngt_flush.c @@ -154,13 +154,13 @@ bool ocf_mngt_cache_is_dirty(ocf_cache_t cache) /************************FLUSH CORE CODE**************************************/ /* Returns: * 0 if OK and tbl & num is filled: - * * tbl - table with sectors&cacheline + * * tbl - table with blocks&cacheline * * num - number of items in this table. * other value means error. * NOTE: * Table is not sorted. */ -static int _ocf_mngt_get_sectors(ocf_cache_t cache, ocf_core_id_t core_id, +static int _ocf_mngt_get_blocks(ocf_cache_t cache, ocf_core_id_t core_id, struct flush_data **tbl, uint32_t *num) { ocf_core_t core = ocf_cache_get_core(cache, core_id); @@ -258,7 +258,7 @@ static int _ocf_mngt_get_flush_containers(ocf_cache_t cache, if (core_count == 0) goto unlock; - core_revmap = env_vzalloc(sizeof(*core_revmap) * OCF_CORE_MAX); + core_revmap = env_vzalloc(sizeof(*core_revmap) * OCF_CORE_NUM); if (!core_revmap) { ret = -OCF_ERR_NO_MEM; goto unlock; @@ -536,7 +536,7 @@ static void _ocf_mngt_flush_containers( return; } - /* Sort data. Smallest sectors first (0...n). */ + /* Sort data. Smallest blocks first (0...n). 
*/ ocf_cleaner_sort_flush_containers(fctbl, fcnum); env_atomic_set(&context->fcs.error, 0); @@ -573,7 +573,7 @@ static void _ocf_mngt_flush_core( return; } - ret = _ocf_mngt_get_sectors(cache, core_id, + ret = _ocf_mngt_get_blocks(cache, core_id, &fc->flush_data, &fc->count); if (ret) { ocf_core_log(core, log_err, "Flushing operation aborted, " @@ -611,7 +611,7 @@ static void _ocf_mngt_flush_all_cores( env_atomic_set(&cache->flush_in_progress, 1); - /* Get all 'dirty' sectors for all cores */ + /* Get all 'dirty' blocks for all cores */ ret = _ocf_mngt_get_flush_containers(cache, &fctbl, &fcnum, context->begin, context->end); if (ret) { @@ -632,7 +632,7 @@ static void _ocf_mngt_flush_all_cores_complete( env_atomic_set(&cache->flush_in_progress, 0); - for (i = 0, j = 0; i < OCF_CORE_MAX; i++) { + for (i = 0, j = 0; i < OCF_CORE_NUM; i++) { if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap)) continue; diff --git a/src/ocf_cache_priv.h b/src/ocf_cache_priv.h index e010e94a0..546eae086 100644 --- a/src/ocf_cache_priv.h +++ b/src/ocf_cache_priv.h @@ -98,7 +98,7 @@ struct ocf_cache { struct ocf_alock *concurrency; } standby; - struct ocf_core core[OCF_CORE_MAX]; + struct ocf_core core[OCF_CORE_NUM]; ocf_pipeline_t stop_pipeline; @@ -148,14 +148,14 @@ struct ocf_cache { static inline ocf_core_t ocf_cache_get_core(ocf_cache_t cache, ocf_core_id_t core_id) { - if (core_id >= OCF_CORE_MAX) + if (core_id >= OCF_CORE_NUM) return NULL; return &cache->core[core_id]; } #define for_each_core_all(_cache, _core, _id) \ - for (_id = 0; _core = &_cache->core[_id], _id < OCF_CORE_MAX; _id++) + for (_id = 0; _core = &_cache->core[_id], _id < OCF_CORE_NUM; _id++) #define for_each_core(_cache, _core, _id) \ for_each_core_all(_cache, _core, _id) \ diff --git a/src/ocf_core.c b/src/ocf_core.c index 5065377a8..a3b2877f0 100644 --- a/src/ocf_core.c +++ b/src/ocf_core.c @@ -130,7 +130,7 @@ int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx, if (!visitor) return 
-OCF_ERR_INVAL; - for (id = 0; id < OCF_CORE_MAX; id++) { + for (id = 0; id < OCF_CORE_NUM; id++) { if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap)) continue; diff --git a/src/ocf_def_priv.h b/src/ocf_def_priv.h index c4c249cc9..9b361b61f 100644 --- a/src/ocf_def_priv.h +++ b/src/ocf_def_priv.h @@ -11,12 +11,24 @@ #include "ocf/ocf.h" #include "ocf_env.h" -#define BYTES_TO_SECTORS(x) ((x) >> ENV_SECTOR_SHIFT) -#define SECTORS_TO_BYTES(x) ((x) << ENV_SECTOR_SHIFT) +#define SECTOR_SHIFT 9 +#define SECTOR_SIZE (1 << SECTOR_SHIFT) #define BYTES_TO_PAGES(x) ((((uint64_t)x) + (PAGE_SIZE - 1)) / PAGE_SIZE) #define PAGES_TO_BYTES(x) (((uint64_t)x) * PAGE_SIZE) +#ifdef OCF_BLOCK_SIZE_4K +#define OCF_BLOCK_SHIFT 12 +#else +#define OCF_BLOCK_SHIFT SECTOR_SHIFT +#endif + +#define OCF_BLOCK_SIZE (1 << OCF_BLOCK_SHIFT) +#define BYTES_TO_BLOCKS(x) \ + (((uint64_t)(x) + (OCF_BLOCK_SIZE - 1)) >> OCF_BLOCK_SHIFT) +#define BLOCKS_TO_BYTES(x) ((uint64_t)(x) << OCF_BLOCK_SHIFT) +#define BYTES_TO_BLOCKS_ROUND_DOWN(x) ((x) >> OCF_BLOCK_SHIFT) + #define OCF_DIV_ROUND_UP_STATIC(n, d) \ (((n) + (d) - 1) / (d)) diff --git a/src/ocf_io.c b/src/ocf_io.c index 0d5c65b14..8b1929eb3 100644 --- a/src/ocf_io.c +++ b/src/ocf_io.c @@ -1,6 +1,7 @@ /* * Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2024-2025 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -78,9 +79,8 @@ ocf_io_t ocf_io_new(ocf_volume_t volume, ocf_queue_t queue, uint32_t io_class, uint64_t flags) { struct ocf_request *req; - uint32_t sector_size = SECTORS_TO_BYTES(1); - if ((addr % sector_size) || (bytes % sector_size)) + if ((addr % SECTOR_SIZE) || (bytes % SECTOR_SIZE)) return NULL; if (!env_refcnt_inc(&volume->refcnt)) diff --git a/src/ocf_lru.c b/src/ocf_lru.c index 1f8e6e72d..a98fd7f8c 100644 --- a/src/ocf_lru.c +++ b/src/ocf_lru.c @@ -754,7 +754,7 @@ static void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline, 
ocf_metadata_start_collision_shared_access( cache, cline); metadata_clear_valid_sec(cache, cline, 0, - ocf_line_end_sector(cache)); + ocf_line_end_block(cache)); ocf_metadata_remove_from_collision(cache, cline, part_id); ocf_metadata_end_collision_shared_access( cache, cline); diff --git a/src/ocf_metadata.c b/src/ocf_metadata.c index 8531b3e54..9cc67667f 100644 --- a/src/ocf_metadata.c +++ b/src/ocf_metadata.c @@ -1,5 +1,7 @@ /* * Copyright(c) 2012-2022 Intel Corporation + * Copyright(c) 2023 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ #include "ocf_priv.h" @@ -17,8 +19,8 @@ static inline uint8_t ocf_atomic_addr2pos(struct ocf_cache *cache, uint64_t addr) { addr -= cache->device->metadata_offset; - addr = BYTES_TO_SECTORS(addr); - addr %= ocf_line_sectors(cache); + addr = BYTES_TO_BLOCKS_ROUND_DOWN(addr); + addr %= ocf_line_blocks(cache); return addr; } @@ -38,7 +40,7 @@ int ocf_metadata_get_atomic_entry(ocf_cache_t cache, } else { ocf_cache_line_t line = ocf_atomic_addr2line(cache, addr); uint8_t pos = ocf_atomic_addr2pos(cache, addr); - ocf_core_id_t core_id = OCF_CORE_MAX; + ocf_core_id_t core_id = OCF_CORE_NUM; ocf_core_t core; uint64_t core_line = 0; @@ -98,7 +100,7 @@ int ocf_metadata_check_invalid_after(ocf_cache_t cache, uint64_t addr, if (!pos || addr < cache->device->metadata_offset) return 0; - for (i = pos; i < ocf_line_sectors(cache); i++) { + for (i = pos; i < ocf_line_blocks(cache); i++) { if (metadata_test_valid_one(cache, line, i)) return 0; diff --git a/src/ocf_stats.c b/src/ocf_stats.c index bf8065cb2..78e01671a 100644 --- a/src/ocf_stats.c +++ b/src/ocf_stats.c @@ -226,7 +226,7 @@ int ocf_core_stats_initialize_all(ocf_cache_t cache) if (ocf_cache_is_standby(cache)) return -OCF_ERR_CACHE_STANDBY; - for (id = 0; id < OCF_CORE_MAX; id++) { + for (id = 0; id < OCF_CORE_NUM; id++) { if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap)) continue; diff --git a/src/utils/utils_cache_line.c 
b/src/utils/utils_cache_line.c index bef402ad6..88b26d613 100644 --- a/src/utils/utils_cache_line.c +++ b/src/utils/utils_cache_line.c @@ -15,7 +15,7 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit, ocf_core_t core; bool line_remains_valid, line_became_invalid; - ENV_BUG_ON(core_id >= OCF_CORE_MAX); + ENV_BUG_ON(core_id >= OCF_CORE_NUM); core = ocf_cache_get_core(cache, core_id); line_became_invalid = metadata_clear_valid_sec_changed(cache, line, @@ -51,15 +51,15 @@ static void __detach_cache_line(struct ocf_cache *cache, uint8_t start_bit, bool is_valid = true; struct ocf_part *part; - ENV_BUG_ON(part_id == PARTITION_FREELIST && core_id != OCF_CORE_MAX); - ENV_BUG_ON(part_id != PARTITION_FREELIST && core_id == OCF_CORE_MAX); + ENV_BUG_ON(part_id == PARTITION_FREELIST && core_id != OCF_CORE_NUM); + ENV_BUG_ON(part_id != PARTITION_FREELIST && core_id == OCF_CORE_NUM); if (part_id != PARTITION_FREELIST) part = &cache->user_parts[part_id].part; else part = &cache->free; - if (core_id == OCF_CORE_MAX) + if (core_id == OCF_CORE_NUM) goto delete_invalid; if (metadata_clear_valid_sec_changed(cache, line, start_bit, end_bit, diff --git a/src/utils/utils_cache_line.h b/src/utils/utils_cache_line.h index 4ff8d0e0a..5029efe34 100644 --- a/src/utils/utils_cache_line.h +++ b/src/utils/utils_cache_line.h @@ -216,21 +216,22 @@ static inline void ocf_purge_map_info(struct ocf_request *req) continue; start_bit = 0; - end_bit = ocf_line_end_sector(cache); + end_bit = ocf_line_end_block(cache); if (map_idx == 0) { /* First */ - start_bit = (BYTES_TO_SECTORS(req->addr) - % ocf_line_sectors(cache)); + start_bit = BYTES_TO_BLOCKS_ROUND_DOWN(req->addr) + % ocf_line_blocks(cache); + } if (map_idx == (count - 1)) { /* Last */ - end_bit = (BYTES_TO_SECTORS(req->addr + + end_bit = BYTES_TO_BLOCKS_ROUND_DOWN(req->addr + req->bytes - 1) % - ocf_line_sectors(cache)); + ocf_line_blocks(cache); } ocf_metadata_start_collision_shared_access(cache, map[map_idx]. 
@@ -243,25 +244,26 @@ static inline void ocf_purge_map_info(struct ocf_request *req) } static inline -uint8_t ocf_map_line_start_sector(struct ocf_request *req, uint32_t line) +uint8_t ocf_map_line_start_block(struct ocf_request *req, uint32_t line) { if (line == 0) { - return (BYTES_TO_SECTORS(req->addr) - % ocf_line_sectors(req->cache)); + return BYTES_TO_BLOCKS_ROUND_DOWN(req->addr) + % ocf_line_blocks(req->cache); } return 0; } static inline -uint8_t ocf_map_line_end_sector(struct ocf_request *req, uint32_t line) +uint8_t ocf_map_line_end_block(struct ocf_request *req, uint32_t line) { if (line == req->core_line_count - 1) { - return (BYTES_TO_SECTORS(req->addr + req->bytes - 1) % - ocf_line_sectors(req->cache)); + return BYTES_TO_BLOCKS_ROUND_DOWN(req->addr + + req->bytes - 1) % + ocf_line_blocks(req->cache); } - return ocf_line_end_sector(req->cache); + return ocf_line_end_block(req->cache); } static inline void ocf_set_valid_map_info(struct ocf_request *req) @@ -273,7 +275,7 @@ static inline void ocf_set_valid_map_info(struct ocf_request *req) uint32_t count = req->core_line_count; struct ocf_map_info *map = req->map; - /* Set valid bits for sectors on the basis of map info + /* Set valid bits for blocks on the basis of map info * * | 01234567 | 01234567 | ... | 01234567 | 01234567 | * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- | @@ -282,8 +284,8 @@ static inline void ocf_set_valid_map_info(struct ocf_request *req) for (map_idx = 0; map_idx < count; map_idx++) { ENV_BUG_ON(map[map_idx].status == LOOKUP_MISS); - start_bit = ocf_map_line_start_sector(req, map_idx); - end_bit = ocf_map_line_end_sector(req, map_idx); + start_bit = ocf_map_line_start_block(req, map_idx); + end_bit = ocf_map_line_end_block(req, map_idx); ocf_metadata_start_collision_shared_access(cache, map[map_idx]. 
coll_idx); @@ -302,7 +304,7 @@ static inline void ocf_set_dirty_map_info(struct ocf_request *req) uint32_t count = req->core_line_count; struct ocf_map_info *map = req->map; - /* Set valid bits for sectors on the basis of map info + /* Set valid bits for blocks on the basis of map info * * | 01234567 | 01234567 | ... | 01234567 | 01234567 | * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- | @@ -310,8 +312,8 @@ static inline void ocf_set_dirty_map_info(struct ocf_request *req) */ for (map_idx = 0; map_idx < count; map_idx++) { - start_bit = ocf_map_line_start_sector(req, map_idx); - end_bit = ocf_map_line_end_sector(req, map_idx); + start_bit = ocf_map_line_start_block(req, map_idx); + end_bit = ocf_map_line_end_block(req, map_idx); ocf_metadata_start_collision_shared_access(cache, map[map_idx]. coll_idx); @@ -330,7 +332,7 @@ static inline void ocf_set_clean_map_info(struct ocf_request *req) uint32_t count = req->core_line_count; struct ocf_map_info *map = req->map; - /* Set valid bits for sectors on the basis of map info + /* Set valid bits for blocks on the basis of map info * * | 01234567 | 01234567 | ... | 01234567 | 01234567 | * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- | @@ -338,8 +340,8 @@ static inline void ocf_set_clean_map_info(struct ocf_request *req) */ for (map_idx = 0; map_idx < count; map_idx++) { - start_bit = ocf_map_line_start_sector(req, map_idx); - end_bit = ocf_map_line_end_sector(req, map_idx); + start_bit = ocf_map_line_start_block(req, map_idx); + end_bit = ocf_map_line_end_block(req, map_idx); ocf_metadata_start_collision_shared_access(cache, map[map_idx]. 
coll_idx); diff --git a/src/utils/utils_cleaner.c b/src/utils/utils_cleaner.c index 362dff8a4..bc25e9472 100644 --- a/src/utils/utils_cleaner.c +++ b/src/utils/utils_cleaner.c @@ -1,6 +1,7 @@ /* * Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2024-2025 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -251,11 +252,11 @@ static void _ocf_cleaner_cache_line_unlock(struct ocf_request *req) } } -static bool _ocf_cleaner_sector_is_dirty(struct ocf_cache *cache, - ocf_cache_line_t line, uint8_t sector) +static bool _ocf_cleaner_block_is_dirty(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t block) { - bool dirty = metadata_test_dirty_one(cache, line, sector); - bool valid = metadata_test_valid_one(cache, line, sector); + bool dirty = metadata_test_dirty_one(cache, line, block); + bool valid = metadata_test_valid_one(cache, line, block); if (!valid && dirty) { /* not valid but dirty - IMPROPER STATE!!! */ @@ -345,7 +346,7 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req) ocf_metadata_start_collision_shared_access(cache, cache_line); set_cache_line_clean(cache, 0, - ocf_line_end_sector(cache), req, i); + ocf_line_end_block(cache), req, i); ocf_metadata_end_collision_shared_access(cache, cache_line); } @@ -431,19 +432,19 @@ static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req, iter->coll_idx); addr = (ocf_line_size(cache) * iter->core_line) - + SECTORS_TO_BYTES(begin); + + BLOCKS_TO_BYTES(begin); offset = (ocf_line_size(cache) * iter->hash) - + SECTORS_TO_BYTES(begin); + + BLOCKS_TO_BYTES(begin); ocf_core_stats_core_block_update(req->core, part_id, OCF_WRITE, - SECTORS_TO_BYTES(end - begin)); + BLOCKS_TO_BYTES(end - begin)); OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, " - "sector = %llu, count = %llu", iter->core_line, begin, + "block = %llu, count = %llu", iter->core_line, begin, end - begin); ocf_req_forward_core_io(req, OCF_WRITE, addr, - 
SECTORS_TO_BYTES(end - begin), offset); + BLOCKS_TO_BYTES(end - begin), offset); } static void _ocf_cleaner_core_submit_io(struct ocf_request *req, @@ -458,14 +459,14 @@ static void _ocf_cleaner_core_submit_io(struct ocf_request *req, && metadata_test_dirty(cache, iter->coll_idx)) { _ocf_cleaner_core_io_for_dirty_range(req, iter, 0, - ocf_line_sectors(cache)); + ocf_line_blocks(cache)); return; } /* Sector cleaning, a little effort is required to this */ - for (i = 0; i < ocf_line_sectors(cache); i++) { - if (!_ocf_cleaner_sector_is_dirty(cache, iter->coll_idx, i)) { + for (i = 0; i < ocf_line_blocks(cache); i++) { + if (!_ocf_cleaner_block_is_dirty(cache, iter->coll_idx, i)) { if (counting_dirty) { counting_dirty = false; _ocf_cleaner_core_io_for_dirty_range(req, iter, @@ -669,7 +670,7 @@ static uint32_t ocf_cleaner_populate_req(struct ocf_request *req, uint32_t curr, uint32_t count = attribs->count; uint32_t map_max = req->core_line_count, map_curr; ocf_cache_line_t cache_line; - uint64_t core_sector; + uint64_t core_line; ocf_core_id_t core_id, last_core_id = OCF_CORE_ID_INVALID; for (map_curr = 0; map_curr < map_max && curr < count; curr++) { @@ -680,7 +681,7 @@ static uint32_t ocf_cleaner_populate_req(struct ocf_request *req, uint32_t curr, /* Get mapping info */ ocf_metadata_get_core_info(req->cache, cache_line, - &core_id, &core_sector); + &core_id, &core_line); if (last_core_id == OCF_CORE_ID_INVALID) { last_core_id = core_id; @@ -691,7 +692,7 @@ static uint32_t ocf_cleaner_populate_req(struct ocf_request *req, uint32_t curr, break; req->map[map_curr].core_id = core_id; - req->map[map_curr].core_line = core_sector; + req->map[map_curr].core_line = core_line; req->map[map_curr].coll_idx = cache_line; req->map[map_curr].status = LOOKUP_HIT; req->map[map_curr].hash = map_curr; diff --git a/tests/functional/pyocf/c/helpers/ocf_helpers.c b/tests/functional/pyocf/c/helpers/ocf_helpers.c new file mode 100644 index 000000000..dd149cc4c --- /dev/null +++ 
b/tests/functional/pyocf/c/helpers/ocf_helpers.c @@ -0,0 +1,17 @@ +/* + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "ocf_helpers.h" +#include "ocf/ocf.h" +#include "../src/ocf/ocf_def_priv.h" + +bool ocf_is_block_size_4k(void) +{ +#ifdef OCF_BLOCK_SIZE_4K + return true; +#else + return false; +#endif +} diff --git a/tests/functional/pyocf/c/helpers/ocf_helpers.h b/tests/functional/pyocf/c/helpers/ocf_helpers.h new file mode 100644 index 000000000..0a61c30a0 --- /dev/null +++ b/tests/functional/pyocf/c/helpers/ocf_helpers.h @@ -0,0 +1,10 @@ +/* + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#pragma once + +#include <stdbool.h> + +bool ocf_is_block_size_4k(void); diff --git a/tests/functional/pyocf/helpers.py b/tests/functional/pyocf/helpers.py index d807c87a4..66523c1d1 100644 --- a/tests/functional/pyocf/helpers.py +++ b/tests/functional/pyocf/helpers.py @@ -1,5 +1,6 @@ # # Copyright(c) 2022 Intel Corporation +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # # @@ -39,3 +40,7 @@ def get_metadata_segment_is_flapped(cache, segment): def get_composite_volume_type_id(): lib = OcfLib.getInstance() return int(lib.ocf_get_composite_volume_type_id_helper()) + +def is_block_size_4k(): + lib = OcfLib.getInstance() + return bool(lib.ocf_is_block_size_4k()) diff --git a/tests/functional/pyocf/types/cache.py b/tests/functional/pyocf/types/cache.py index 1e7fe7cc7..50080b439 100644 --- a/tests/functional/pyocf/types/cache.py +++ b/tests/functional/pyocf/types/cache.py @@ -175,17 +175,26 @@ def write_insert(self): def read_insert(self): return self.value not in [CacheMode.PT, CacheMode.WO] + def __str__(self): + return self.name + class PromotionPolicy(IntEnum): ALWAYS = 0 NHIT = 1 DEFAULT = ALWAYS + def __str__(self): + return self.name + class NhitParams(IntEnum): INSERTION_THRESHOLD = 0 TRIGGER_THRESHOLD = 1 + def __str__(self): + return self.name + class CleaningPolicy(IntEnum): NOP = 
0 @@ -193,6 +202,9 @@ class CleaningPolicy(IntEnum): ACP = 2 DEFAULT = ALRU + def __str__(self): + return self.name + class AlruParams(IntEnum): WAKE_UP_TIME = 0 @@ -200,17 +212,26 @@ class AlruParams(IntEnum): FLUSH_MAX_BUFFERS = 2 ACTIVITY_THRESHOLD = 3 + def __str__(self): + return self.name + class AcpParams(IntEnum): WAKE_UP_TIME = 0 FLUSH_MAX_BUFFERS = 1 + def __str__(self): + return self.name + class MetadataLayout(IntEnum): STRIPING = 0 SEQUENTIAL = 1 DEFAULT = STRIPING + def __str__(self): + return self.name + class Cache: DEFAULT_BACKFILL_QUEUE_SIZE = 65536 diff --git a/tests/functional/pyocf/types/shared.py b/tests/functional/pyocf/types/shared.py index 041de9d0b..74dcaf069 100644 --- a/tests/functional/pyocf/types/shared.py +++ b/tests/functional/pyocf/types/shared.py @@ -167,6 +167,9 @@ class CacheLineSize(IntEnum): LINE_64KiB = S.from_KiB(64) DEFAULT = LINE_4KiB + def __str__(self): + return f"{self.value // 1024}KiB" + class SeqCutOffPolicy(IntEnum): ALWAYS = 0 diff --git a/tests/functional/pyocf/utils.py b/tests/functional/pyocf/utils.py index de4e2d130..01b5dba64 100644 --- a/tests/functional/pyocf/utils.py +++ b/tests/functional/pyocf/utils.py @@ -1,6 +1,7 @@ # # Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2024 Huawei Technologies +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # @@ -70,8 +71,10 @@ class Size: "TiB": _TiB, } - def __init__(self, b: int, sector_aligned: bool = False): - if sector_aligned: + def __init__(self, b: int, sector_aligned: bool = False, page_aligned: bool = False): + if page_aligned: + self.bytes = int(((b + self._PAGE_SIZE - 1) // self._PAGE_SIZE) * self._PAGE_SIZE) + elif sector_aligned: self.bytes = int(((b + self._SECTOR_SIZE - 1) // self._SECTOR_SIZE) * self._SECTOR_SIZE) else: self.bytes = int(b) @@ -167,19 +170,23 @@ def blocks_4k(self): def __str__(self): if self.bytes < self._KiB: - return "{} B".format(self.B) + return "{}B".format(self.B) elif self.bytes < self._MiB: - return 
"{} KiB".format(self.KiB) + return "{}KiB".format(self.KiB) elif self.bytes < self._GiB: - return "{} MiB".format(self.MiB) + return "{}MiB".format(self.MiB) elif self.bytes < self._TiB: - return "{} GiB".format(self.GiB) + return "{}GiB".format(self.GiB) else: - return "{} TiB".format(self.TiB) + return "{}TiB".format(self.TiB) def __repr__(self): return f"Size({self.bytes})" + @property + def __name__(self): + return str(self) + def __eq__(self, other): return self.bytes == other.bytes diff --git a/tests/functional/tests/basic/test_pyocf.py b/tests/functional/tests/basic/test_pyocf.py index b209b5365..59e5e516c 100644 --- a/tests/functional/tests/basic/test_pyocf.py +++ b/tests/functional/tests/basic/test_pyocf.py @@ -1,6 +1,7 @@ # # Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2024 Huawei Technologies +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # @@ -35,7 +36,14 @@ def test_simple_wt_write(pyocf_ctx): cache_device.reset_stats() core_device.reset_stats() - r = Rio().target(vol).readwrite(ReadWrite.WRITE).size(S.from_sector(1)).run([queue]) + r = ( + Rio() + .target(vol) + .readwrite(ReadWrite.WRITE) + .size(S.from_page(1)) + .bs(S.from_page(1)) + .run([queue]) + ) assert cache_device.get_stats()[IoDir.WRITE] == 1 cache.settle() stats = cache.get_stats() diff --git a/tests/functional/tests/engine/test_errors.py b/tests/functional/tests/engine/test_errors.py index ece545be1..a0d7fe7b9 100644 --- a/tests/functional/tests/engine/test_errors.py +++ b/tests/functional/tests/engine/test_errors.py @@ -1,5 +1,6 @@ # # Copyright(c) 2024 Huawei Technologies +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # @@ -12,6 +13,7 @@ from pyocf.types.shared import CacheLineSize from pyocf.utils import Size from pyocf.rio import Rio, ReadWrite +from pyocf.helpers import is_block_size_4k BLOCK_SIZES = [Size(512), Size.from_KiB(1), Size.from_KiB(4), Size.from_KiB(64), Size.from_KiB(256)] @@ -20,6 +22,8 @@ 
@pytest.mark.parametrize("cache_mode", [c for c in CacheMode if not c.lazy_write()]) @pytest.mark.parametrize("rio_bs", BLOCK_SIZES) def test_strict_engine_errors(pyocf_ctx, cache_mode: CacheMode, cls: CacheLineSize, rio_bs: Size): + if is_block_size_4k() and rio_bs < Size.from_KiB(4): + pytest.skip("Sub-4K I/O not supported in 4K block mode") cache_vol_size = Size.from_MiB(50) ram_cache_volume = RamVolume(cache_vol_size) error_sectors = set(x for x in range(0, cache_vol_size, 512)) @@ -88,6 +92,8 @@ def test_strict_engine_errors(pyocf_ctx, cache_mode: CacheMode, cls: CacheLineSi @pytest.mark.parametrize("cache_mode", [c for c in CacheMode if c.lazy_write()]) @pytest.mark.parametrize("rio_bs", BLOCK_SIZES) def test_lazy_engine_errors(pyocf_ctx, cache_mode: CacheMode, cls: CacheLineSize, rio_bs: Size): + if is_block_size_4k() and rio_bs < Size.from_KiB(4): + pytest.skip("Sub-4K I/O not supported in 4K block mode") cache_vol_size = Size.from_MiB(50) ram_cache_volume = RamVolume(cache_vol_size) error_sectors = set(x for x in range(0, cache_vol_size, 512)) diff --git a/tests/functional/tests/engine/test_pp.py b/tests/functional/tests/engine/test_pp.py index 7c9783465..a01d0de7d 100644 --- a/tests/functional/tests/engine/test_pp.py +++ b/tests/functional/tests/engine/test_pp.py @@ -1,6 +1,7 @@ # # Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2024 Huawei Technologies +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # @@ -18,6 +19,7 @@ from pyocf.utils import Size from pyocf.types.shared import OcfCompletion from pyocf.rio import Rio, ReadWrite +from pyocf.helpers import is_block_size_4k @pytest.mark.parametrize("promotion_policy", PromotionPolicy) @@ -120,7 +122,7 @@ def fill_cache(cache, fill_ratio): .target(vol) .readwrite(ReadWrite.RANDWRITE) .size(bytes_to_fill) - .bs(Size(512)) + .bs(Size(4096)) .qd(10) .run([queue]) ) @@ -212,6 +214,9 @@ def test_partial_hit_promotion(pyocf_ctx): * occupancy should rise - partially hit request 
should bypass nhit criteria """ + if is_block_size_4k(): + pytest.skip("Sub-4K I/O not supported in 4K block mode") + # Step 1 cache_device = RamVolume(Size.from_MiB(50)) core_device = RamVolume(Size.from_MiB(50)) diff --git a/tests/functional/tests/engine/test_seq_cutoff.py b/tests/functional/tests/engine/test_seq_cutoff.py index 0e62d92d6..8b60280ae 100644 --- a/tests/functional/tests/engine/test_seq_cutoff.py +++ b/tests/functional/tests/engine/test_seq_cutoff.py @@ -1,5 +1,6 @@ # # Copyright(c) 2020-2022 Intel Corporation +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # @@ -16,6 +17,7 @@ from pyocf.types.io import IoDir from pyocf.utils import Size from pyocf.types.shared import OcfCompletion, SeqCutOffPolicy +from pyocf.helpers import is_block_size_4k class Stream: @@ -72,6 +74,12 @@ def test_seq_cutoff_max_streams(pyocf_ctx): handled by cache. It should no longer be tracked by OCF, because of request in step 3. which overflowed the OCF handling structure) """ + + if is_block_size_4k(): + smallest_io_size = Size.from_page(1) + else: + smallest_io_size = Size.from_sector(1) + MAX_STREAMS = 128 TEST_STREAMS = MAX_STREAMS + 1 # Number of streams used by test - one more than OCF can track core_size = Size.from_MiB(200) @@ -79,7 +87,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx): streams = [ Stream( - last=Size((stream_no * int(core_size) // TEST_STREAMS), sector_aligned=True), + last=Size((stream_no * int(core_size) // TEST_STREAMS), page_aligned=True), length=Size(0), direction=choice(list(IoDir)), ) @@ -104,7 +112,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx): # STEP 1 vol.open() shuffle(streams) - io_size = threshold - Size.from_sector(1) + io_size = threshold - smallest_io_size io_to_streams(vol, queue, streams, io_size) stats = cache.get_stats() @@ -119,7 +127,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx): streams.remove(lru_stream) shuffle(streams) - io_to_streams(vol, queue, streams, Size.from_sector(1)) + io_to_streams(vol, 
queue, streams, smallest_io_size) stats = cache.get_stats() assert ( @@ -130,7 +138,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx): ), "All streams should be handled in PT - cutoff engaged for all streams" # STEP 3 - io_to_streams(vol, queue, [non_active_stream], Size.from_sector(1)) + io_to_streams(vol, queue, [non_active_stream], smallest_io_size) stats = cache.get_stats() assert ( @@ -138,7 +146,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx): ), "This request should be serviced by cache - no cutoff for inactive stream" # STEP 4 - io_to_streams(vol, queue, [lru_stream], Size.from_sector(1)) + io_to_streams(vol, queue, [lru_stream], smallest_io_size) vol.close() stats = cache.get_stats() diff --git a/tests/functional/tests/management/test_start_stop.py b/tests/functional/tests/management/test_start_stop.py index e9a1823be..baee1baf2 100644 --- a/tests/functional/tests/management/test_start_stop.py +++ b/tests/functional/tests/management/test_start_stop.py @@ -1,6 +1,7 @@ # # Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2024-2025 Huawei Technologies +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # @@ -45,6 +46,7 @@ from pyocf.types.volume import Volume, RamVolume from pyocf.types.volume_core import CoreVolume from pyocf.utils import Size +from pyocf.helpers import is_block_size_4k logger = logging.getLogger(__name__) @@ -77,6 +79,11 @@ def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: Cache After start check proper cache mode behaviour, starting with write operation. 
""" + if is_block_size_4k(): + smallest_io_size = Size.from_page(1) + else: + smallest_io_size = Size.from_sector(1) + cache_device = RamVolume(Size.from_MiB(50)) core_device = RamVolume(Size.from_MiB(10)) cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls) @@ -90,21 +97,21 @@ def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: Cache cache_device.reset_stats() core_device.reset_stats() - test_data = Data.from_string("This is test data") - io_to_core(vol, queue, test_data, Size.from_sector(1).B) + test_data = Data.from_bytes(b"A"*smallest_io_size.B) + io_to_core(vol, queue, test_data, smallest_io_size.B) check_stats_write_empty(core, mode, cls) logger.info("[STAGE] Read from exported object after initial write") - io_from_exported_object(vol, queue, test_data.size, Size.from_sector(1).B) + io_from_exported_object(vol, queue, test_data.size, smallest_io_size.B) check_stats_read_after_write(core, mode, cls, True) logger.info("[STAGE] Write to exported object after read") cache_device.reset_stats() core_device.reset_stats() - test_data = Data.from_string("Changed test data") + test_data = Data.from_bytes(b"B"*smallest_io_size.B) - io_to_core(vol, queue, test_data, Size.from_sector(1).B) + io_to_core(vol, queue, test_data, smallest_io_size.B) check_stats_write_after_read(core, mode, cls) check_md5_sums(vol, mode) @@ -117,6 +124,11 @@ def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheL After start check proper cache mode behaviour, starting with read operation. 
""" + if is_block_size_4k(): + smallest_io_size = Size.from_page(1) + else: + smallest_io_size = Size.from_sector(1) + cache_device = RamVolume(Size.from_MiB(50)) core_device = RamVolume(Size.from_MiB(5)) cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls) @@ -128,28 +140,28 @@ def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheL queue = cache.get_default_queue() logger.info("[STAGE] Initial write to core device") - test_data = Data.from_string("This is test data") - io_to_core(bottom_vol, queue, test_data, Size.from_sector(1).B) + test_data = Data.from_bytes(b"A"*smallest_io_size.B) + io_to_core(bottom_vol, queue, test_data, smallest_io_size.B) cache_device.reset_stats() core_device.reset_stats() logger.info("[STAGE] Initial read from exported object") - io_from_exported_object(front_vol, queue, test_data.size, Size.from_sector(1).B) + io_from_exported_object(front_vol, queue, test_data.size, smallest_io_size.B) check_stats_read_empty(core, mode, cls) logger.info("[STAGE] Write to exported object after initial read") cache_device.reset_stats() core_device.reset_stats() - test_data = Data.from_string("Changed test data") + test_data = Data.from_bytes(b"B"*smallest_io_size.B) - io_to_core(front_vol, queue, test_data, Size.from_sector(1).B) + io_to_core(front_vol, queue, test_data, smallest_io_size.B) check_stats_write_after_read(core, mode, cls, True) logger.info("[STAGE] Read from exported object after write") - io_from_exported_object(front_vol, queue, test_data.size, Size.from_sector(1).B) + io_from_exported_object(front_vol, queue, test_data.size, smallest_io_size.B) check_stats_read_after_write(core, mode, cls) check_md5_sums(front_vol, mode) diff --git a/tests/functional/tests/surprise_shutdown/test_management_surprise_shutdown.py b/tests/functional/tests/surprise_shutdown/test_management_surprise_shutdown.py index 43337c3ce..631669fbe 100644 --- 
a/tests/functional/tests/surprise_shutdown/test_management_surprise_shutdown.py +++ b/tests/functional/tests/surprise_shutdown/test_management_surprise_shutdown.py @@ -1,6 +1,7 @@ # # Copyright(c) 2021-2022 Intel Corporation # Copyright(c) 2024 Huawei Technologies +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # @@ -39,8 +40,8 @@ def ocf_write(vol, queue, val, offset): vol.open() - data = Data.from_bytes(bytes([val] * 512)) - io = vol.new_io(queue, offset, 512, IoDir.WRITE, 0, 0) + data = Data.from_bytes(bytes([val] * 4096)) + io = vol.new_io(queue, offset, 4096, IoDir.WRITE, 0, 0) io.set_data(data) Sync(io).submit() vol.close() @@ -48,8 +49,8 @@ def ocf_write(vol, queue, val, offset): def ocf_read(vol, queue, offset): vol.open() - data = Data(byte_count=512) - io = vol.new_io(queue, offset, 512, IoDir.READ, 0, 0) + data = Data(byte_count=4096) + io = vol.new_io(queue, offset, 4096, IoDir.READ, 0, 0) io.set_data(data) Sync(io).submit() vol.close() diff --git a/tests/unit/tests/metadata/metadata_collision.c/metadata_collision.c b/tests/unit/tests/metadata/metadata_collision.c/metadata_collision.c index d15edea7b..4fe9993b8 100644 --- a/tests/unit/tests/metadata/metadata_collision.c/metadata_collision.c +++ b/tests/unit/tests/metadata/metadata_collision.c/metadata_collision.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2019-2022 Intel Corporation + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -35,7 +36,7 @@ static void metadata_hash_func_test01(void **state) ocf_cache_line_t i; ocf_cache_line_t hash_cur, hash_next; unsigned c; - ocf_core_id_t core_ids[] = {0, 1, 2, 100, OCF_CORE_MAX}; + ocf_core_id_t core_ids[] = {0, 1, 2, 100, OCF_CORE_NUM}; ocf_core_id_t core_id; print_test_description("Verify that hash function increments by 1 and generates"