From aaa08d8ec99900ef7d7052f213465264df840e93 Mon Sep 17 00:00:00 2001
From: Protobuf Team Bot
Date: Fri, 27 Dec 2024 02:02:21 +0000
Subject: [PATCH] Auto-generate files after cl/709918443

---
 php/ext/google/protobuf/php-upb.c     | 64 +++++++++++----------------
 php/ext/google/protobuf/php-upb.h     |  7 +--
 ruby/ext/google/protobuf_c/ruby-upb.c | 64 +++++++++++----------------
 ruby/ext/google/protobuf_c/ruby-upb.h |  7 +--
 4 files changed, 54 insertions(+), 88 deletions(-)

diff --git a/php/ext/google/protobuf/php-upb.c b/php/ext/google/protobuf/php-upb.c
index f92a8f91775e..31263b9f3eac 100644
--- a/php/ext/google/protobuf/php-upb.c
+++ b/php/ext/google/protobuf/php-upb.c
@@ -2896,8 +2896,7 @@ void upb_Arena_SetMaxBlockSize(size_t max) {
 }
 
 typedef struct upb_MemBlock {
-  // Atomic only for the benefit of SpaceAllocated().
-  UPB_ATOMIC(struct upb_MemBlock*) next;
+  struct upb_MemBlock* next;
   uint32_t size;
   // Data follows.
 } upb_MemBlock;
@@ -2930,9 +2929,11 @@ typedef struct upb_ArenaInternal {
   // == self when no other list members.
   UPB_ATOMIC(struct upb_ArenaInternal*) tail;
 
-  // Linked list of blocks to free/cleanup. Atomic only for the benefit of
-  // upb_Arena_SpaceAllocated().
-  UPB_ATOMIC(upb_MemBlock*) blocks;
+  // Linked list of blocks to free/cleanup.
+  upb_MemBlock* blocks;
+
+  // Total space allocated in blocks, atomic only for SpaceAllocated
+  UPB_ATOMIC(size_t) space_allocated;
 } upb_ArenaInternal;
 
 // All public + private state for an arena.
@@ -3075,11 +3076,7 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena, size_t* fused_count) {
   size_t local_fused_count = 0;
 
   while (ai != NULL) {
-    upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_relaxed);
-    while (block != NULL) {
-      memsize += sizeof(upb_MemBlock) + block->size;
-      block = upb_Atomic_Load(&block->next, memory_order_relaxed);
-    }
+    memsize += upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
     ai = upb_Atomic_Load(&ai->next, memory_order_relaxed);
     local_fused_count++;
   }
@@ -3088,21 +3085,6 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena, size_t* fused_count) {
   return memsize;
 }
 
-bool UPB_PRIVATE(_upb_Arena_Contains)(const upb_Arena* a, void* ptr) {
-  upb_ArenaInternal* ai = upb_Arena_Internal(a);
-  UPB_ASSERT(ai);
-
-  upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_relaxed);
-  while (block) {
-    uintptr_t beg = (uintptr_t)block;
-    uintptr_t end = beg + block->size;
-    if ((uintptr_t)ptr >= beg && (uintptr_t)ptr < end) return true;
-    block = upb_Atomic_Load(&block->next, memory_order_relaxed);
-  }
-
-  return false;
-}
-
 uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   // These loads could probably be relaxed, but given that this is debug-only,
@@ -3119,10 +3101,10 @@ static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   upb_MemBlock* block = ptr;
 
-  // Insert into linked list.
   block->size = (uint32_t)size;
-  upb_Atomic_Init(&block->next, ai->blocks);
-  upb_Atomic_Store(&ai->blocks, block, memory_order_release);
+  // Insert into linked list.
+  block->next = ai->blocks;
+  ai->blocks = block;
 
   a->UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
   a->UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
@@ -3135,7 +3117,7 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   if (!ai->block_alloc) return false;
   size_t last_size = 128;
-  upb_MemBlock* last_block = upb_Atomic_Load(&ai->blocks, memory_order_relaxed);
+  upb_MemBlock* last_block = ai->blocks;
   if (last_block) {
     last_size = a->UPB_PRIVATE(end) - (char*)last_block;
   }
@@ -3156,6 +3138,13 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
   if (!block) return false;
 
   _upb_Arena_AddBlock(a, block, block_size);
+  // Atomic add not required here, as threads won't race allocating blocks, and
+  // atomic fetch-add is slower than load/add/store on Arm devices compiled
+  // targeting pre-v8.1.
+  size_t old_space_allocated =
+      upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
+  upb_Atomic_Store(&ai->space_allocated, old_space_allocated + block_size,
+                   memory_order_relaxed);
   UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >= size);
   return true;
 }
@@ -3184,7 +3173,8 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.tail, &a->body);
-  upb_Atomic_Init(&a->body.blocks, NULL);
+  upb_Atomic_Init(&a->body.space_allocated, n);
+  a->body.blocks = NULL;
   a->body.upb_alloc_cleanup = NULL;
 
   _upb_Arena_AddBlock(&a->head, mem, n);
@@ -3223,7 +3213,8 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.tail, &a->body);
-  upb_Atomic_Init(&a->body.blocks, NULL);
+  upb_Atomic_Init(&a->body.space_allocated, 0);
+  a->body.blocks = NULL;
   a->body.upb_alloc_cleanup = NULL;
   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
 
@@ -3242,12 +3233,11 @@ static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
     upb_ArenaInternal* next_arena =
        (upb_ArenaInternal*)upb_Atomic_Load(&ai->next, memory_order_acquire);
     upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
-    upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_acquire);
+    upb_MemBlock* block = ai->blocks;
     upb_AllocCleanupFunc* alloc_cleanup = *ai->upb_alloc_cleanup;
     while (block != NULL) {
       // Load first since we are deleting block.
-      upb_MemBlock* next_block =
-          upb_Atomic_Load(&block->next, memory_order_acquire);
+      upb_MemBlock* next_block = block->next;
       upb_free(block_alloc, block);
       block = next_block;
     }
@@ -3469,8 +3459,7 @@ void UPB_PRIVATE(_upb_Arena_SwapIn)(upb_Arena* des, const upb_Arena* src) {
 
   *des = *src;
   desi->block_alloc = srci->block_alloc;
-  upb_MemBlock* blocks = upb_Atomic_Load(&srci->blocks, memory_order_relaxed);
-  upb_Atomic_Init(&desi->blocks, blocks);
+  desi->blocks = srci->blocks;
 }
 
 void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
@@ -3478,8 +3467,7 @@ void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
   upb_ArenaInternal* srci = upb_Arena_Internal(src);
 
   *des = *src;
-  upb_MemBlock* blocks = upb_Atomic_Load(&srci->blocks, memory_order_relaxed);
-  upb_Atomic_Store(&desi->blocks, blocks, memory_order_relaxed);
+  desi->blocks = srci->blocks;
 }
 
diff --git a/php/ext/google/protobuf/php-upb.h b/php/ext/google/protobuf/php-upb.h
index 209d1ddefc20..22039aa1b7c8 100644
--- a/php/ext/google/protobuf/php-upb.h
+++ b/php/ext/google/protobuf/php-upb.h
@@ -613,7 +613,7 @@ UPB_INLINE void upb_gfree(void* ptr) { upb_free(&upb_alloc_global, ptr); }
 //
 // We need this because the decoder inlines a upb_Arena for performance but
 // the full struct is not visible outside of arena.c. Yes, I know, it's awful.
-#define UPB_ARENA_SIZE_HACK 8
+#define UPB_ARENA_SIZE_HACK 9
 
 // LINT.IfChange(upb_Arena)
 
@@ -633,11 +633,6 @@ void UPB_PRIVATE(_upb_Arena_SwapIn)(struct upb_Arena* des,
 void UPB_PRIVATE(_upb_Arena_SwapOut)(struct upb_Arena* des,
                                      const struct upb_Arena* src);
 
-// Returns whether |ptr| was allocated directly by |a| (so care must be used
-// with fused arenas).
-UPB_API bool UPB_ONLYBITS(_upb_Arena_Contains)(const struct upb_Arena* a,
-                                               void* ptr);
-
 UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(const struct upb_Arena* a) {
   return (size_t)(a->UPB_ONLYBITS(end) - a->UPB_ONLYBITS(ptr));
 }
diff --git a/ruby/ext/google/protobuf_c/ruby-upb.c b/ruby/ext/google/protobuf_c/ruby-upb.c
index c97248aa2ad3..140ac60a644f 100644
--- a/ruby/ext/google/protobuf_c/ruby-upb.c
+++ b/ruby/ext/google/protobuf_c/ruby-upb.c
@@ -2896,8 +2896,7 @@ void upb_Arena_SetMaxBlockSize(size_t max) {
 }
 
 typedef struct upb_MemBlock {
-  // Atomic only for the benefit of SpaceAllocated().
-  UPB_ATOMIC(struct upb_MemBlock*) next;
+  struct upb_MemBlock* next;
   uint32_t size;
   // Data follows.
 } upb_MemBlock;
@@ -2930,9 +2929,11 @@ typedef struct upb_ArenaInternal {
   // == self when no other list members.
   UPB_ATOMIC(struct upb_ArenaInternal*) tail;
 
-  // Linked list of blocks to free/cleanup. Atomic only for the benefit of
-  // upb_Arena_SpaceAllocated().
-  UPB_ATOMIC(upb_MemBlock*) blocks;
+  // Linked list of blocks to free/cleanup.
+  upb_MemBlock* blocks;
+
+  // Total space allocated in blocks, atomic only for SpaceAllocated
+  UPB_ATOMIC(size_t) space_allocated;
 } upb_ArenaInternal;
 
 // All public + private state for an arena.
@@ -3075,11 +3076,7 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena, size_t* fused_count) {
   size_t local_fused_count = 0;
 
   while (ai != NULL) {
-    upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_relaxed);
-    while (block != NULL) {
-      memsize += sizeof(upb_MemBlock) + block->size;
-      block = upb_Atomic_Load(&block->next, memory_order_relaxed);
-    }
+    memsize += upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
     ai = upb_Atomic_Load(&ai->next, memory_order_relaxed);
     local_fused_count++;
   }
@@ -3088,21 +3085,6 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena, size_t* fused_count) {
   return memsize;
 }
 
-bool UPB_PRIVATE(_upb_Arena_Contains)(const upb_Arena* a, void* ptr) {
-  upb_ArenaInternal* ai = upb_Arena_Internal(a);
-  UPB_ASSERT(ai);
-
-  upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_relaxed);
-  while (block) {
-    uintptr_t beg = (uintptr_t)block;
-    uintptr_t end = beg + block->size;
-    if ((uintptr_t)ptr >= beg && (uintptr_t)ptr < end) return true;
-    block = upb_Atomic_Load(&block->next, memory_order_relaxed);
-  }
-
-  return false;
-}
-
 uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   // These loads could probably be relaxed, but given that this is debug-only,
@@ -3119,10 +3101,10 @@ static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   upb_MemBlock* block = ptr;
 
-  // Insert into linked list.
   block->size = (uint32_t)size;
-  upb_Atomic_Init(&block->next, ai->blocks);
-  upb_Atomic_Store(&ai->blocks, block, memory_order_release);
+  // Insert into linked list.
+  block->next = ai->blocks;
+  ai->blocks = block;
 
   a->UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
   a->UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
@@ -3135,7 +3117,7 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   if (!ai->block_alloc) return false;
   size_t last_size = 128;
-  upb_MemBlock* last_block = upb_Atomic_Load(&ai->blocks, memory_order_relaxed);
+  upb_MemBlock* last_block = ai->blocks;
   if (last_block) {
     last_size = a->UPB_PRIVATE(end) - (char*)last_block;
   }
@@ -3156,6 +3138,13 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
   if (!block) return false;
 
   _upb_Arena_AddBlock(a, block, block_size);
+  // Atomic add not required here, as threads won't race allocating blocks, and
+  // atomic fetch-add is slower than load/add/store on Arm devices compiled
+  // targeting pre-v8.1.
+  size_t old_space_allocated =
+      upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
+  upb_Atomic_Store(&ai->space_allocated, old_space_allocated + block_size,
+                   memory_order_relaxed);
   UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >= size);
   return true;
 }
@@ -3184,7 +3173,8 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.tail, &a->body);
-  upb_Atomic_Init(&a->body.blocks, NULL);
+  upb_Atomic_Init(&a->body.space_allocated, n);
+  a->body.blocks = NULL;
   a->body.upb_alloc_cleanup = NULL;
 
   _upb_Arena_AddBlock(&a->head, mem, n);
@@ -3223,7 +3213,8 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.tail, &a->body);
-  upb_Atomic_Init(&a->body.blocks, NULL);
+  upb_Atomic_Init(&a->body.space_allocated, 0);
+  a->body.blocks = NULL;
   a->body.upb_alloc_cleanup = NULL;
   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
 
@@ -3242,12 +3233,11 @@ static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
     upb_ArenaInternal* next_arena =
        (upb_ArenaInternal*)upb_Atomic_Load(&ai->next, memory_order_acquire);
     upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
-    upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_acquire);
+    upb_MemBlock* block = ai->blocks;
     upb_AllocCleanupFunc* alloc_cleanup = *ai->upb_alloc_cleanup;
     while (block != NULL) {
       // Load first since we are deleting block.
-      upb_MemBlock* next_block =
-          upb_Atomic_Load(&block->next, memory_order_acquire);
+      upb_MemBlock* next_block = block->next;
       upb_free(block_alloc, block);
       block = next_block;
     }
@@ -3469,8 +3459,7 @@ void UPB_PRIVATE(_upb_Arena_SwapIn)(upb_Arena* des, const upb_Arena* src) {
 
   *des = *src;
   desi->block_alloc = srci->block_alloc;
-  upb_MemBlock* blocks = upb_Atomic_Load(&srci->blocks, memory_order_relaxed);
-  upb_Atomic_Init(&desi->blocks, blocks);
+  desi->blocks = srci->blocks;
 }
 
 void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
@@ -3478,8 +3467,7 @@ void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
   upb_ArenaInternal* srci = upb_Arena_Internal(src);
 
   *des = *src;
-  upb_MemBlock* blocks = upb_Atomic_Load(&srci->blocks, memory_order_relaxed);
-  upb_Atomic_Store(&desi->blocks, blocks, memory_order_relaxed);
+  desi->blocks = srci->blocks;
 }
 
diff --git a/ruby/ext/google/protobuf_c/ruby-upb.h b/ruby/ext/google/protobuf_c/ruby-upb.h
index f106ae5bbdef..881497010b53 100755
--- a/ruby/ext/google/protobuf_c/ruby-upb.h
+++ b/ruby/ext/google/protobuf_c/ruby-upb.h
@@ -615,7 +615,7 @@ UPB_INLINE void upb_gfree(void* ptr) { upb_free(&upb_alloc_global, ptr); }
 //
 // We need this because the decoder inlines a upb_Arena for performance but
 // the full struct is not visible outside of arena.c. Yes, I know, it's awful.
-#define UPB_ARENA_SIZE_HACK 8
+#define UPB_ARENA_SIZE_HACK 9
 
 // LINT.IfChange(upb_Arena)
 
@@ -635,11 +635,6 @@ void UPB_PRIVATE(_upb_Arena_SwapIn)(struct upb_Arena* des,
 void UPB_PRIVATE(_upb_Arena_SwapOut)(struct upb_Arena* des,
                                      const struct upb_Arena* src);
 
-// Returns whether |ptr| was allocated directly by |a| (so care must be used
-// with fused arenas).
-UPB_API bool UPB_ONLYBITS(_upb_Arena_Contains)(const struct upb_Arena* a,
-                                               void* ptr);
-
 UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(const struct upb_Arena* a) {
   return (size_t)(a->UPB_ONLYBITS(end) - a->UPB_ONLYBITS(ptr));
 }
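
Note on the change (it applies identically to the php-upb.* and ruby-upb.* copies
above, which are generated from the same upb sources): the atomically linked
block list is replaced by a plain linked list plus a per-arena space_allocated
counter. Per the comment in _upb_Arena_AllocBlock, only the thread that owns an
arena allocates blocks, so the counter has a single writer; a relaxed load
followed by a relaxed store is therefore race-free, and
upb_Arena_SpaceAllocated() can sum the counters of fused arenas with relaxed
loads instead of walking every block list. The sketch below illustrates the
same single-writer pattern with C11 atomics; it is a minimal, hypothetical
example (toy_arena and its functions are not upb's API):

  #include <stdatomic.h>
  #include <stddef.h>
  #include <stdio.h>

  typedef struct {
    // Written only by the owning thread; readable from any thread.
    _Atomic size_t space_allocated;
  } toy_arena;

  // Owner-only path: no other thread races this read-modify-write, so a
  // relaxed load + store suffices and avoids an atomic fetch-add, which
  // compiles to a heavier RMW (e.g. an LL/SC loop on pre-v8.1 Arm).
  static void toy_arena_add_block(toy_arena* a, size_t block_size) {
    size_t old =
        atomic_load_explicit(&a->space_allocated, memory_order_relaxed);
    atomic_store_explicit(&a->space_allocated, old + block_size,
                          memory_order_relaxed);
  }

  // Any-thread path: a relaxed load is enough because callers only want an
  // approximate running total, not synchronization with the allocations.
  static size_t toy_arena_space_allocated(toy_arena* a) {
    return atomic_load_explicit(&a->space_allocated, memory_order_relaxed);
  }

  int main(void) {
    toy_arena a = {0};
    toy_arena_add_block(&a, 4096);
    toy_arena_add_block(&a, 8192);
    printf("space allocated: %zu\n", toy_arena_space_allocated(&a));  // 12288
    return 0;
  }

A concurrent reader may observe a momentarily stale total, which is acceptable
for a statistics API. The UPB_ARENA_SIZE_HACK bump from 8 to 9 presumably
accounts for the extra pointer-sized space_allocated member that
upb_ArenaInternal gains, since the decoder inlines an arena of that size.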