From 5c56011d8215bd519c2da1b709e9d503692ec02f Mon Sep 17 00:00:00 2001 From: Jeremy Gebben <jeremyg@lunarg.com> Date: Tue, 10 Sep 2024 09:25:37 -0600 Subject: [PATCH 1/2] cdl: Make checkpoint managers thread safe --- src/checkpoint.cpp | 20 ++++++++++++++++++-- src/checkpoint.h | 3 +++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/checkpoint.cpp b/src/checkpoint.cpp index 1de1fcb..19ef92f 100644 --- a/src/checkpoint.cpp +++ b/src/checkpoint.cpp @@ -55,33 +55,42 @@ std::unique_ptr<Checkpoint> BufferMarkerCheckpointMgr::Allocate(uint32_t initial return checkpoint; } -void BufferMarkerCheckpointMgr::Free(Checkpoint &c) {} +void BufferMarkerCheckpointMgr::Free(Checkpoint &c) { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); + checkpoint_data_.erase(c.Id()); +} void BufferMarkerCheckpointMgr::WriteTop(Checkpoint &c, VkCommandBuffer cmd, uint32_t value) { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); + auto iter = checkpoint_data_.find(c.Id()); assert(iter != checkpoint_data_.end()); iter->second.top_marker->Write(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, value); } void BufferMarkerCheckpointMgr::WriteBottom(Checkpoint &c, VkCommandBuffer cmd, uint32_t value) { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); auto iter = checkpoint_data_.find(c.Id()); assert(iter != checkpoint_data_.end()); iter->second.bottom_marker->Write(cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, value); } uint32_t BufferMarkerCheckpointMgr::ReadTop(const Checkpoint &c) const { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); auto iter = checkpoint_data_.find(c.Id()); assert(iter != checkpoint_data_.end()); return iter->second.top_marker->Read(); } uint32_t BufferMarkerCheckpointMgr::ReadBottom(const Checkpoint &c) const { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); auto iter = checkpoint_data_.find(c.Id()); assert(iter != checkpoint_data_.end()); return iter->second.bottom_marker->Read(); } void BufferMarkerCheckpointMgr::Reset(Checkpoint &c) { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); auto iter = checkpoint_data_.find(c.Id()); 
assert(iter != checkpoint_data_.end()); iter->second.top_marker->Write(0); @@ -95,11 +104,15 @@ std::unique_ptr<Checkpoint> DiagnosticCheckpointMgr::Allocate(uint32_t initial_v Data data; data.top_value = initial_value; data.bottom_value = initial_value; + std::lock_guard<std::mutex> lock(checkpoint_mutex_); checkpoint_data_.emplace(std::make_pair(checkpoint->Id(), std::move(data))); return checkpoint; } -void DiagnosticCheckpointMgr::Free(Checkpoint &c) { checkpoint_data_.erase(c.Id()); } +void DiagnosticCheckpointMgr::Free(Checkpoint &c) { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); + checkpoint_data_.erase(c.Id()); +} // NV checkpoints are both top and bottom markers. void DiagnosticCheckpointMgr::WriteTop(Checkpoint &c, VkCommandBuffer cmd, uint32_t value) {} @@ -110,18 +123,21 @@ void DiagnosticCheckpointMgr::WriteBottom(Checkpoint &c, VkCommandBuffer cmd, ui } uint32_t DiagnosticCheckpointMgr::ReadTop(const Checkpoint &c) const { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); auto iter = checkpoint_data_.find(c.Id()); assert(iter != checkpoint_data_.end()); return iter->second.top_value; } uint32_t DiagnosticCheckpointMgr::ReadBottom(const Checkpoint &c) const { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); auto iter = checkpoint_data_.find(c.Id()); assert(iter != checkpoint_data_.end()); return iter->second.bottom_value; } void DiagnosticCheckpointMgr::Reset(Checkpoint &c) { + std::lock_guard<std::mutex> lock(checkpoint_mutex_); auto iter = checkpoint_data_.find(c.Id()); assert(iter != checkpoint_data_.end()); iter->second.top_value = 0; diff --git a/src/checkpoint.h b/src/checkpoint.h index 0b234ff..72f5a5c 100644 --- a/src/checkpoint.h +++ b/src/checkpoint.h @@ -17,6 +17,7 @@ #pragma once #include "marker.h" +#include <mutex> namespace crash_diagnostic_layer { @@ -79,6 +80,7 @@ class BufferMarkerCheckpointMgr : public CheckpointMgr { BufferMarkerMgr markers_; + mutable std::mutex checkpoint_mutex_; std::unordered_map<uint32_t, Data> checkpoint_data_; uint32_t next_id_{1}; }; @@ -108,6 +110,7 @@ class 
DiagnosticCheckpointMgr : public CheckpointMgr { }; Device &device_; + mutable std::mutex checkpoint_mutex_; std::unordered_map<uint32_t, Data> checkpoint_data_; uint32_t next_id_{1}; }; From 111034f5d244b44de14dec88f1140c47def39a40 Mon Sep 17 00:00:00 2001 From: Jeremy Gebben <jeremyg@lunarg.com> Date: Tue, 10 Sep 2024 09:53:06 -0600 Subject: [PATCH 2/2] scripts: Don't shallow copy pNext in command recording This is a workaround for #102 --- .../generators/command_recorder_generator.py | 5 +- src/generated/command_recorder.cpp | 134 +++++++++--------- 2 files changed, 71 insertions(+), 68 deletions(-) diff --git a/scripts/generators/command_recorder_generator.py b/scripts/generators/command_recorder_generator.py index bb03ee9..cf92b9b 100644 --- a/scripts/generators/command_recorder_generator.py +++ b/scripts/generators/command_recorder_generator.py @@ -137,7 +137,10 @@ def generateSource(self): out.append(f' for (uint32_t j = 0; j < {vkmember.fixedSizeArray[0]}; ++j) {{\n') out.append(f' ptr[i].{vkmember.name}[j] = {src_struct}.{vkmember.name}[j];\n') out.append(' }\n') - elif vkmember.pointer and 'void' != vkmember.type and vkmember.name != 'pNext': + # https://github.com/LunarG/CrashDiagnosticLayer/issues/102 we need to deep copy the pNext chain here + elif vkmember.name == 'pNext': + out.append(f' ptr[i].{vkmember.name} = nullptr; // pNext deep copy not implemented\n') + elif vkmember.pointer and 'void' != vkmember.type: out.append(f' ptr[i].{vkmember.name} = nullptr;\n') out.append(f' if ({src_struct}.{vkmember.name}) {{\n') if vkmember.length is not None and len(vkmember.length) > 0: diff --git a/src/generated/command_recorder.cpp b/src/generated/command_recorder.cpp index 71febae..af400b8 100644 --- a/src/generated/command_recorder.cpp +++ b/src/generated/command_recorder.cpp @@ -411,7 +411,7 @@ VkBufferMemoryBarrier* CommandRecorder::CopyArray(const V auto ptr = reinterpret_cast<VkBufferMemoryBarrier*>(m_allocator.Alloc(sizeof(VkBufferMemoryBarrier) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = 
src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcAccessMask = src[start_index + i].srcAccessMask; ptr[i].dstAccessMask = src[start_index + i].dstAccessMask; ptr[i].srcQueueFamilyIndex = src[start_index + i].srcQueueFamilyIndex; @@ -443,7 +443,7 @@ VkImageMemoryBarrier* CommandRecorder::CopyArray(const VkI auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkImageMemoryBarrier) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcAccessMask = src[start_index + i].srcAccessMask; ptr[i].dstAccessMask = src[start_index + i].dstAccessMask; ptr[i].oldLayout = src[start_index + i].oldLayout; @@ -462,7 +462,7 @@ VkMemoryBarrier* CommandRecorder::CopyArray(const VkMemoryBarri auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkMemoryBarrier) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcAccessMask = src[start_index + i].srcAccessMask; ptr[i].dstAccessMask = src[start_index + i].dstAccessMask; } @@ -513,7 +513,7 @@ VkWriteDescriptorSet* CommandRecorder::CopyArray(const VkW auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkWriteDescriptorSet) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].dstSet = src[start_index + i].dstSet; ptr[i].dstBinding = src[start_index + i].dstBinding; ptr[i].dstArrayElement = src[start_index + i].dstArrayElement; @@ -545,7 +545,7 @@ VkCommandBufferInheritanceInfo* CommandRecorder::CopyArray(c auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkCommandBufferBeginInfo) * 
count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].flags = src[start_index + i].flags; ptr[i].pInheritanceInfo = nullptr; if (src[start_index + i].pInheritanceInfo) { @@ -723,7 +723,7 @@ VkRenderPassBeginInfo* CommandRecorder::CopyArray(const V auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkRenderPassBeginInfo) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].renderPass = src[start_index + i].renderPass; ptr[i].framebuffer = src[start_index + i].framebuffer; ptr[i].renderArea = src[start_index + i].renderArea; @@ -743,7 +743,7 @@ VkSubpassBeginInfo* CommandRecorder::CopyArray(const VkSubpa auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkSubpassBeginInfo) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].contents = src[start_index + i].contents; } return ptr; @@ -755,7 +755,7 @@ VkSubpassEndInfo* CommandRecorder::CopyArray(const VkSubpassEn auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkSubpassEndInfo) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented } return ptr; } @@ -766,7 +766,7 @@ VkMemoryBarrier2* CommandRecorder::CopyArray(const VkMemoryBar auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkMemoryBarrier2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcStageMask = 
src[start_index + i].srcStageMask; ptr[i].srcAccessMask = src[start_index + i].srcAccessMask; ptr[i].dstStageMask = src[start_index + i].dstStageMask; @@ -781,7 +781,7 @@ VkBufferMemoryBarrier2* CommandRecorder::CopyArray(const auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkBufferMemoryBarrier2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcStageMask = src[start_index + i].srcStageMask; ptr[i].srcAccessMask = src[start_index + i].srcAccessMask; ptr[i].dstStageMask = src[start_index + i].dstStageMask; @@ -801,7 +801,7 @@ VkImageMemoryBarrier2* CommandRecorder::CopyArray(const V auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkImageMemoryBarrier2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcStageMask = src[start_index + i].srcStageMask; ptr[i].srcAccessMask = src[start_index + i].srcAccessMask; ptr[i].dstStageMask = src[start_index + i].dstStageMask; @@ -822,7 +822,7 @@ VkDependencyInfo* CommandRecorder::CopyArray(const VkDependenc auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkDependencyInfo) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].dependencyFlags = src[start_index + i].dependencyFlags; ptr[i].memoryBarrierCount = src[start_index + i].memoryBarrierCount; ptr[i].pMemoryBarriers = nullptr; @@ -853,7 +853,7 @@ VkBufferCopy2* CommandRecorder::CopyArray(const VkBufferCopy2* sr auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkBufferCopy2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = 
src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcOffset = src[start_index + i].srcOffset; ptr[i].dstOffset = src[start_index + i].dstOffset; ptr[i].size = src[start_index + i].size; @@ -867,7 +867,7 @@ VkCopyBufferInfo2* CommandRecorder::CopyArray(const VkCopyBuf auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkCopyBufferInfo2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcBuffer = src[start_index + i].srcBuffer; ptr[i].dstBuffer = src[start_index + i].dstBuffer; ptr[i].regionCount = src[start_index + i].regionCount; @@ -885,7 +885,7 @@ VkImageCopy2* CommandRecorder::CopyArray(const VkImageCopy2* src, auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkImageCopy2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcSubresource = src[start_index + i].srcSubresource; ptr[i].srcOffset = src[start_index + i].srcOffset; ptr[i].dstSubresource = src[start_index + i].dstSubresource; @@ -901,7 +901,7 @@ VkCopyImageInfo2* CommandRecorder::CopyArray(const VkCopyImage auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkCopyImageInfo2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcImage = src[start_index + i].srcImage; ptr[i].srcImageLayout = src[start_index + i].srcImageLayout; ptr[i].dstImage = src[start_index + i].dstImage; @@ -922,7 +922,7 @@ VkBufferImageCopy2* CommandRecorder::CopyArray(const VkBuffe auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkBufferImageCopy2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = 
src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].bufferOffset = src[start_index + i].bufferOffset; ptr[i].bufferRowLength = src[start_index + i].bufferRowLength; ptr[i].bufferImageHeight = src[start_index + i].bufferImageHeight; @@ -939,7 +939,7 @@ VkCopyBufferToImageInfo2* CommandRecorder::CopyArray(c auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkCopyBufferToImageInfo2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcBuffer = src[start_index + i].srcBuffer; ptr[i].dstImage = src[start_index + i].dstImage; ptr[i].dstImageLayout = src[start_index + i].dstImageLayout; @@ -959,7 +959,7 @@ VkCopyImageToBufferInfo2* CommandRecorder::CopyArray(c auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkCopyImageToBufferInfo2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcImage = src[start_index + i].srcImage; ptr[i].srcImageLayout = src[start_index + i].srcImageLayout; ptr[i].dstBuffer = src[start_index + i].dstBuffer; @@ -978,7 +978,7 @@ VkImageBlit2* CommandRecorder::CopyArray(const VkImageBlit2* src, auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkImageBlit2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcSubresource = src[start_index + i].srcSubresource; for (uint32_t j = 0; j < 2; ++j) { ptr[i].srcOffsets[j] = src[start_index + i].srcOffsets[j]; @@ -997,7 +997,7 @@ VkBlitImageInfo2* CommandRecorder::CopyArray(const VkBlitImage auto ptr = 
reinterpret_cast(m_allocator.Alloc(sizeof(VkBlitImageInfo2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcImage = src[start_index + i].srcImage; ptr[i].srcImageLayout = src[start_index + i].srcImageLayout; ptr[i].dstImage = src[start_index + i].dstImage; @@ -1019,7 +1019,7 @@ VkImageResolve2* CommandRecorder::CopyArray(const VkImageResolv auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkImageResolve2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcSubresource = src[start_index + i].srcSubresource; ptr[i].srcOffset = src[start_index + i].srcOffset; ptr[i].dstSubresource = src[start_index + i].dstSubresource; @@ -1035,7 +1035,7 @@ VkResolveImageInfo2* CommandRecorder::CopyArray(const VkRes auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkResolveImageInfo2) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].srcImage = src[start_index + i].srcImage; ptr[i].srcImageLayout = src[start_index + i].srcImageLayout; ptr[i].dstImage = src[start_index + i].dstImage; @@ -1057,7 +1057,7 @@ VkRenderingAttachmentInfo* CommandRecorder::CopyArray reinterpret_cast(m_allocator.Alloc(sizeof(VkRenderingAttachmentInfo) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].imageView = src[start_index + i].imageView; ptr[i].imageLayout = src[start_index + i].imageLayout; ptr[i].resolveMode = src[start_index + i].resolveMode; @@ -1076,7 +1076,7 @@ 
VkRenderingInfo* CommandRecorder::CopyArray(const VkRenderingIn auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkRenderingInfo) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].flags = src[start_index + i].flags; ptr[i].renderArea = src[start_index + i].renderArea; ptr[i].layerCount = src[start_index + i].layerCount; @@ -1108,7 +1108,7 @@ VkVideoPictureResourceInfoKHR* CommandRecorder::CopyArray(m_allocator.Alloc(sizeof(VkVideoReferenceSlotInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].slotIndex = src[start_index + i].slotIndex; ptr[i].pPictureResource = nullptr; if (src[start_index + i].pPictureResource) { @@ -1142,7 +1142,7 @@ VkVideoBeginCodingInfoKHR* CommandRecorder::CopyArray reinterpret_cast(m_allocator.Alloc(sizeof(VkVideoBeginCodingInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].flags = src[start_index + i].flags; ptr[i].videoSession = src[start_index + i].videoSession; ptr[i].videoSessionParameters = src[start_index + i].videoSessionParameters; @@ -1162,7 +1162,7 @@ VkVideoEndCodingInfoKHR* CommandRecorder::CopyArray(con auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkVideoEndCodingInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].flags = src[start_index + i].flags; } return ptr; @@ -1175,7 +1175,7 @@ VkVideoCodingControlInfoKHR* 
CommandRecorder::CopyArray(m_allocator.Alloc(sizeof(VkVideoCodingControlInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].flags = src[start_index + i].flags; } return ptr; @@ -1187,7 +1187,7 @@ VkVideoDecodeInfoKHR* CommandRecorder::CopyArray(const VkV auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkVideoDecodeInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].flags = src[start_index + i].flags; ptr[i].srcBuffer = src[start_index + i].srcBuffer; ptr[i].srcBufferOffset = src[start_index + i].srcBufferOffset; @@ -1215,7 +1215,7 @@ VkRenderingAttachmentLocationInfoKHR* CommandRecorder::CopyArray(const VkV auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkVideoEncodeInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].flags = src[start_index + i].flags; ptr[i].dstBuffer = src[start_index + i].dstBuffer; ptr[i].dstBufferOffset = src[start_index + i].dstBufferOffset; @@ -1291,7 +1291,7 @@ VkBindDescriptorSetsInfoKHR* CommandRecorder::CopyArray(m_allocator.Alloc(sizeof(VkBindDescriptorSetsInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].stageFlags = src[start_index + i].stageFlags; ptr[i].layout = src[start_index + i].layout; ptr[i].firstSet = src[start_index + i].firstSet; @@ -1317,7 +1317,7 @@ VkPushConstantsInfoKHR* CommandRecorder::CopyArray(const auto ptr = 
reinterpret_cast(m_allocator.Alloc(sizeof(VkPushConstantsInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].layout = src[start_index + i].layout; ptr[i].stageFlags = src[start_index + i].stageFlags; ptr[i].offset = src[start_index + i].offset; @@ -1336,7 +1336,7 @@ VkPushDescriptorSetInfoKHR* CommandRecorder::CopyArray(m_allocator.Alloc(sizeof(VkPushDescriptorSetInfoKHR) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].stageFlags = src[start_index + i].stageFlags; ptr[i].layout = src[start_index + i].layout; ptr[i].set = src[start_index + i].set; @@ -1357,7 +1357,7 @@ VkPushDescriptorSetWithTemplateInfoKHR* CommandRecorder::CopyArray( m_allocator.Alloc(sizeof(VkBindDescriptorBufferEmbeddedSamplersInfoEXT) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].stageFlags = src[start_index + i].stageFlags; ptr[i].layout = src[start_index + i].layout; ptr[i].set = src[start_index + i].set; @@ -1415,7 +1415,7 @@ VkDebugMarkerMarkerInfoEXT* CommandRecorder::CopyArray(m_allocator.Alloc(sizeof(VkDebugMarkerMarkerInfoEXT) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].pMarkerName = nullptr; if (src[start_index + i].pMarkerName) { ptr[i].pMarkerName = @@ -1434,7 +1434,7 @@ VkCuLaunchInfoNVX* CommandRecorder::CopyArray(const VkCuLaunc auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkCuLaunchInfoNVX) * count)); for (uint64_t i = 0; i < count; ++i) { 
ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].function = src[start_index + i].function; ptr[i].gridDimX = src[start_index + i].gridDimX; ptr[i].gridDimY = src[start_index + i].gridDimY; @@ -1462,7 +1462,7 @@ VkConditionalRenderingBeginInfoEXT* CommandRecorder::CopyArray(const VkD auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkDebugUtilsLabelEXT) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].pLabelName = nullptr; if (src[start_index + i].pLabelName) { ptr[i].pLabelName = @@ -1559,7 +1559,7 @@ VkSampleLocationsInfoEXT* CommandRecorder::CopyArray(c auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkSampleLocationsInfoEXT) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].sampleLocationsPerPixel = src[start_index + i].sampleLocationsPerPixel; ptr[i].sampleLocationGridSize = src[start_index + i].sampleLocationGridSize; ptr[i].sampleLocationsCount = src[start_index + i].sampleLocationsCount; @@ -1623,7 +1623,7 @@ VkGeometryNV* CommandRecorder::CopyArray(const VkGeometryNV* src, auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkGeometryNV) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].geometryType = src[start_index + i].geometryType; ptr[i].geometry = src[start_index + i].geometry; ptr[i].flags = src[start_index + i].flags; @@ -1638,7 +1638,7 @@ VkAccelerationStructureInfoNV* CommandRecorder::CopyArray reinterpret_cast(m_allocator.Alloc(sizeof(VkGeneratedCommandsInfoNV) * count)); 
for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].pipelineBindPoint = src[start_index + i].pipelineBindPoint; ptr[i].pipeline = src[start_index + i].pipeline; ptr[i].indirectCommandsLayout = src[start_index + i].indirectCommandsLayout; @@ -1740,7 +1740,7 @@ VkDepthBiasInfoEXT* CommandRecorder::CopyArray(const VkDepth auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkDepthBiasInfoEXT) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].depthBiasConstantFactor = src[start_index + i].depthBiasConstantFactor; ptr[i].depthBiasClamp = src[start_index + i].depthBiasClamp; ptr[i].depthBiasSlopeFactor = src[start_index + i].depthBiasSlopeFactor; @@ -1754,7 +1754,7 @@ VkCudaLaunchInfoNV* CommandRecorder::CopyArray(const VkCudaL auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkCudaLaunchInfoNV) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].function = src[start_index + i].function; ptr[i].gridDimX = src[start_index + i].gridDimX; ptr[i].gridDimY = src[start_index + i].gridDimY; @@ -1782,7 +1782,7 @@ VkDescriptorBufferBindingInfoEXT* CommandRecorder::CopyArray(const auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkMicromapBuildInfoEXT) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].type = src[start_index + i].type; ptr[i].flags = src[start_index + i].flags; ptr[i].mode = src[start_index + i].mode; @@ -1921,7 +1921,7 @@ VkCopyMicromapToMemoryInfoEXT* 
CommandRecorder::CopyArray(const V auto ptr = reinterpret_cast(m_allocator.Alloc(sizeof(VkCopyMicromapInfoEXT) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].src = src[start_index + i].src; ptr[i].dst = src[start_index + i].dst; ptr[i].mode = src[start_index + i].mode; @@ -2009,7 +2009,7 @@ VkOpticalFlowExecuteInfoNV* CommandRecorder::CopyArray(m_allocator.Alloc(sizeof(VkOpticalFlowExecuteInfoNV) * count)); for (uint64_t i = 0; i < count; ++i) { ptr[i].sType = src[start_index + i].sType; - ptr[i].pNext = src[start_index + i].pNext; + ptr[i].pNext = nullptr; // pNext deep copy not implemented ptr[i].flags = src[start_index + i].flags; ptr[i].regionCount = src[start_index + i].regionCount; ptr[i].pRegions = nullptr; @@ -2042,7 +2042,7 @@ VkAccelerationStructureGeometryKHR* CommandRecorder::CopyArray