From 34dfb78eac40e926886234d8c672607c1743e41f Mon Sep 17 00:00:00 2001
From: Svilen Kanev
Date: Fri, 28 Jul 2017 13:32:31 -0700
Subject: [PATCH 1/3] i#2571 exit flags not reset: reset on init.

Tested: ctest, internal repro.

Fixes #2571
---
 core/heap.c    | 9 +++++----
 core/vmareas.c | 1 +
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/core/heap.c b/core/heap.c
index ca71b5fa846..0a72c523370 100644
--- a/core/heap.c
+++ b/core/heap.c
@@ -295,7 +295,7 @@ typedef struct _heap_t {
     uint num_dead;
 } heap_t;
 
-/* no synch needed since only written once */
+/* protected by dynamo_vm_areas_lock() */
 static bool heap_exiting = false;
 
 #ifdef DEBUG
@@ -1401,6 +1401,9 @@ vmm_heap_init()
     if (DYNAMO_OPTION(vm_reserve)) {
         vmm_heap_unit_init(&heapmgt->vmheap, DYNAMO_OPTION(vm_size));
     }
+    dynamo_vm_areas_lock();
+    heap_exiting = false;
+    dynamo_vm_areas_unlock();
 }
 
 void
@@ -1547,8 +1550,6 @@ heap_init()
      * FIXME: ensure that release build aligns ok?
      * I would be quite surprised if static vars were not 4-byte-aligned!
      */
-    ASSERT(ALIGN_BACKWARD(&heap_exiting, CACHE_LINE_SIZE()) ==
-           ALIGN_BACKWARD(&heap_exiting + 1, CACHE_LINE_SIZE()));
     ASSERT(ALIGN_BACKWARD(&heap_unit_lock.owner, CACHE_LINE_SIZE()) ==
            ALIGN_BACKWARD(&heap_unit_lock.owner + 1, CACHE_LINE_SIZE()));
 
@@ -1645,9 +1646,9 @@ heap_exit()
     heap_unit_t *u, *next_u;
     heap_management_t *temp;
 
-    heap_exiting = true;
     /* FIXME: we shouldn't need either lock if executed last */
     dynamo_vm_areas_lock();
+    heap_exiting = true;
     acquire_recursive_lock(&heap_unit_lock);
 
 #ifdef WINDOWS
diff --git a/core/vmareas.c b/core/vmareas.c
index 05936913e10..4a5e77c6744 100644
--- a/core/vmareas.c
+++ b/core/vmareas.c
@@ -1566,6 +1566,7 @@ vm_areas_reset_init(void)
 void
 dynamo_vm_areas_init()
 {
+    vm_areas_exited = false;
     VMVECTOR_ALLOC_VECTOR(dynamo_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, dynamo_areas);
 }
 

From cc66df98846f042ee19fcfaeaf8120fdc17d670e Mon Sep 17 00:00:00 2001
From: Svilen Kanev
Date: Fri, 28 Jul 2017 17:31:36 -0700
Subject: [PATCH 2/3] Revert "i#2571 exit flags not reset: reset on init."

This reverts commit 34dfb78eac40e926886234d8c672607c1743e41f.
---
 core/heap.c    | 9 ++++-----
 core/vmareas.c | 1 -
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/core/heap.c b/core/heap.c
index 0a72c523370..ca71b5fa846 100644
--- a/core/heap.c
+++ b/core/heap.c
@@ -295,7 +295,7 @@ typedef struct _heap_t {
     uint num_dead;
 } heap_t;
 
-/* protected by dynamo_vm_areas_lock() */
+/* no synch needed since only written once */
 static bool heap_exiting = false;
 
 #ifdef DEBUG
@@ -1401,9 +1401,6 @@ vmm_heap_init()
     if (DYNAMO_OPTION(vm_reserve)) {
         vmm_heap_unit_init(&heapmgt->vmheap, DYNAMO_OPTION(vm_size));
     }
-    dynamo_vm_areas_lock();
-    heap_exiting = false;
-    dynamo_vm_areas_unlock();
 }
 
 void
@@ -1550,6 +1547,8 @@ heap_init()
      * FIXME: ensure that release build aligns ok?
      * I would be quite surprised if static vars were not 4-byte-aligned!
      */
+    ASSERT(ALIGN_BACKWARD(&heap_exiting, CACHE_LINE_SIZE()) ==
+           ALIGN_BACKWARD(&heap_exiting + 1, CACHE_LINE_SIZE()));
     ASSERT(ALIGN_BACKWARD(&heap_unit_lock.owner, CACHE_LINE_SIZE()) ==
            ALIGN_BACKWARD(&heap_unit_lock.owner + 1, CACHE_LINE_SIZE()));
 
@@ -1646,9 +1645,9 @@ heap_exit()
     heap_unit_t *u, *next_u;
     heap_management_t *temp;
 
+    heap_exiting = true;
     /* FIXME: we shouldn't need either lock if executed last */
     dynamo_vm_areas_lock();
-    heap_exiting = true;
     acquire_recursive_lock(&heap_unit_lock);
 
 #ifdef WINDOWS
diff --git a/core/vmareas.c b/core/vmareas.c
index 4a5e77c6744..05936913e10 100644
--- a/core/vmareas.c
+++ b/core/vmareas.c
@@ -1566,7 +1566,6 @@ vm_areas_reset_init(void)
 void
 dynamo_vm_areas_init()
 {
-    vm_areas_exited = false;
     VMVECTOR_ALLOC_VECTOR(dynamo_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, dynamo_areas);
 }
 

From e5b22f4caaabf15c26268fd95735dc82320afb25 Mon Sep 17 00:00:00 2001
From: Svilen Kanev
Date: Fri, 28 Jul 2017 17:31:02 -0700
Subject: [PATCH 3/3] Clear flags late in detach

---
 core/dynamo.c  | 2 ++
 core/heap.c    | 6 ++++++
 core/heap.h    | 1 +
 core/vmareas.c | 6 ++++++
 core/vmareas.h | 4 ++++
 5 files changed, 19 insertions(+)

diff --git a/core/dynamo.c b/core/dynamo.c
index c08c7f1bf11..ae1e608e0f1 100644
--- a/core/dynamo.c
+++ b/core/dynamo.c
@@ -1547,6 +1547,8 @@ dynamo_exit_post_detach(void)
 #ifdef UNIX
     post_execve = false;
 #endif
+    vm_areas_post_exit();
+    heap_post_exit();
 }
 
 dcontext_t *
diff --git a/core/heap.c b/core/heap.c
index ca71b5fa846..0e24de41c21 100644
--- a/core/heap.c
+++ b/core/heap.c
@@ -1706,6 +1706,12 @@ heap_exit()
     heapmgt = &temp_heapmgt;
 }
 
+void
+heap_post_exit()
+{
+    heap_exiting = false;
+}
+
 /* FIXME:
  * detect if the app is who we're fighting for memory, if so, don't
  * free memory, else the app will just keep grabbing more.
diff --git a/core/heap.h b/core/heap.h
index 7f24e0e9157..b632c997823 100644
--- a/core/heap.h
+++ b/core/heap.h
@@ -125,6 +125,7 @@ bool rel32_reachable_from_vmcode(byte *target);
 /* heap management */
 void heap_init(void);
 void heap_exit(void);
+void heap_post_exit(void); /* post exit to support reattach */
 void heap_reset_init(void);
 void heap_reset_free(void);
 void heap_thread_init(dcontext_t *dcontext);
diff --git a/core/vmareas.c b/core/vmareas.c
index 05936913e10..521dfaa5335 100644
--- a/core/vmareas.c
+++ b/core/vmareas.c
@@ -1801,6 +1801,12 @@ vm_areas_exit()
     return 0;
 }
 
+void
+vm_areas_post_exit()
+{
+    vm_areas_exited = false;
+}
+
 void
 vm_areas_thread_reset_init(dcontext_t *dcontext)
 {
diff --git a/core/vmareas.h b/core/vmareas.h
index 74500e1f207..d7bb7abbb97 100644
--- a/core/vmareas.h
+++ b/core/vmareas.h
@@ -247,6 +247,10 @@ vm_areas_init(void);
 int
 vm_areas_exit(void);
 
+/* post cleanup to support reattach */
+void
+vm_areas_post_exit(void);
+
 /* thread-shared initialization that should be repeated after a reset */
 void
 vm_areas_reset_init(void);