Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

i#95: Detach on Linux #6513

Merged
merged 46 commits into from
Dec 30, 2023
Merged
Show file tree
Hide file tree
Changes from 29 commits
Commits
Show all changes
46 commits
Select commit Hold shift + click to select a range
e1b9e53
feat: enable detachment on Linux
onroadmuwl Dec 18, 2023
e96b744
feat: use the drconfig frontend for detachment
onroadmuwl Dec 18, 2023
34ce6b3
test: add and modify test case for detachment on Linux and Windows
onroadmuwl Dec 18, 2023
8fe8c24
docs: update the documentation to include detachment on Linux
onroadmuwl Dec 18, 2023
d6d2acb
fix: remove extra space
onroadmuwl Dec 18, 2023
d3589ea
fix: extra space
onroadmuwl Dec 18, 2023
51d2b86
fix: extra space
onroadmuwl Dec 18, 2023
1956e11
fix: modify format
onroadmuwl Dec 18, 2023
8221e5b
fix: test can't be finished
onroadmuwl Dec 18, 2023
0f05ee1
fix: add conditional macro ifdef LINUX
onroadmuwl Dec 18, 2023
cf4cde6
fix: modify the format of conditional macro
onroadmuwl Dec 18, 2023
8a75c73
fix: add conditional macro ifdef LINUX
onroadmuwl Dec 18, 2023
7c0b21b
fix: add conditional macro ifdef LINUX
onroadmuwl Dec 18, 2023
cea79a5
fix: add conditional macro ifdef LINUX
onroadmuwl Dec 18, 2023
30ddfe1
fix: add conditional macro ifdef LINUX
onroadmuwl Dec 18, 2023
892b75b
fix: add conditional macro ifdef LINUX
onroadmuwl Dec 18, 2023
3a00052
fix: add conditional macro ifdef LINUX
onroadmuwl Dec 18, 2023
c54f5b1
test again
onroadmuwl Dec 18, 2023
0caa3ef
Merge branch 'master' into detach
onroadmuwl Dec 19, 2023
35faf43
resume attach test
onroadmuwl Dec 19, 2023
ba45b75
Merge branch 'master' into detach
onroadmuwl Dec 19, 2023
70f137d
debug git test suite
onroadmuwl Dec 20, 2023
0de122f
Merge branch 'master' into detach
onroadmuwl Dec 20, 2023
da9e31c
Merge branch 'detach' of github.com:onroadmuwl/dynamorio into detach
onroadmuwl Dec 20, 2023
2d6fa39
debug git test suite
onroadmuwl Dec 20, 2023
12b244f
add is_detach_external
onroadmuwl Dec 20, 2023
f8fa6a4
ignore on aarch64-native(like attach_test)
onroadmuwl Dec 20, 2023
0442427
remove extra comment and macro
onroadmuwl Dec 20, 2023
11e53e8
Merge branch 'master' into detach
onroadmuwl Dec 21, 2023
77a180d
fix: modify the frontend
onroadmuwl Dec 22, 2023
4be80a7
fix: modify the frontend
onroadmuwl Dec 22, 2023
824efb1
fix: modify the code on detach
onroadmuwl Dec 22, 2023
e18c728
fix: modify the frontend
onroadmuwl Dec 22, 2023
c7ff3ca
fix: add helper functions
onroadmuwl Dec 22, 2023
32306ca
fix: modify format
onroadmuwl Dec 22, 2023
270a3e3
fix: add _IF_WINDOWS inside helper functions
onroadmuwl Dec 22, 2023
33c74be
fix: modify format
onroadmuwl Dec 22, 2023
f97f6e2
fix: modify format
onroadmuwl Dec 22, 2023
dcd7280
fix: modify the test suite
onroadmuwl Dec 22, 2023
a871d8a
fix: modify the test suite
onroadmuwl Dec 22, 2023
eeb6ad2
Merge branch 'master' into detach
onroadmuwl Dec 23, 2023
9eea0c4
style: modfiy detach helpers
onroadmuwl Dec 30, 2023
36b9f94
kill background process forcely
onroadmuwl Dec 30, 2023
b150436
rollback: kill background process forcely
onroadmuwl Dec 30, 2023
351b0e0
modify the test suite
onroadmuwl Dec 30, 2023
05c4481
test again
onroadmuwl Dec 30, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions api/docs/deployment.dox
Original file line number Diff line number Diff line change
Expand Up @@ -337,6 +337,12 @@ with this command:
% echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope
\endcode

Then, you can also detach DynamoRIO from the target process
without affecting the normal execution of the application.
\code
% bin32/drconfig -detach <target_pid>
\endcode

Run \c drrun with no options to get a list of the options and
environment variable shortcuts it supports. To disable following across
child execve calls, use the \ref op_children "-no_follow_children" runtime
Expand Down
21 changes: 21 additions & 0 deletions core/nudge.c
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,10 @@
#else
#endif /* WINDOWS */

#ifdef LINUX
# include "synch.h"
#endif

#ifdef HOT_PATCHING_INTERFACE
# include "hotpatch.h" /* for hotp_nudge_update() */
#endif
Expand Down Expand Up @@ -438,6 +442,23 @@ handle_nudge(dcontext_t *dcontext, nudge_arg_t *arg)
detach_helper(DETACH_NORMAL_TYPE);
}
#endif
#ifdef LINUX
/* The detach handler is last since in the common case it doesn't return. */
if (TEST(NUDGE_GENERIC(detach), nudge_action_mask)) {
onroadmuwl marked this conversation as resolved.
Show resolved Hide resolved
nudge_action_mask &= ~NUDGE_GENERIC(detach);
heap_error_code_t error_code_reserve, error_code_commit;
void *d_r_detachstack =
os_heap_reserve(NULL, DYNAMORIO_STACK_SIZE, &error_code_reserve, false);
if (!os_heap_commit(d_r_detachstack, DYNAMORIO_STACK_SIZE,
onroadmuwl marked this conversation as resolved.
Show resolved Hide resolved
onroadmuwl marked this conversation as resolved.
Show resolved Hide resolved
MEMPROT_READ | MEMPROT_WRITE, &error_code_commit)) {
ASSERT_NOT_REACHED();
}
call_switch_stack(dcontext,
(byte *)((ptr_uint_t)d_r_detachstack + DYNAMORIO_STACK_SIZE),
(void (*)(void *))detach_externally_on_linux, NULL, true);
ASSERT_NOT_REACHED();
}
#endif
}

#ifdef UNIX
Expand Down
10 changes: 8 additions & 2 deletions core/os_shared.h
Original file line number Diff line number Diff line change
Expand Up @@ -204,15 +204,21 @@ is_thread_currently_native(thread_record_t *tr);
*/
bool
thread_get_mcontext(thread_record_t *tr, priv_mcontext_t *mc);

#ifdef LINUX
bool
thread_get_nudged_mcontext(thread_record_t *tr, priv_mcontext_t *mc);
#endif

bool
thread_set_mcontext(thread_record_t *tr, priv_mcontext_t *mc);

/* Takes an os-specific context. Does not return. */
void
thread_set_self_context(void *cxt);
thread_set_self_context(void *cxt, bool is_detach_external);
/* Only sets the priv_mcontext_t state. Does not return. */
void
thread_set_self_mcontext(priv_mcontext_t *mc);
thread_set_self_mcontext(priv_mcontext_t *mc, bool is_detach_external);

/* Assumes target thread is suspended */
bool
Expand Down
202 changes: 200 additions & 2 deletions core/synch.c
Original file line number Diff line number Diff line change
Expand Up @@ -761,9 +761,9 @@ check_wait_at_safe_spot(dcontext_t *dcontext, thread_synch_permission_t cur_stat
* being at the synch point vs in the cache.
*/
if (set_mcontext)
thread_set_self_mcontext((priv_mcontext_t *)cxt);
thread_set_self_mcontext((priv_mcontext_t *)cxt, false);
else
thread_set_self_context((void *)cxt);
thread_set_self_context((void *)cxt, false);
ASSERT_NOT_REACHED();
}
}
Expand Down Expand Up @@ -2289,3 +2289,201 @@ detach_on_permanent_stack(bool internal, bool do_cleanup, dr_stats_t *drstats)
EXITING_DR();
options_detach();
}
#ifdef LINUX
/* Entry point for an externally-triggered detach on Linux (i#95).
 * Invoked from handle_nudge() on a freshly reserved temporary stack after a
 * detach nudge signal arrives (see the call_switch_stack() call there), since
 * the detaching thread's own DR stack is freed during cleanup below.
 * Performs a full synch-all detach: suspends and translates every
 * DR-controlled thread, resumes them natively, tears down DR state, and
 * finally restores this thread's own nudge-time context via
 * thread_set_self_mcontext(..., true).  Does not return.
 */
void
detach_externally_on_linux()
onroadmuwl marked this conversation as resolved.
Show resolved Hide resolved
{
    dcontext_t *my_dcontext;
    /* The nudged thread's app context, captured at the nudge signal and
     * restored at the very end of this function.
     */
    priv_mcontext_t my_mcontext;
    thread_record_t **threads;
    thread_record_t *my_tr = NULL;
    int i, num_threads, my_idx = -1;
    thread_id_t my_id;
    DEBUG_DECLARE(bool ok;)
    DEBUG_DECLARE(int exit_res;)
    /* synch-all flags: */
    uint flags = 0;
    /* For Unix, such privilege problems are rarer but we would still prefer to
     * continue if we hit a problem.
     */
    flags |= THREAD_SYNCH_SUSPEND_FAILURE_IGNORE;
    /* i#297: we only synch client threads after process exit event. */
    flags |= THREAD_SYNCH_SKIP_CLIENT_THREAD;
    ENTERING_DR();
    /* dynamo_detaching_flag is not really a lock, and since no one ever waits
     * on it we can't deadlock on it either.
     */
    if (!atomic_compare_exchange(&dynamo_detaching_flag, LOCK_FREE_STATE, LOCK_SET_STATE))
        return;
    instrument_pre_detach_event();
    /* Unprotect .data for exit cleanup.
     * XXX: more secure to not do this until we've synched, but then need
     * alternative prot for started_detach and init_apc_go_native*
     */
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    ASSERT(!started_detach);
    started_detach = true;
    ASSERT(dynamo_initialized);
    ASSERT(!dynamo_exited);
    my_id = d_r_get_thread_id();
    my_dcontext = get_thread_private_dcontext();
    ASSERT(my_dcontext != NULL);
    LOG(GLOBAL, LOG_ALL, 1, "Detach: thread %d starting detach process\n", my_id);
    SYSLOG(SYSLOG_INFORMATION, INFO_DETACHING, 2, get_application_name(),
           get_application_pid());
    /* synch with flush */
    if (my_dcontext != NULL)
        enter_threadexit(my_dcontext);
    /* i#2270: we ignore alarm signals during detach to reduce races. */
    signal_remove_alarm_handlers(my_dcontext);
    /* suspend all DR-controlled threads at safe locations */
    if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT, &threads,
                                &num_threads,
                                /* Case 6821: allow other synch-all-thread uses
                                 * that beat us to not wait on us. We still have
                                 * a problem if we go first since we must xfer
                                 * other threads.
                                 */
                                THREAD_SYNCH_NO_LOCKS_NO_XFER, flags)) {
        REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_SYNCHRONIZE_THREADS, 2,
                                    get_application_name(), get_application_pid());
    }
    /* Now we own the thread_initexit_lock. We'll release the locks grabbed in
     * synch_with_all_threads below after cleaning up all the threads in case we
     * need to grab it during process exit cleanup.
     */
    ASSERT(mutex_testlock(&all_threads_synch_lock) &&
           mutex_testlock(&thread_initexit_lock));
    ASSERT(!doing_detach);
    doing_detach = true;
    detacher_tid = d_r_get_thread_id();
# ifdef HOT_PATCHING_INTERFACE
    /* In hotp_only mode, we must remove patches when detaching; we don't want
     * to leave in all our hooks and detach; that will definitely crash the app.
     */
    if (DYNAMO_OPTION(hotp_only))
        hotp_only_detach_helper();
# endif
    if (!DYNAMO_OPTION(thin_client))
        revert_memory_regions();
    unhook_vsyscall();
    LOG(GLOBAL, LOG_ALL, 1,
        "Detach : unpatched ntdll.dll and fixed memory permissions\n");
    /* perform exit tasks that require full thread data structs */
    dynamo_process_exit_with_thread_info();
    LOG(GLOBAL, LOG_ALL, 1, "Detach: starting to translate contexts\n");
    for (i = 0; i < num_threads; i++) {
        priv_mcontext_t mc;
        if (threads[i]->dcontext == my_dcontext) {
            /* This is the nudged (detaching) thread itself: its app context
             * comes from the nudge signal frame, not a suspend context.
             */
            my_idx = i;
            my_tr = threads[i];
            DEBUG_DECLARE(ok =)
            thread_get_nudged_mcontext(threads[i], &my_mcontext);
            /* NOTE(review): unlike the paths below, neither this ok nor the
             * translate result is ASSERTed — consider adding ASSERT(ok).
             */
            DEBUG_DECLARE(ok =)
            translate_mcontext(threads[i], &my_mcontext, true /*restore mem*/,
                               NULL /*f*/);
            continue;
        } else if (IS_CLIENT_THREAD(threads[i]->dcontext)) {
            /* i#297 we will kill client-owned threads later after app exit events
             * in dynamo_shared_exit().
             */
            continue;
        } else if (detach_do_not_translate(threads[i])) {
            LOG(GLOBAL, LOG_ALL, 2, "Detach: not translating " TIDFMT "\n",
                threads[i]->id);
        } else {
            LOG(GLOBAL, LOG_ALL, 2, "Detach: translating " TIDFMT "\n", threads[i]->id);
            DEBUG_DECLARE(ok =)
            thread_get_mcontext(threads[i], &mc);
            ASSERT(ok);
            /* For a thread at a syscall, we use SA_RESTART for our suspend signal,
             * so the kernel will adjust the restart point back to the syscall for us
             * where expected. This is an artificial signal we're introducing, so an
             * app that assumes no signals and assumes its non-auto-restart syscalls
             * don't need loops could be broken.
             */
            LOG(GLOBAL, LOG_ALL, 3,
                /* Having the code bytes can help diagnose post-detach where the code
                 * cache is gone.
                 */
                "Detach: pre-xl8 pc=%p (%02x %02x %02x %02x %02x), xsp=%p "
                "for thread " TIDFMT "\n",
                mc.pc, *mc.pc, *(mc.pc + 1), *(mc.pc + 2), *(mc.pc + 3), *(mc.pc + 4),
                mc.xsp, threads[i]->id);
            DEBUG_DECLARE(ok =)
            translate_mcontext(threads[i], &mc, true /*restore mem*/, NULL /*f*/);
            ASSERT(ok);
            if (!threads[i]->under_dynamo_control) {
                dr_printf("Detach : thread " TIDFMT " already running natively\n",
                          threads[i]->id);
                LOG(GLOBAL, LOG_ALL, 1,
                    "Detach : thread " TIDFMT " already running natively\n",
                    threads[i]->id);
                /* we do need to restore the app ret addr, for native_exec */
                if (!DYNAMO_OPTION(thin_client) && DYNAMO_OPTION(native_exec) &&
                    !vmvector_empty(native_exec_areas)) {
                    put_back_native_retaddrs(threads[i]->dcontext);
                }
            }
            LOG(GLOBAL, LOG_ALL, 1, "Detach: pc=" PFX " for thread " TIDFMT "\n", mc.pc,
                threads[i]->id);
            ASSERT(!is_dynamo_address(mc.pc) && !in_fcache(mc.pc));
            /* XXX case 7457: if the thread is suspended after it received a fault
             * but before the kernel copied the faulting context to the user mode
             * structures for the handler, it could result in a codemod exception
             * that wouldn't happen natively!
             */
            DEBUG_DECLARE(ok =)
            thread_set_mcontext(threads[i], &mc);
            ASSERT(ok);
        }
        /* Resumes the thread, which will do kernel-visible cleanup of
         * signal state. Resume happens within the synch_all region where
         * the thread_initexit_lock is held so that we can clean up thread
         * data later.
         */
        os_signal_thread_detach(threads[i]->dcontext);
        LOG(GLOBAL, LOG_ALL, 1, "Detach: thread " TIDFMT " is being resumed as native\n",
            threads[i]->id);
        os_thread_resume(threads[i]);
    }
    LOG(GLOBAL, LOG_ALL, 1, "Detach: waiting for threads to fully detach\n");
    for (i = 0; i < num_threads; i++) {
        if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext))
            os_wait_thread_detached(threads[i]->dcontext);
    }
    /* Clean up each thread now that everyone has gone native. Needs to be
     * done with the thread_initexit_lock held, which is true within a synched
     * region.
     */
    for (i = 0; i < num_threads; i++) {
onroadmuwl marked this conversation as resolved.
Show resolved Hide resolved
        if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext)) {
            LOG(GLOBAL, LOG_ALL, 1, "Detach: cleaning up thread " TIDFMT " %s\n",
                threads[i]->id, IF_WINDOWS_ELSE(cleanup_tpc[i] ? "and its TPC" : "", ""));
            dynamo_other_thread_exit(threads[i] _IF_WINDOWS(!cleanup_tpc[i]));
        }
    }
    if (my_idx != -1) {
        /* pre-client thread cleanup (PR 536058) */
        dynamo_thread_exit_pre_client(my_dcontext, my_tr->id);
    }
    LOG(GLOBAL, LOG_ALL, 1, "Detach: Letting secondary threads go native\n");
    end_synch_with_all_threads(threads, num_threads, false /*don't resume */);
    threads = NULL;
    LOG(GLOBAL, LOG_ALL, 1, "Detach: Entering final cleanup and unload\n");
    SYSLOG_INTERNAL_INFO("Detaching from process, entering final cleanup");
    DEBUG_DECLARE(exit_res =)
    dynamo_shared_exit(my_tr _IF_WINDOWS(detach_stacked_callbacks));
    ASSERT(exit_res == SUCCESS);
    detach_finalize_cleanup();
    /* We are running on the temporary detach stack, so DR's initstack can go. */
    stack_free(d_r_initstack, DYNAMORIO_STACK_SIZE);
    dynamo_exit_post_detach();
    doing_detach = false;
    started_detach = false;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    dynamo_detaching_flag = LOCK_FREE_STATE;
    EXITING_DR();
    options_detach();
    /* Resume the app at its nudge-time context; is_detach_external=true forces
     * the non-sigreturn restore path (see thread_set_self_context).  Never
     * returns.
     * NOTE(review): the temporary stack allocated in handle_nudge() is never
     * freed — confirm this intentional one-time leak is acceptable.
     */
    thread_set_self_mcontext(&my_mcontext, true);
}
#endif
5 changes: 5 additions & 0 deletions core/synch.h
Original file line number Diff line number Diff line change
Expand Up @@ -263,6 +263,11 @@ send_all_other_threads_native(void);
void
detach_on_permanent_stack(bool internal, bool do_cleanup, dr_stats_t *drstats);

#ifdef LINUX
void
detach_externally_on_linux();
#endif

/*** exported for detach only ***/

bool
Expand Down
17 changes: 17 additions & 0 deletions core/unix/os.c
Original file line number Diff line number Diff line change
Expand Up @@ -3979,6 +3979,23 @@ thread_get_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
return true;
}

#ifdef LINUX
/* Copies the register state captured at the detach nudge signal into *mc.
 * PR 212090: only works when the target thread is suspended by us; we then
 * take the signal context that handle_suspend_signal() stashed in
 * ostd->nudged_sigcxt when the nudge signal arrived.
 * NOTE(review): nudged_sigcxt points at a sig_full_cxt_t local to the signal
 * handler's frame — confirm that frame is still live whenever this is called.
 * Returns true; failures surface only via the debug-build asserts.
 */
bool
thread_get_nudged_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
    os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
    ASSERT(ostd != NULL);
    ASSERT(ostd->nudged_sigcxt != NULL);
    sigcontext_to_mcontext(mc, ostd->nudged_sigcxt, DR_MC_ALL);
    /* Preserve the ISA mode (ARM vs Thumb) recorded in the signal context. */
    IF_ARM(dr_set_isa_mode(tr->dcontext, get_sigcontext_isa_mode(ostd->nudged_sigcxt),
                           NULL));
    return true;
}
#endif

bool
thread_set_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
Expand Down
5 changes: 5 additions & 0 deletions core/unix/os_private.h
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,11 @@ typedef struct _os_thread_data_t {
KSYNCH_TYPE resumed;
sig_full_cxt_t *suspended_sigcxt;

#ifdef LINUX
/* Signal context captured at the detach nudge; for detach on Linux (i#95). */
onroadmuwl marked this conversation as resolved.
Show resolved Hide resolved
sig_full_cxt_t *nudged_sigcxt;
#endif

/* PR 297902: for thread termination */
bool terminate;
/* Any function that sets this flag must also notify possibly waiting
Expand Down
22 changes: 16 additions & 6 deletions core/unix/signal.c
Original file line number Diff line number Diff line change
Expand Up @@ -3166,10 +3166,10 @@ translate_sigcontext(dcontext_t *dcontext, kernel_ucontext_t *uc, bool avoid_fai

/* Takes an os-specific context */
void
thread_set_self_context(void *cxt)
thread_set_self_context(void *cxt, bool is_detach_external)
{
#ifdef X86
if (!INTERNAL_OPTION(use_sigreturn_setcontext)) {
if (!INTERNAL_OPTION(use_sigreturn_setcontext) || is_detach_external) {
sigcontext_t *sc = (sigcontext_t *)cxt;
dr_jmp_buf_t buf;
buf.xbx = sc->SC_XBX;
Expand Down Expand Up @@ -3311,7 +3311,7 @@ thread_set_segment_registers(sigcontext_t *sc)

/* Takes a priv_mcontext_t */
void
thread_set_self_mcontext(priv_mcontext_t *mc)
thread_set_self_mcontext(priv_mcontext_t *mc, bool is_detach_external)
{
kernel_ucontext_t ucxt;
sig_full_cxt_t sc_full;
Expand All @@ -3325,7 +3325,7 @@ thread_set_self_mcontext(priv_mcontext_t *mc)
IF_ARM(
set_pc_mode_in_cpsr(sc_full.sc, dr_get_isa_mode(get_thread_private_dcontext())));
/* thread_set_self_context will fill in the real fp/simd state for x86 */
thread_set_self_context((void *)sc_full.sc);
thread_set_self_context((void *)sc_full.sc, is_detach_external);
ASSERT_NOT_REACHED();
}

Expand Down Expand Up @@ -7917,10 +7917,15 @@ signal_to_itimer_type(int sig)
static bool
alarm_signal_has_DR_only_itimer(dcontext_t *dcontext, int signal)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int which = signal_to_itimer_type(signal);
if (which == -1)
return false;
#ifdef LINUX
if (dcontext == GLOBAL_DCONTEXT) {
return false;
}
#endif
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
bool DR_only =
Expand Down Expand Up @@ -8480,8 +8485,13 @@ handle_suspend_signal(dcontext_t *dcontext, kernel_siginfo_t *siginfo,

if (is_sigqueue_supported() && SUSPEND_SIGNAL == NUDGESIG_SIGNUM) {
nudge_arg_t *arg = (nudge_arg_t *)siginfo;
if (!TEST(NUDGE_IS_SUSPEND, arg->flags))
if (!TEST(NUDGE_IS_SUSPEND, arg->flags)) {
#ifdef LINUX
sig_full_initialize(&sc_full, ucxt);
ostd->nudged_sigcxt = &sc_full;
#endif
return handle_nudge_signal(dcontext, siginfo, ucxt);
}
}

/* We distinguish from an app signal further below from the rare case of an
Expand Down
Loading
Loading