diff --git a/patches/dynamorio-10.0.patch b/patches/dynamorio-10.0.patch
index 87c0c45..a19f8d3 100644
--- a/patches/dynamorio-10.0.patch
+++ b/patches/dynamorio-10.0.patch
@@ -694,6 +694,31 @@ index d5133bf16..e9273a08b 100644
          if (ksynch_get_value(&ostd->suspended) == 0) {
              /* If it still has to wait, give up the cpu. */
              os_thread_yield();
+diff --git a/core/unix/rseq_linux.c b/core/unix/rseq_linux.c
+index 4d3b9e60f..4ce713450 100644
+--- a/core/unix/rseq_linux.c
++++ b/core/unix/rseq_linux.c
+@@ -253,12 +253,14 @@ rseq_clear_tls_ptr(dcontext_t *dcontext)
+ {
+     ASSERT(rseq_tls_offset != 0);
+     byte *base = get_app_segment_base(LIB_SEG_TLS);
+-    struct rseq *app_rseq = (struct rseq *)(base + rseq_tls_offset);
+-    /* We're directly writing this in the cache, so we do not bother with safe_read
+-     * or safe_write here either. We already cannot handle rseq adversarial cases.
+-     */
+-    if (is_dynamo_address((byte *)(ptr_uint_t)app_rseq->rseq_cs))
+-        app_rseq->rseq_cs = 0;
++    /* base != NULL, not base > 0: a relational comparison of a pointer against
++     * the integer zero is not valid C (ordered comparison of pointer with
++     * integer zero); an equality test against NULL is the correct guard.
++     */
++    if (base != NULL) {
++        struct rseq *app_rseq = (struct rseq *)(base + rseq_tls_offset);
++        /* We're directly writing this in the cache, so we do not bother with safe_read
++         * or safe_write here either. We already cannot handle rseq adversarial cases.
++         */
++        if (is_dynamo_address((byte *)(ptr_uint_t)app_rseq->rseq_cs))
++            app_rseq->rseq_cs = 0;
++    }
+ }
+ 
+ int
 diff --git a/core/unix/signal.c b/core/unix/signal.c
 index 0cbc94337..4d32e4610 100644
 --- a/core/unix/signal.c