
Commit

Merge in jdk-23.0.1+9 (24.1)
PullRequest: labsjdk-ce/111
OracleLabsAutomation authored and marwan-hallaoui committed Sep 11, 2024
2 parents bf79406 + 25e060d commit 519b5c1
Showing 94 changed files with 4,262 additions and 537 deletions.
4 changes: 2 additions & 2 deletions .jcheck/conf
@@ -1,7 +1,7 @@
 [general]
-project=jdk
+project=jdk-updates
 jbs=JDK
-version=23
+version=23.0.1
 
 [checks]
 error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists
4 changes: 2 additions & 2 deletions make/conf/version-numbers.conf
@@ -28,12 +28,12 @@

 DEFAULT_VERSION_FEATURE=23
 DEFAULT_VERSION_INTERIM=0
-DEFAULT_VERSION_UPDATE=0
+DEFAULT_VERSION_UPDATE=1
 DEFAULT_VERSION_PATCH=0
 DEFAULT_VERSION_EXTRA1=0
 DEFAULT_VERSION_EXTRA2=0
 DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2024-09-17
+DEFAULT_VERSION_DATE=2024-10-15
 DEFAULT_VERSION_CLASSFILE_MAJOR=67 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
 DEFAULT_VERSION_CLASSFILE_MINOR=0
 DEFAULT_VERSION_DOCS_API_SINCE=11
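A note on the relation recorded in the conf comment above: the class-file major version is always the feature release plus 44, which is what `$EXPR $DEFAULT_VERSION_FEATURE + 44` computes at configure time (JDK 23 → 67). A minimal sketch of that arithmetic, assuming nothing beyond the conf values shown:

```cpp
// Sketch only: mirrors the "$EXPR $DEFAULT_VERSION_FEATURE + 44" relation
// above. JDK feature release N emits class files with major version N + 44.
#include <cassert>

constexpr int classfile_major(int feature) { return feature + 44; }

int main() {
  assert(classfile_major(23) == 67); // DEFAULT_VERSION_CLASSFILE_MAJOR
  return 0;
}
```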
2 changes: 2 additions & 0 deletions src/hotspot/cpu/riscv/assembler_riscv.hpp
@@ -1828,10 +1828,12 @@ enum Nf {
   // Vector unordered indexed load instructions
   INSN( vluxei8_v, 0b0000111, 0b000, 0b01, 0b0);
   INSN(vluxei32_v, 0b0000111, 0b110, 0b01, 0b0);
+  INSN(vluxei64_v, 0b0000111, 0b111, 0b01, 0b0);
 
   // Vector unordered indexed store instructions
   INSN( vsuxei8_v, 0b0100111, 0b000, 0b01, 0b0);
   INSN(vsuxei32_v, 0b0100111, 0b110, 0b01, 0b0);
+  INSN(vsuxei64_v, 0b0100111, 0b111, 0b01, 0b0);
 
 #undef INSN
 
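For readers unfamiliar with the INSN macro arguments above (opcode, width, mop, mew): they map onto the RVV 1.0 vector load/store encoding, where vluxei64_v uses the LOAD-FP major opcode with a 64-bit index EEW and unordered-indexed addressing. A rough packing sketch under those assumptions; the function itself is hypothetical, not HotSpot's actual emitter:

```cpp
// Illustrative sketch, assuming RVV 1.0 load encoding; field positions are
// from the spec. The INSN macro above supplies opcode=0b0000111,
// width=0b111, mop=0b01, mew=0b0 for vluxei64_v.
#include <cstdint>

uint32_t encode_vluxei64_v(uint32_t vd, uint32_t rs1, uint32_t vs2, bool masked) {
  const uint32_t opcode = 0b0000111;      // LOAD-FP major opcode, bits [6:0]
  const uint32_t width  = 0b111;          // 64-bit index EEW, bits [14:12]
  const uint32_t mop    = 0b01;           // unordered indexed, bits [27:26]
  const uint32_t mew    = 0b0;            // bit [28]
  const uint32_t nf     = 0b000;          // single register group, bits [31:29]
  const uint32_t vm     = masked ? 0 : 1; // bit [25]: 0 = masked by v0
  return (nf << 29) | (mew << 28) | (mop << 26) | (vm << 25) |
         (vs2 << 20) | (rs1 << 15) | (width << 12) | (vd << 7) | opcode;
}
```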
98 changes: 82 additions & 16 deletions src/hotspot/cpu/riscv/riscv_v.ad
@@ -4795,12 +4795,11 @@ instruct vcountTrailingZeros(vReg dst, vReg src) %{

 // ------------------------------ Vector Load Gather ---------------------------
 
-instruct gather_load(vReg dst, indirect mem, vReg idx) %{
-  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 4 ||
-            type2aelembytes(Matcher::vector_element_basic_type(n)) == 8);
+instruct gather_loadS(vReg dst, indirect mem, vReg idx) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 4);
   match(Set dst (LoadVectorGather mem idx));
   effect(TEMP_DEF dst);
-  format %{ "gather_load $dst, $mem, $idx" %}
+  format %{ "gather_loadS $dst, $mem, $idx" %}
   ins_encode %{
     __ vmv1r_v(as_VectorRegister($dst$$reg), as_VectorRegister($idx$$reg));
     BasicType bt = Matcher::vector_element_basic_type(this);
@@ -4813,12 +4812,28 @@ instruct gather_load(vReg dst, indirect mem, vReg idx) %{
     ins_pipe(pipe_slow);
 %}
 
-instruct gather_load_masked(vReg dst, indirect mem, vReg idx, vRegMask_V0 v0, vReg tmp) %{
-  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 4 ||
-            type2aelembytes(Matcher::vector_element_basic_type(n)) == 8);
+instruct gather_loadD(vReg dst, indirect mem, vReg idx) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 8);
+  match(Set dst (LoadVectorGather mem idx));
+  effect(TEMP_DEF dst);
+  format %{ "gather_loadD $dst, $mem, $idx" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
+    __ vsetvli_helper(bt, Matcher::vector_length(this));
+    __ vzext_vf2(as_VectorRegister($dst$$reg), as_VectorRegister($idx$$reg));
+    __ vsll_vi(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), (int)sew);
+    __ vluxei64_v(as_VectorRegister($dst$$reg), as_Register($mem$$base),
+                  as_VectorRegister($dst$$reg));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct gather_loadS_masked(vReg dst, indirect mem, vReg idx, vRegMask_V0 v0, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 4);
   match(Set dst (LoadVectorGatherMasked mem (Binary idx v0)));
   effect(TEMP_DEF dst, TEMP tmp);
-  format %{ "gather_load_masked $dst, $mem, $idx, $v0\t# KILL $tmp" %}
+  format %{ "gather_loadS_masked $dst, $mem, $idx, $v0\t# KILL $tmp" %}
   ins_encode %{
     __ vmv1r_v(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
     BasicType bt = Matcher::vector_element_basic_type(this);
@@ -4833,14 +4848,32 @@ instruct gather_load_masked(vReg dst, indirect mem, vReg idx, vRegMask_V0 v0, vR
     ins_pipe(pipe_slow);
 %}
 
+instruct gather_loadD_masked(vReg dst, indirect mem, vReg idx, vRegMask_V0 v0, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 8);
+  match(Set dst (LoadVectorGatherMasked mem (Binary idx v0)));
+  effect(TEMP_DEF dst, TEMP tmp);
+  format %{ "gather_loadD_masked $dst, $mem, $idx, $v0\t# KILL $tmp" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
+    __ vsetvli_helper(bt, Matcher::vector_length(this));
+    __ vzext_vf2(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
+    __ vsll_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($tmp$$reg), (int)sew);
+    __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg),
+               as_VectorRegister($dst$$reg));
+    __ vluxei64_v(as_VectorRegister($dst$$reg), as_Register($mem$$base),
+                  as_VectorRegister($tmp$$reg), Assembler::v0_t);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 // ------------------------------ Vector Store Scatter -------------------------
 
-instruct scatter_store(indirect mem, vReg src, vReg idx, vReg tmp) %{
-  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 4 ||
-            type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 8);
+instruct scatter_storeS(indirect mem, vReg src, vReg idx, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 4);
   match(Set mem (StoreVectorScatter mem (Binary src idx)));
   effect(TEMP tmp);
-  format %{ "scatter_store $mem, $idx, $src\t# KILL $tmp" %}
+  format %{ "scatter_storeS $mem, $idx, $src\t# KILL $tmp" %}
   ins_encode %{
     __ vmv1r_v(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
     BasicType bt = Matcher::vector_element_basic_type(this, $src);
@@ -4853,12 +4886,28 @@ instruct scatter_store(indirect mem, vReg src, vReg idx, vReg tmp) %{
     ins_pipe(pipe_slow);
 %}
 
-instruct scatter_store_masked(indirect mem, vReg src, vReg idx, vRegMask_V0 v0, vReg tmp) %{
-  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 4 ||
-            type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 8);
+instruct scatter_storeD(indirect mem, vReg src, vReg idx, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 8);
+  match(Set mem (StoreVectorScatter mem (Binary src idx)));
+  effect(TEMP tmp);
+  format %{ "scatter_storeD $mem, $idx, $src\t# KILL $tmp" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this, $src);
+    Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
+    __ vsetvli_helper(bt, Matcher::vector_length(this, $src));
+    __ vzext_vf2(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
+    __ vsll_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($tmp$$reg), (int)sew);
+    __ vsuxei64_v(as_VectorRegister($src$$reg), as_Register($mem$$base),
+                  as_VectorRegister($tmp$$reg));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct scatter_storeS_masked(indirect mem, vReg src, vReg idx, vRegMask_V0 v0, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 4);
   match(Set mem (StoreVectorScatterMasked mem (Binary src (Binary idx v0))));
   effect(TEMP tmp);
-  format %{ "scatter_store_masked $mem, $idx, $src, $v0\t# KILL $tmp" %}
+  format %{ "scatter_storeS_masked $mem, $idx, $src, $v0\t# KILL $tmp" %}
   ins_encode %{
     __ vmv1r_v(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
     BasicType bt = Matcher::vector_element_basic_type(this, $src);
@@ -4871,6 +4920,23 @@ instruct scatter_store_masked(indirect mem, vReg src, vReg idx, vRegMask_V0 v0,
     ins_pipe(pipe_slow);
 %}
 
+instruct scatter_storeD_masked(indirect mem, vReg src, vReg idx, vRegMask_V0 v0, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 8);
+  match(Set mem (StoreVectorScatterMasked mem (Binary src (Binary idx v0))));
+  effect(TEMP tmp);
+  format %{ "scatter_storeD_masked $mem, $idx, $src, $v0\t# KILL $tmp" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this, $src);
+    Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
+    __ vsetvli_helper(bt, Matcher::vector_length(this, $src));
+    __ vzext_vf2(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
+    __ vsll_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($tmp$$reg), (int)sew);
+    __ vsuxei64_v(as_VectorRegister($src$$reg), as_Register($mem$$base),
+                  as_VectorRegister($tmp$$reg), Assembler::v0_t);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 // ------------------------------ Populate Index to a Vector -------------------
 
 instruct populateindex(vReg dst, iRegIorL2I src1, iRegIorL2I src2, vReg tmp) %{
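The new D-variants above all share one trick: the 32-bit index vector is zero-extended to 64 bits (vzext_vf2) and shifted left by log2 of the element size ((int)sew is 3 for 64-bit elements) to form byte offsets for vluxei64_v/vsuxei64_v. A scalar per-lane sketch of what gather_loadD computes, assuming 32-bit indices into 64-bit elements:

```cpp
// Scalar equivalent of the vectorized gather_loadD pattern; sketch only.
#include <cstdint>
#include <cstddef>

void gather_load_d(int64_t* dst, const char* base, const uint32_t* idx, size_t vl) {
  for (size_t i = 0; i < vl; i++) {
    uint64_t offset = (uint64_t)idx[i] << 3;    // vzext_vf2 + vsll_vi (1 << 3 == 8 bytes)
    dst[i] = *(const int64_t*)(base + offset);  // vluxei64.v: base + unsigned byte offset
  }
}
```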
2 changes: 2 additions & 0 deletions src/hotspot/share/cds/archiveHeapLoader.cpp
@@ -34,6 +34,7 @@
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"

@@ -61,6 +62,7 @@ ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;

 // Every mapped region is offset by _mapped_heap_delta from its requested address.
 // See FileMapInfo::heap_region_requested_address().
+ATTRIBUTE_NO_UBSAN
 void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
   assert(!_mapped_heap_relocation_initialized, "only once");
   if (!UseCompressedOops) {
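ATTRIBUTE_NO_UBSAN disables UndefinedBehaviorSanitizer for just this function, whose pointer arithmetic on the mapped heap is deliberately outside what UBSan accepts. A rough sketch of how such a per-function opt-out macro is typically defined; the real definition lives in the newly included sanitizers/ub.hpp and may differ in detail:

```cpp
// Sketch, assuming GCC/Clang attribute support.
#if defined(__clang__) || defined(__GNUC__)
  #define ATTRIBUTE_NO_UBSAN __attribute__((no_sanitize("undefined")))
#else
  #define ATTRIBUTE_NO_UBSAN
#endif
```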
6 changes: 6 additions & 0 deletions src/hotspot/share/classfile/verifier.cpp
@@ -32,6 +32,7 @@
#include "classfile/stackMapTableFormat.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
@@ -212,6 +213,11 @@ bool Verifier::verify(InstanceKlass* klass, bool should_verify_class, TRAPS) {
       exception_name == vmSymbols::java_lang_ClassFormatError())) {
     log_info(verification)("Fail over class verification to old verifier for: %s", klass->external_name());
     log_info(class, init)("Fail over class verification to old verifier for: %s", klass->external_name());
+    // Exclude any classes that fail over during dynamic dumping
+    if (CDSConfig::is_dumping_dynamic_archive()) {
+      SystemDictionaryShared::warn_excluded(klass, "Failed over class verification while dynamic dumping");
+      SystemDictionaryShared::set_excluded(klass);
+    }
     message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
     exception_message = message_buffer;
     exception_name = inference_verify(
10 changes: 8 additions & 2 deletions src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp
@@ -36,13 +36,19 @@
#include "runtime/threadWXSetters.inline.hpp"

bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
if (!is_armed(nm)) {
// Some other thread got here first and healed the oops
// and disarmed the nmethod. No need to continue.
return true;
}

ShenandoahReentrantLock* lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != nullptr, "Must be");
ShenandoahReentrantLocker locker(lock);

if (!is_armed(nm)) {
// Some other thread got here first and healed the oops
// and disarmed the nmethod.
// Some other thread managed to complete while we were
// waiting for lock. No need to continue.
return true;
}

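The change is a classic double-checked pattern: test the armed flag before taking the per-nmethod lock so already-disarmed nmethods skip locking entirely, then re-test under the lock because another thread may have done the work while we waited. A self-contained sketch of just that shape, with simplified stand-in types rather than the actual HotSpot API:

```cpp
// Double-checked disarm sketch; NMethod here is a stand-in, not HotSpot's nmethod.
#include <atomic>
#include <mutex>

struct NMethod {
  std::atomic<bool> armed{true};
  std::mutex lock;
};

bool entry_barrier(NMethod* nm) {
  if (!nm->armed.load(std::memory_order_acquire)) {
    return true;                    // fast path: already healed and disarmed
  }
  std::lock_guard<std::mutex> guard(nm->lock);
  if (!nm->armed.load(std::memory_order_relaxed)) {
    return true;                    // raced: another thread finished while we waited
  }
  // ... heal the embedded oops here, under the lock ...
  nm->armed.store(false, std::memory_order_release); // disarm for later callers
  return true;
}
```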
49 changes: 29 additions & 20 deletions src/hotspot/share/gc/shenandoah/shenandoahLock.cpp
@@ -32,40 +32,49 @@
#include "runtime/javaThread.hpp"
#include "runtime/os.inline.hpp"

// These are inline variants of Thread::SpinAcquire with optional blocking in VM.

class ShenandoahNoBlockOp : public StackObj {
public:
ShenandoahNoBlockOp(JavaThread* java_thread) {
assert(java_thread == nullptr, "Should not pass anything");
}
};

void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
Thread* thread = Thread::current();
if (allow_block_for_safepoint && thread->is_Java_thread()) {
contended_lock_internal<ThreadBlockInVM>(JavaThread::cast(thread));
contended_lock_internal<true>(JavaThread::cast(thread));
} else {
contended_lock_internal<ShenandoahNoBlockOp>(nullptr);
contended_lock_internal<false>(nullptr);
}
}

template<typename BlockOp>
template<bool ALLOW_BLOCK>
void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
int ctr = 0;
int yields = 0;
assert(!ALLOW_BLOCK || java_thread != nullptr, "Must have a Java thread when allowing block.");
// Spin this much on multi-processor, do not spin on multi-processor.
int ctr = os::is_MP() ? 0xFF : 0;
// Apply TTAS to avoid more expensive CAS calls if the lock is still held by other thread.
while (Atomic::load(&_state) == locked ||
Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
if ((++ctr & 0xFFF) == 0) {
BlockOp block(java_thread);
if (yields > 5) {
os::naked_short_sleep(1);
if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
// Lightly contended, spin a little if no safepoint is pending.
SpinPause();
ctr--;
} else if (ALLOW_BLOCK) {
ThreadBlockInVM block(java_thread);
if (SafepointSynchronize::is_synchronizing()) {
// If safepoint is pending, we want to block and allow safepoint to proceed.
// Normally, TBIVM above would block us in its destructor.
//
// But that blocking only happens when TBIVM knows the thread poll is armed.
// There is a window between announcing a safepoint and arming the thread poll
// during which trying to continuously enter TBIVM is counter-productive.
// Under high contention, we may end up going in circles thousands of times.
// To avoid it, we wait here until local poll is armed and then proceed
// to TBVIM exit for blocking. We do not SpinPause, but yield to let
// VM thread to arm the poll sooner.
while (SafepointSynchronize::is_synchronizing() &&
!SafepointMechanism::local_poll_armed(java_thread)) {
os::naked_yield();
}
} else {
os::naked_yield();
yields++;
}
} else {
SpinPause();
os::naked_yield();
}
}
}
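The loop above is a test-and-test-and-set (TTAS) lock: a plain load screens out the expensive CAS while the lock is held, so contending CPUs spin on a shared cache line instead of repeatedly invalidating it. A standalone sketch of just that idea, using std::atomic rather than HotSpot's Atomic; ShenandoahLock layers the spin budget and safepoint cooperation on top of this basic shape:

```cpp
// Minimal TTAS spinlock sketch.
#include <atomic>

class TTASLock {
  std::atomic<int> _state{0};  // 0 = unlocked, 1 = locked
public:
  void lock() {
    for (;;) {
      // Test: a plain load avoids CAS traffic while the lock is held.
      while (_state.load(std::memory_order_relaxed) == 1) { /* spin */ }
      // Test-and-set: attempt the CAS only once the lock looked free.
      int expected = 0;
      if (_state.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
        return;
      }
    }
  }
  void unlock() { _state.store(0, std::memory_order_release); }
};
```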
12 changes: 7 additions & 5 deletions src/hotspot/share/gc/shenandoah/shenandoahLock.hpp
@@ -37,20 +37,22 @@ class ShenandoahLock {
   shenandoah_padding(0);
   volatile LockState _state;
   shenandoah_padding(1);
-  volatile Thread* _owner;
+  Thread* volatile _owner;
   shenandoah_padding(2);
 
-  template<typename BlockOp>
+  template<bool ALLOW_BLOCK>
   void contended_lock_internal(JavaThread* java_thread);
 
 public:
   ShenandoahLock() : _state(unlocked), _owner(nullptr) {};
 
   void lock(bool allow_block_for_safepoint) {
     assert(Atomic::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock");
 
-    // Try to lock fast, or dive into contended lock handling.
-    if (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
+    if ((allow_block_for_safepoint && SafepointSynchronize::is_synchronizing()) ||
+        (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked)) {
+      // 1. Java thread, and there is a pending safepoint. Dive into contended locking
+      //    immediately without trying anything else, and block.
+      // 2. Fast lock fails, dive into contended lock handling.
       contended_lock(allow_block_for_safepoint);
     }
 
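The subtle fix in this header is declaration order: `volatile Thread*` declares a pointer to volatile data, while `Thread* volatile` makes the pointer itself volatile, which is what `Atomic::load(&_owner)` in the assert needs to read. A minimal illustration in standard C++, independent of HotSpot:

```cpp
struct Thread;

volatile Thread* p1;  // pointer to volatile Thread: the pointee is volatile,
                      // but reads/writes of p1 itself are ordinary accesses
Thread* volatile p2;  // volatile pointer to Thread: every load/store of the
                      // pointer value itself is a volatile access
```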
