
Commit

Merge tag 'jdk-24+2' into labsjdk/automation-6-13-2024-5213
Added tag jdk-24+2 for changeset 301bd70
OracleLabsAutomation committed Jun 13, 2024
2 parents 351f22c + 301bd70 commit f0be01d
Showing 552 changed files with 11,915 additions and 9,117 deletions.
2 changes: 1 addition & 1 deletion make/conf/jib-profiles.js
@@ -1213,7 +1213,7 @@ var getJibProfilesDependencies = function (input, common) {

jcov: {
organization: common.organization,
revision: "3.0-16-jdk-asm+1.0",
revision: "3.0-17-jdk-asm+1.0",
ext: "zip",
environment_name: "JCOV_HOME",
},
2 changes: 1 addition & 1 deletion make/modules/jdk.jpackage/Java.gmk
@@ -27,6 +27,6 @@ DISABLED_WARNINGS_java += dangling-doc-comments

COPY += .gif .png .txt .spec .script .prerm .preinst \
.postrm .postinst .list .sh .desktop .copyright .control .plist .template \
.icns .scpt .wxs .wxl .wxi .ico .bmp .tiff .service
.icns .scpt .wxs .wxl .wxi .ico .bmp .tiff .service .xsl

CLEAN += .properties
4 changes: 2 additions & 2 deletions make/src/classes/build/tools/jfr/GenerateJfrFiles.java
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -663,7 +663,7 @@ private static void printJfrEventControlHpp(Metadata metadata, File outputFile)
out.write("");
out.write("union JfrNativeSettings {");
out.write(" // Array version.");
out.write(" jfrNativeEventSetting bits[NUMBER_OF_EVENTS];");
out.write(" jfrNativeEventSetting bits[NUMBER_OF_EVENTS + NUMBER_OF_RESERVED_EVENTS];");
out.write(" // Then, to make it easy to debug,");
out.write(" // add named struct members also.");
out.write(" struct {");
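
For context, a minimal, self-contained C++ sketch of the union layout that GenerateJfrFiles.java emits (the event names and counts below are hypothetical; the real ones come from the JFR metadata). It shows why the array view has to cover the reserved slots as well: the array and the named struct alias the same storage, so both views must describe the same number of settings.

#include <cstdio>

// Stand-in for the generated per-event settings record (hypothetical fields).
struct jfrNativeEventSetting {
  long long threshold_ticks;
  long long cutoff_ticks;
  unsigned  stacktrace : 1;
  unsigned  enabled    : 1;
};

// Assumed counts, purely for illustration.
constexpr int NUMBER_OF_EVENTS          = 2;
constexpr int NUMBER_OF_RESERVED_EVENTS = 2;

union JfrNativeSettings {
  // Array version.
  jfrNativeEventSetting bits[NUMBER_OF_EVENTS + NUMBER_OF_RESERVED_EVENTS];
  // Named struct members, to make it easy to debug.
  struct {
    jfrNativeEventSetting ExampleEventA;   // hypothetical event names
    jfrNativeEventSetting ExampleEventB;
    jfrNativeEventSetting reserved[NUMBER_OF_RESERVED_EVENTS];
  } ev;
};

int main() {
  // If bits[] were sized with NUMBER_OF_EVENTS only, the named view would be
  // larger than the array view, and settings past the named events would no
  // longer be reachable through bits[].
  static_assert(sizeof(JfrNativeSettings) ==
                sizeof(jfrNativeEventSetting) * (NUMBER_OF_EVENTS + NUMBER_OF_RESERVED_EVENTS),
                "array view and named view must describe the same storage");
  std::printf("settings block: %zu bytes\n", sizeof(JfrNativeSettings));
  return 0;
}
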
42 changes: 24 additions & 18 deletions src/hotspot/cpu/aarch64/aarch64.ad
@@ -5628,24 +5628,24 @@ operand cmpOpLtGe()
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpUEqNeLtGe()
operand cmpOpUEqNeLeGt()
%{
match(Bool);
op_cost(0);

predicate(n->as_Bool()->_test._test == BoolTest::eq
|| n->as_Bool()->_test._test == BoolTest::ne
|| n->as_Bool()->_test._test == BoolTest::lt
|| n->as_Bool()->_test._test == BoolTest::ge);
predicate(n->as_Bool()->_test._test == BoolTest::eq ||
n->as_Bool()->_test._test == BoolTest::ne ||
n->as_Bool()->_test._test == BoolTest::le ||
n->as_Bool()->_test._test == BoolTest::gt);

format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0xb, "lt");
greater_equal(0xa, "ge");
less_equal(0xd, "le");
greater(0xc, "gt");
less(0x3, "lo");
greater_equal(0x2, "hs");
less_equal(0x9, "ls");
greater(0x8, "hi");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
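
For reference, a small stand-alone C++ sketch of the AArch64 condition-code encodings involved in this operand (the values are architectural; the names follow Assembler::Condition). The rewritten operand accepts le/gt rather than lt/ge tests and reports the unsigned encodings (lo/hs/ls/hi) instead of the signed ones, which is what the cbxx users further down check for.

#include <cassert>

// AArch64 condition-code encodings (subset relevant to this operand).
enum Condition : unsigned {
  EQ = 0x0,  // equal
  NE = 0x1,  // not equal
  HS = 0x2,  // unsigned >= (carry set)
  LO = 0x3,  // unsigned <  (carry clear)
  VS = 0x6,  // overflow
  VC = 0x7,  // no overflow
  HI = 0x8,  // unsigned >
  LS = 0x9,  // unsigned <=
  GE = 0xa,  // signed >=
  LT = 0xb,  // signed <
  GT = 0xc,  // signed >
  LE = 0xd   // signed <=
};

int main() {
  // The new interface maps eq/ne/le/gt to the unsigned codes EQ/NE/LS/HI.
  assert(EQ == 0x0 && NE == 0x1 && LS == 0x9 && HI == 0x8);
  return 0;
}
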
@@ -7780,7 +7780,7 @@ instruct membar_acquire() %{
ins_cost(VOLATILE_REF_COST);

format %{ "membar_acquire\n\t"
"dmb ish" %}
"dmb ishld" %}

ins_encode %{
__ block_comment("membar_acquire");
@@ -7834,11 +7834,13 @@ instruct membar_release() %{
ins_cost(VOLATILE_REF_COST);

format %{ "membar_release\n\t"
"dmb ish" %}
"dmb ishst\n\tdmb ishld" %}

ins_encode %{
__ block_comment("membar_release");
__ membar(Assembler::LoadStore|Assembler::StoreStore);
// These will be merged if AlwaysMergeDMB is enabled.
__ membar(Assembler::StoreStore);
__ membar(Assembler::LoadStore);
%}
ins_pipe(pipe_serial);
%}
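
A toy C++ sketch of the merging idea referenced in the comment above; this is not HotSpot's actual MacroAssembler logic, just an illustration that two back-to-back barrier requests (here StoreStore followed by LoadStore) can be coalesced into a single wider barrier when AlwaysMergeDMB is enabled.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Barrier kinds as bit flags, in the style of Assembler::Membar_mask_bits.
enum MembarBits : uint32_t {
  LoadLoad   = 1u << 0,
  LoadStore  = 1u << 1,
  StoreLoad  = 1u << 2,
  StoreStore = 1u << 3
};

// Toy "assembler" that widens the previously emitted barrier instead of
// emitting a second one when the two requests are adjacent.
class ToyAssembler {
  std::vector<uint32_t> barriers_;        // emitted barriers, newest last
  bool last_insn_was_membar_ = false;
  bool always_merge_dmb_;
public:
  explicit ToyAssembler(bool always_merge) : always_merge_dmb_(always_merge) {}

  void membar(uint32_t order_constraint) {
    if (always_merge_dmb_ && last_insn_was_membar_) {
      barriers_.back() |= order_constraint;  // merge into the previous barrier
    } else {
      barriers_.push_back(order_constraint); // emit a new barrier
    }
    last_insn_was_membar_ = true;
  }

  void other_instruction() { last_insn_was_membar_ = false; }

  std::size_t barrier_count() const { return barriers_.size(); }
};

int main() {
  ToyAssembler a(/*always_merge=*/true);
  a.membar(StoreStore);  // membar_release issues these two requests...
  a.membar(LoadStore);   // ...which collapse into one barrier here.
  std::printf("barriers emitted: %zu\n", a.barrier_count());  // prints 1
  return 0;
}
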
@@ -15685,7 +15687,7 @@ instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label
ins_pipe(pipe_cmp_branch);
%}

instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
match(If cmp (CmpU op1 op2));
effect(USE labl);

@@ -15694,15 +15696,17 @@ instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ || cond == Assembler::LS)
if (cond == Assembler::EQ || cond == Assembler::LS) {
__ cbzw($op1$$Register, *L);
else
} else {
assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
__ cbnzw($op1$$Register, *L);
}
%}
ins_pipe(pipe_cmp_branch);
%}

instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
match(If cmp (CmpUL op1 op2));
effect(USE labl);

@@ -15711,10 +15715,12 @@ instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl,
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ || cond == Assembler::LS)
if (cond == Assembler::EQ || cond == Assembler::LS) {
__ cbz($op1$$Register, *L);
else
} else {
assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
__ cbnz($op1$$Register, *L);
}
%}
ins_pipe(pipe_cmp_branch);
%}
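
The reason a single cbzw/cbz (or cbnzw/cbnz) can implement these unsigned comparisons against an immediate zero: for an unsigned value x, x <= 0 holds exactly when x == 0 and x > 0 exactly when x != 0, while x >= 0 is always true and x < 0 never holds, which is why only EQ/LS and NE/HI are expected in the asserts above. A short C++ check of those identities:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (uint64_t x : {0ull, 1ull, 42ull, UINT64_MAX}) {
    assert((x <= 0) == (x == 0));  // unsigned LS against zero is "is zero"  -> cbz
    assert((x >  0) == (x != 0));  // unsigned HI against zero is "non-zero" -> cbnz
  }
  std::printf("unsigned compare-with-zero identities hold\n");
  return 0;
}
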
62 changes: 42 additions & 20 deletions src/hotspot/cpu/aarch64/aarch64_vector.ad
@@ -135,9 +135,9 @@ source %{
(opcode == Op_VectorCastL2X && bt == T_FLOAT) ||
(opcode == Op_CountLeadingZerosV && bt == T_LONG) ||
(opcode == Op_CountTrailingZerosV && bt == T_LONG) ||
// The vector implementation of Op_AddReductionVD/F is for the Vector API only.
// It is not suitable for auto-vectorization because it does not add the elements
// in the same order as sequential code, and FP addition is non-associative.
// The implementations of Op_AddReductionVD/F in Neon are for the Vector API only.
// They are not suitable for auto-vectorization because the result would not conform
// to the JLS, Section Evaluation Order.
opcode == Op_AddReductionVD || opcode == Op_AddReductionVF ||
opcode == Op_MulReductionVD || opcode == Op_MulReductionVF ||
opcode == Op_MulVL) {
@@ -2858,26 +2858,28 @@ instruct reduce_addL_sve(iRegLNoSp dst, iRegL isrc, vReg vsrc, vRegD tmp) %{
%}

// reduction addF
// Floating-point addition is not associative, so the rules for AddReductionVF
// on NEON can't be used to auto-vectorize floating-point reduce-add.
// Currently, on NEON, AddReductionVF is only generated by Vector API.
instruct reduce_add2F_neon(vRegF dst, vRegF fsrc, vReg vsrc) %{
predicate(UseSVE == 0 && Matcher::vector_length(n->in(2)) == 2);

instruct reduce_non_strict_order_add2F_neon(vRegF dst, vRegF fsrc, vReg vsrc) %{
// Non-strictly ordered floating-point add reduction for a 64-bits-long vector. This rule is
// intended for the VectorAPI (which allows for non-strictly ordered add reduction).
predicate(Matcher::vector_length(n->in(2)) == 2 && !n->as_Reduction()->requires_strict_order());
match(Set dst (AddReductionVF fsrc vsrc));
effect(TEMP_DEF dst);
format %{ "reduce_add2F_neon $dst, $fsrc, $vsrc" %}
format %{ "reduce_non_strict_order_add2F_neon $dst, $fsrc, $vsrc" %}
ins_encode %{
__ faddp($dst$$FloatRegister, $vsrc$$FloatRegister, __ S);
__ fadds($dst$$FloatRegister, $dst$$FloatRegister, $fsrc$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}

instruct reduce_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
predicate(UseSVE == 0 && Matcher::vector_length(n->in(2)) == 4);
instruct reduce_non_strict_order_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
// Non-strictly ordered floating-point add reduction for 128-bits-long vector. This rule is
// intended for the VectorAPI (which allows for non-strictly ordered add reduction).
predicate(Matcher::vector_length(n->in(2)) == 4 && !n->as_Reduction()->requires_strict_order());
match(Set dst (AddReductionVF fsrc vsrc));
effect(TEMP_DEF dst, TEMP tmp);
format %{ "reduce_add4F_neon $dst, $fsrc, $vsrc\t# KILL $tmp" %}
format %{ "reduce_non_strict_order_add4F_neon $dst, $fsrc, $vsrc\t# KILL $tmp" %}
ins_encode %{
__ faddp($tmp$$FloatRegister, __ T4S, $vsrc$$FloatRegister, $vsrc$$FloatRegister);
__ faddp($dst$$FloatRegister, $tmp$$FloatRegister, __ S);
@@ -2886,11 +2888,21 @@ instruct reduce_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
ins_pipe(pipe_slow);
%}

// This rule calculates the reduction result in strict order. Two cases will
// reach here:
// 1. Non strictly-ordered AddReductionVF when vector size > 128-bits. For example -
// AddReductionVF generated by Vector API. For vector size > 128-bits, it is more
// beneficial performance-wise to generate direct SVE instruction even if it is
// strictly ordered.
// 2. Strictly-ordered AddReductionVF. For example - AddReductionVF generated by
// auto-vectorization on SVE machine.
instruct reduce_addF_sve(vRegF dst_src1, vReg src2) %{
predicate(UseSVE > 0);
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) ||
n->as_Reduction()->requires_strict_order());
match(Set dst_src1 (AddReductionVF dst_src1 src2));
format %{ "reduce_addF_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_fadda($dst_src1$$FloatRegister, __ S, ptrue, $src2$$FloatRegister);
@@ -2899,26 +2911,36 @@ instruct reduce_addF_sve(vRegF dst_src1, vReg src2) %{
%}

// reduction addD
// Floating-point addition is not associative, so the rule for AddReductionVD
// on NEON can't be used to auto-vectorize floating-point reduce-add.
// Currently, on NEON, AddReductionVD is only generated by Vector API.
instruct reduce_addD_neon(vRegD dst, vRegD dsrc, vReg vsrc) %{
predicate(UseSVE == 0);

instruct reduce_non_strict_order_add2D_neon(vRegD dst, vRegD dsrc, vReg vsrc) %{
// Non-strictly ordered floating-point add reduction for doubles. This rule is
// intended for the VectorAPI (which allows for non-strictly ordered add reduction).
predicate(!n->as_Reduction()->requires_strict_order());
match(Set dst (AddReductionVD dsrc vsrc));
effect(TEMP_DEF dst);
format %{ "reduce_addD_neon $dst, $dsrc, $vsrc\t# 2D" %}
format %{ "reduce_non_strict_order_add2D_neon $dst, $dsrc, $vsrc\t# 2D" %}
ins_encode %{
__ faddp($dst$$FloatRegister, $vsrc$$FloatRegister, __ D);
__ faddd($dst$$FloatRegister, $dst$$FloatRegister, $dsrc$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}

// This rule calculates the reduction result in strict order. Two cases will
// reach here:
// 1. Non strictly-ordered AddReductionVD when vector size > 128-bits. For example -
// AddReductionVD generated by Vector API. For vector size > 128-bits, it is more
// beneficial performance-wise to generate direct SVE instruction even if it is
// strictly ordered.
// 2. Strictly-ordered AddReductionVD. For example - AddReductionVD generated by
// auto-vectorization on SVE machine.
instruct reduce_addD_sve(vRegD dst_src1, vReg src2) %{
predicate(UseSVE > 0);
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) ||
n->as_Reduction()->requires_strict_order());
match(Set dst_src1 (AddReductionVD dst_src1 src2));
format %{ "reduce_addD_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_fadda($dst_src1$$FloatRegister, __ D, ptrue, $src2$$FloatRegister);
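
A small C++ illustration of the ordering distinction these rules are built around: single-precision addition is not associative, so a strictly ordered (left-to-right) reduce-add and a pairwise, faddp-style reduce-add can produce different results for the same lane values. The SVE fadda used above accumulates strictly in lane order, which is why the _sve rules can also serve the strictly ordered case.

#include <cstdio>

int main() {
  // Four lanes of a float vector, reduced with an initial accumulator of 0.0f.
  float v[4] = {1e8f, 1.0f, -1e8f, 1.0f};

  // Strictly ordered reduction: (((0 + v0) + v1) + v2) + v3.
  float strict = 0.0f;
  for (float x : v) strict += x;

  // Pairwise reduction in the style of the Neon rules: (v0 + v1) + (v2 + v3).
  float pairwise = (v[0] + v[1]) + (v[2] + v[3]);

  std::printf("strict   = %g\n", strict);    // 1 (the last 1.0f survives)
  std::printf("pairwise = %g\n", pairwise);  // 0 (both 1.0f are absorbed by 1e8f)
  return 0;
}
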
50 changes: 31 additions & 19 deletions src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
@@ -125,9 +125,9 @@ source %{
(opcode == Op_VectorCastL2X && bt == T_FLOAT) ||
(opcode == Op_CountLeadingZerosV && bt == T_LONG) ||
(opcode == Op_CountTrailingZerosV && bt == T_LONG) ||
// The vector implementation of Op_AddReductionVD/F is for the Vector API only.
// It is not suitable for auto-vectorization because it does not add the elements
// in the same order as sequential code, and FP addition is non-associative.
// The implementations of Op_AddReductionVD/F in Neon are for the Vector API only.
// They are not suitable for auto-vectorization because the result would not conform
// to the JLS, Section Evaluation Order.
opcode == Op_AddReductionVD || opcode == Op_AddReductionVF ||
opcode == Op_MulReductionVD || opcode == Op_MulReductionVF ||
opcode == Op_MulVL) {
@@ -1752,26 +1752,28 @@ REDUCE_ADD_INT_NEON_SVE_PAIRWISE(I, iRegIorL2I)
REDUCE_ADD_INT_NEON_SVE_PAIRWISE(L, iRegL)

// reduction addF
// Floating-point addition is not associative, so the rules for AddReductionVF
// on NEON can't be used to auto-vectorize floating-point reduce-add.
// Currently, on NEON, AddReductionVF is only generated by Vector API.
instruct reduce_add2F_neon(vRegF dst, vRegF fsrc, vReg vsrc) %{
predicate(UseSVE == 0 && Matcher::vector_length(n->in(2)) == 2);

instruct reduce_non_strict_order_add2F_neon(vRegF dst, vRegF fsrc, vReg vsrc) %{
// Non-strictly ordered floating-point add reduction for a 64-bits-long vector. This rule is
// intended for the VectorAPI (which allows for non-strictly ordered add reduction).
predicate(Matcher::vector_length(n->in(2)) == 2 && !n->as_Reduction()->requires_strict_order());
match(Set dst (AddReductionVF fsrc vsrc));
effect(TEMP_DEF dst);
format %{ "reduce_add2F_neon $dst, $fsrc, $vsrc" %}
format %{ "reduce_non_strict_order_add2F_neon $dst, $fsrc, $vsrc" %}
ins_encode %{
__ faddp($dst$$FloatRegister, $vsrc$$FloatRegister, __ S);
__ fadds($dst$$FloatRegister, $dst$$FloatRegister, $fsrc$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}

instruct reduce_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
predicate(UseSVE == 0 && Matcher::vector_length(n->in(2)) == 4);
instruct reduce_non_strict_order_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
// Non-strictly ordered floating-point add reduction for 128-bits-long vector. This rule is
// intended for the VectorAPI (which allows for non-strictly ordered add reduction).
predicate(Matcher::vector_length(n->in(2)) == 4 && !n->as_Reduction()->requires_strict_order());
match(Set dst (AddReductionVF fsrc vsrc));
effect(TEMP_DEF dst, TEMP tmp);
format %{ "reduce_add4F_neon $dst, $fsrc, $vsrc\t# KILL $tmp" %}
format %{ "reduce_non_strict_order_add4F_neon $dst, $fsrc, $vsrc\t# KILL $tmp" %}
ins_encode %{
__ faddp($tmp$$FloatRegister, __ T4S, $vsrc$$FloatRegister, $vsrc$$FloatRegister);
__ faddp($dst$$FloatRegister, $tmp$$FloatRegister, __ S);
@@ -1783,11 +1785,21 @@ dnl
dnl REDUCE_ADD_FP_SVE($1, $2 )
dnl REDUCE_ADD_FP_SVE(type, size)
define(`REDUCE_ADD_FP_SVE', `
// This rule calculates the reduction result in strict order. Two cases will
// reach here:
// 1. Non strictly-ordered AddReductionV$1 when vector size > 128-bits. For example -
// AddReductionV$1 generated by Vector API. For vector size > 128-bits, it is more
// beneficial performance-wise to generate direct SVE instruction even if it is
// strictly ordered.
// 2. Strictly-ordered AddReductionV$1. For example - AddReductionV$1 generated by
// auto-vectorization on SVE machine.
instruct reduce_add$1_sve(vReg$1 dst_src1, vReg src2) %{
predicate(UseSVE > 0);
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) ||
n->as_Reduction()->requires_strict_order());
match(Set dst_src1 (AddReductionV$1 dst_src1 src2));
format %{ "reduce_add$1_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_fadda($dst_src1$$FloatRegister, __ $2, ptrue, $src2$$FloatRegister);
@@ -1798,14 +1810,14 @@ dnl
REDUCE_ADD_FP_SVE(F, S)

// reduction addD
// Floating-point addition is not associative, so the rule for AddReductionVD
// on NEON can't be used to auto-vectorize floating-point reduce-add.
// Currently, on NEON, AddReductionVD is only generated by Vector API.
instruct reduce_addD_neon(vRegD dst, vRegD dsrc, vReg vsrc) %{
predicate(UseSVE == 0);

instruct reduce_non_strict_order_add2D_neon(vRegD dst, vRegD dsrc, vReg vsrc) %{
// Non-strictly ordered floating-point add reduction for doubles. This rule is
// intended for the VectorAPI (which allows for non-strictly ordered add reduction).
predicate(!n->as_Reduction()->requires_strict_order());
match(Set dst (AddReductionVD dsrc vsrc));
effect(TEMP_DEF dst);
format %{ "reduce_addD_neon $dst, $dsrc, $vsrc\t# 2D" %}
format %{ "reduce_non_strict_order_add2D_neon $dst, $dsrc, $vsrc\t# 2D" %}
ins_encode %{
__ faddp($dst$$FloatRegister, $vsrc$$FloatRegister, __ D);
__ faddd($dst$$FloatRegister, $dst$$FloatRegister, $dsrc$$FloatRegister);
2 changes: 2 additions & 0 deletions src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -124,6 +124,8 @@ define_pd_global(intx, InlineSmallCode, 1000);
range(1, 99) \
product(ccstr, UseBranchProtection, "none", \
"Branch Protection to use: none, standard, pac-ret") \
product(bool, AlwaysMergeDMB, true, DIAGNOSTIC, \
"Always merge DMB instructions in code emission") \

// end of ARCH_FLAGS

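Since the new AlwaysMergeDMB flag is declared DIAGNOSTIC, toggling it on a release build should require unlocking diagnostic options first, along the lines of java -XX:+UnlockDiagnosticVMOptions -XX:-AlwaysMergeDMB (assuming the usual handling of diagnostic VM flags); it defaults to true, so the merged-barrier code paths above are exercised by default.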