diff --git a/.jcheck/conf b/.jcheck/conf index 17552ee7361..617f3d95ed8 100644 --- a/.jcheck/conf +++ b/.jcheck/conf @@ -5,6 +5,7 @@ version=23 [checks] error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists +warning=issuestitle [repository] tags=(?:jdk-(?:[1-9]([0-9]*)(?:\.(?:0|[1-9][0-9]*)){0,4})(?:\+(?:(?:[0-9]+))|(?:-ga)))|(?:jdk[4-9](?:u\d{1,3})?-(?:(?:b\d{2,3})|(?:ga)))|(?:hs\d\d(?:\.\d{1,2})?-b\d\d) diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4 index 8c91a2ccd33..23f331e79b7 100644 --- a/make/autoconf/jdk-options.m4 +++ b/make/autoconf/jdk-options.m4 @@ -190,6 +190,17 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS], fi AC_SUBST(INCLUDE_SA) + # Setup default CDS alignment. On platforms where one build may run on machines with different + # page sizes, the JVM chooses a compatible alignment to fit all possible page sizes. This slightly + # increases archive size. + # The only platform having this problem at the moment is Linux on aarch64, which may encounter + # three different page sizes: 4K, 64K, and if run on Mac m1 hardware, 16K. 
+ COMPATIBLE_CDS_ALIGNMENT_DEFAULT=false + if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then + COMPATIBLE_CDS_ALIGNMENT_DEFAULT=true + fi + AC_SUBST(COMPATIBLE_CDS_ALIGNMENT_DEFAULT) + # Compress jars COMPRESS_JARS=false @@ -673,7 +684,7 @@ AC_DEFUN([JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE], # AC_DEFUN([JDKOPT_ENABLE_DISABLE_COMPATIBLE_CDS_ALIGNMENT], [ - UTIL_ARG_ENABLE(NAME: compatible-cds-alignment, DEFAULT: false, + UTIL_ARG_ENABLE(NAME: compatible-cds-alignment, DEFAULT: $COMPATIBLE_CDS_ALIGNMENT_DEFAULT, RESULT: ENABLE_COMPATIBLE_CDS_ALIGNMENT, DESC: [enable use alternative compatible cds core region alignment], DEFAULT_DESC: [disabled], diff --git a/make/conf/module-loader-map.conf b/make/conf/module-loader-map.conf index 5b72bf78aa0..e904031186d 100644 --- a/make/conf/module-loader-map.conf +++ b/make/conf/module-loader-map.conf @@ -120,6 +120,7 @@ NATIVE_ACCESS_MODULES= \ jdk.dynalink \ jdk.httpserver \ jdk.incubator.vector \ + jdk.internal.le \ jdk.internal.vm.ci \ jdk.jfr \ jdk.jsobject \ diff --git a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java index 8d39ef4c1d5..15251358a01 100644 --- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java +++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java @@ -87,6 +87,7 @@ public class CLDRConverter { static final String ZONE_NAME_PREFIX = "timezone.displayname."; static final String METAZONE_ID_PREFIX = "metazone.id."; static final String PARENT_LOCALE_PREFIX = "parentLocale."; + static final String LIKELY_SCRIPT_PREFIX = "likelyScript."; static final String META_EMPTY_ZONE_NAME = "EMPTY_ZONE"; static final String[] EMPTY_ZONE = {"", "", "", "", "", ""}; static final String META_ETCUTC_ZONE_NAME = "ETC_UTC"; @@ -114,9 +115,13 @@ public class CLDRConverter { // "parentLocales" map private static final Map> parentLocalesMap = new HashMap<>(); + static boolean 
nonlikelyScript; private static final ResourceBundle.Control defCon = ResourceBundle.Control.getControl(ResourceBundle.Control.FORMAT_DEFAULT); + // "likelyScript" map + private static final Map> likelyScriptMap = new HashMap<>(); + private static Set AVAILABLE_TZIDS; static int copyrightYear; static String jdkHeaderTemplate; @@ -175,7 +180,7 @@ String getKeyword() { private static boolean verbose; private CLDRConverter() { - // no instantiation + // no instantiation } @SuppressWarnings("AssignmentToForLoopParameter") @@ -475,8 +480,8 @@ private static void parseSupplemental() throws Exception { parseLDMLFile(new File(SPPL_SOURCE_FILE), handlerSuppl); Map parentData = handlerSuppl.getData("root"); parentData.keySet().stream() - .filter(key -> key.startsWith(PARENT_LOCALE_PREFIX)) - .forEach(key -> { + .filter(key -> key.startsWith(PARENT_LOCALE_PREFIX)) + .forEach(key -> { parentLocalesMap.put(key, new TreeSet( Arrays.asList(((String)parentData.get(key)).split(" ")))); }); @@ -492,6 +497,16 @@ private static void parseSupplemental() throws Exception { // Parse likelySubtags handlerLikelySubtags = new LikelySubtagsParseHandler(); parseLDMLFile(new File(LIKELYSUBTAGS_SOURCE_FILE), handlerLikelySubtags); + handlerLikelySubtags.getData().forEach((from, to) -> { + if (!from.contains("-")) { // look for language-only tag + var script = to.split("-")[1]; + var key = LIKELY_SCRIPT_PREFIX + script; + var prev = likelyScriptMap.putIfAbsent(key, new TreeSet(Set.of(from))); + if (prev != null) { + prev.add(from); + } + } + }); // Parse supplementalMetadata // Currently interested in deprecated time zone ids and language aliases. @@ -561,6 +576,7 @@ private static void convertBundles(List bundles) throws Exception { // for now. 
if (isBaseModule) { metaInfo.putAll(parentLocalesMap); + metaInfo.putAll(likelyScriptMap); } for (Bundle bundle : bundles) { @@ -1135,7 +1151,7 @@ private static List applyParentLocales(String baseName, List can // check irregular parents for (int i = 0; i < candidates.size(); i++) { Locale l = candidates.get(i); - Locale p = childToParentLocaleMap.get(l); + Locale p = getParentLocale(l); if (!l.equals(Locale.ROOT) && Objects.nonNull(p) && !candidates.get(i+1).equals(p)) { @@ -1152,6 +1168,27 @@ private static List applyParentLocales(String baseName, List can return candidates; } + private static Locale getParentLocale(Locale child) { + Locale parent = childToParentLocaleMap.get(child); + + // check non-likely script for root + if (nonlikelyScript && parent == null && child.getCountry().isEmpty()) { + var lang = " " + child.getLanguage() + " "; + var script = child.getScript(); + + if (!script.isEmpty()) { + parent = likelyScriptMap.entrySet().stream() + .filter(e -> e.getValue().contains(lang)) + .findAny() + .map(Map.Entry::getKey) + .map(likely -> likely.equals(script) ? null : Locale.ROOT) + .orElse(null); + } + } + + return parent; + } + private static void generateZoneName() throws Exception { Files.createDirectories(Paths.get(DESTINATION_DIR, "java", "time", "format")); Files.write(Paths.get(DESTINATION_DIR, "java", "time", "format", "ZoneName.java"), diff --git a/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java b/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java index 49c74544be6..3953f38f653 100644 --- a/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java +++ b/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,7 @@ import java.util.Objects; import java.util.Set; import java.util.SortedSet; +import java.util.stream.Collectors; class ResourceBundleGenerator implements BundleGenerator { // preferred timezones - keeping compatibility with JDK1.1 3 letter abbreviations @@ -306,81 +307,77 @@ public void generateMetaInfo(Map> metaInfo) throws IOE import sun.util.locale.provider.LocaleProviderAdapter; public class %s implements LocaleDataMetaInfo { - private static final Map resourceNameToLocales = HashMap.newHashMap(%d); - %s - static { - """, CLDRConverter.isBaseModule ? "cldr" : "resources.cldr.provider", - className, metaInfo.keySet().stream().filter(k -> k.equals("AvailableLocales")).count(), - CLDRConverter.isBaseModule ? - """ + """, + CLDRConverter.isBaseModule ? "cldr" : "resources.cldr.provider", + className); + + if (CLDRConverter.isBaseModule) { + out.printf(""" private static final Map parentLocalesMap = HashMap.newHashMap(%d); private static final Map languageAliasMap = HashMap.newHashMap(%d); + static final boolean nonlikelyScript = %s; // package access from CLDRLocaleProviderAdapter + + static { """.formatted( metaInfo.keySet().stream().filter(k -> k.startsWith(CLDRConverter.PARENT_LOCALE_PREFIX)).count(), - CLDRConverter.handlerSupplMeta.getLanguageAliasData().size()) : - ""); - - for (String key : metaInfo.keySet()) { - if (key.startsWith(CLDRConverter.PARENT_LOCALE_PREFIX)) { - String parentTag = key.substring(CLDRConverter.PARENT_LOCALE_PREFIX.length()); - if ("root".equals(parentTag)) { - out.printf(" parentLocalesMap.put(Locale.ROOT,\n"); - } else { - out.printf(" parentLocalesMap.put(Locale.forLanguageTag(\"%s\"),\n", - parentTag); - } - String[] children = toLocaleList(metaInfo.get(key), true).split(" "); - Arrays.sort(children); - out.printf(" new String[] {\n" + - " "); - int count = 0; - for (int i = 0; i < children.length; i++) { - String child = children[i]; - 
out.printf("\"%s\", ", child); - count += child.length() + 4; - if (i != children.length - 1 && count > 64) { - out.printf("\n "); - count = 0; + CLDRConverter.handlerSupplMeta.getLanguageAliasData().size(), + Boolean.valueOf(CLDRConverter.nonlikelyScript))); + + for (String key : metaInfo.keySet()) { + if (key.startsWith(CLDRConverter.PARENT_LOCALE_PREFIX)) { + String parentTag = key.substring(CLDRConverter.PARENT_LOCALE_PREFIX.length()); + if ("root".equals(parentTag)) { + out.printf(" parentLocalesMap.put(Locale.ROOT,\n"); + } else { + out.printf(" parentLocalesMap.put(Locale.forLanguageTag(\"%s\"),\n", + parentTag); } - } - out.printf("\n });\n"); - } else { - if ("AvailableLocales".equals(key)) { - out.printf(" resourceNameToLocales.put(\"%s\",\n", key); - out.printf(" \"%s\");\n", toLocaleList(applyLanguageAliases(metaInfo.get(key)), false)); + generateStringArray(metaInfo.get(key), out); } } - } - // for languageAliasMap - if (CLDRConverter.isBaseModule) { + out.println(); + + // for languageAliasMap CLDRConverter.handlerSupplMeta.getLanguageAliasData().forEach((key, value) -> { out.printf(" languageAliasMap.put(\"%s\", \"%s\");\n", key, value); }); - } + out.printf(" }\n\n"); - out.printf(" }\n\n"); + // end of static initializer block. - // end of static initializer block. 
- - // Canonical TZ names for delayed initialization - if (CLDRConverter.isBaseModule) { + // Delayed initialization section out.printf(""" - private static class TZCanonicalIDMapHolder { - static final Map tzCanonicalIDMap = HashMap.newHashMap(%d); + private static class CLDRMapHolder { + private static final Map tzCanonicalIDMap = HashMap.newHashMap(%d); + private static final Map likelyScriptMap = HashMap.newHashMap(%d); + static { - """, CLDRConverter.handlerTimeZone.getData().size()); + """, CLDRConverter.handlerTimeZone.getData().size(), + metaInfo.keySet().stream().filter(k -> k.startsWith(CLDRConverter.LIKELY_SCRIPT_PREFIX)).count()); CLDRConverter.handlerTimeZone.getData().entrySet().stream() .forEach(e -> { String[] ids = ((String)e.getValue()).split("\\s"); out.printf(" tzCanonicalIDMap.put(\"%s\", \"%s\");\n", e.getKey(), - ids[0]); + ids[0]); for (int i = 1; i < ids.length; i++) { out.printf(" tzCanonicalIDMap.put(\"%s\", \"%s\");\n", ids[i], ids[0]); } }); - out.printf(" }\n }\n\n"); + out.println(); + + // for likelyScript map + for (String key : metaInfo.keySet()) { + if (key.startsWith(CLDRConverter.LIKELY_SCRIPT_PREFIX)) { + // ensure spaces at the begin/end for delimiting purposes + out.printf(" likelyScriptMap.put(\"%s\", \"%s\");\n", + key.substring(CLDRConverter.LIKELY_SCRIPT_PREFIX.length()), + " " + metaInfo.get(key).stream().collect(Collectors.joining(" ")) + " "); + } + } + out.printf(" }\n }\n"); } + out.println(); out.printf(""" @Override @@ -390,12 +387,13 @@ public LocaleProviderAdapter.Type getType() { @Override public String availableLanguageTags(String category) { - return resourceNameToLocales.getOrDefault(category, ""); + return " %s"; } - %s - } """, - CLDRConverter.isBaseModule ? 
""" + toLocaleList(applyLanguageAliases(metaInfo.get("AvailableLocales")), false)); + + if(CLDRConverter.isBaseModule) { + out.printf(""" @Override public Map getLanguageAliasMap() { @@ -404,16 +402,41 @@ public Map getLanguageAliasMap() { @Override public Map tzCanonicalIDs() { - return TZCanonicalIDMapHolder.tzCanonicalIDMap; + return CLDRMapHolder.tzCanonicalIDMap; } public Map parentLocales() { return parentLocalesMap; } - """ : ""); + + // package access from CLDRLocaleProviderAdapter + Map likelyScriptMap() { + return CLDRMapHolder.likelyScriptMap; + } + """); + } + out.printf("}\n"); } } + private static void generateStringArray(SortedSet set, PrintWriter out) throws IOException { + String[] children = toLocaleList(set, true).split(" "); + Arrays.sort(children); + out.printf(" new String[] {\n" + + " "); + int count = 0; + for (int i = 0; i < children.length; i++) { + String child = children[i]; + out.printf("\"%s\", ", child); + count += child.length() + 4; + if (i != children.length - 1 && count > 64) { + out.printf("\n "); + count = 0; + } + } + out.printf("\n });\n"); + } + private static final Locale.Builder LOCALE_BUILDER = new Locale.Builder(); private static boolean isBaseLocale(String localeID) { localeID = localeID.replaceAll("-", "_"); @@ -433,7 +456,9 @@ private static String toLocaleList(SortedSet set, boolean all) { if (!all && CLDRConverter.isBaseModule ^ isBaseLocale(id)) { continue; } - sb.append(' '); + if (sb.length() > 0) { + sb.append(' '); + } sb.append(id); } } diff --git a/make/jdk/src/classes/build/tools/cldrconverter/SupplementalDataParseHandler.java b/make/jdk/src/classes/build/tools/cldrconverter/SupplementalDataParseHandler.java index f4d20160ffb..9ba4d645199 100644 --- a/make/jdk/src/classes/build/tools/cldrconverter/SupplementalDataParseHandler.java +++ b/make/jdk/src/classes/build/tools/cldrconverter/SupplementalDataParseHandler.java @@ -57,29 +57,22 @@ class SupplementalDataParseHandler extends AbstractLDMLHandler { // the 
weekData is listed using country code. // // weekData are generated per each country - private final Map firstDayMap; - private final Map minDaysMap; + private static final Map firstDayMap = new HashMap<>(); + private static final Map minDaysMap = new HashMap<>(); // Parent locales. These information will only be // generated towards the base meta info, with the format of // // parentLocale.=(" ")+ - private final Map parentLocalesMap; + private static final Map parentLocalesMap = new HashMap<>(); // Input Skeleton map for "preferred" and "allowed" // Map<"preferred"/"allowed", Map<"skeleton", SortedSet<"regions">>> - private final Map>> inputSkeletonMap; + private static final Map>> inputSkeletonMap = new HashMap<>(); // "component" specific to this parent locale chain private String currentParentLocaleComponent; - SupplementalDataParseHandler() { - firstDayMap = new HashMap<>(); - minDaysMap = new HashMap<>(); - parentLocalesMap = new HashMap<>(); - inputSkeletonMap = new HashMap<>(); - } - /** * It returns Map that contains the firstDay and minDays information for * the country. 
The Map is created in JRE format after obtaining the data @@ -158,9 +151,15 @@ public void startElement(String uri, String localName, String qName, Attributes // Ignore component for now, otherwise "zh-Hant" falling back to "zh" would happen // https://github.com/unicode-org/cldr/pull/2664 if (currentParentLocaleComponent == null) { + var parent = attributes.getValue("parent").replaceAll("_", "-"); + parentLocalesMap.put( - attributes.getValue("parent").replaceAll("_", "-"), + parent, attributes.getValue("locales").replaceAll("_", "-")); + + if ("root".equals(parent)) { + CLDRConverter.nonlikelyScript = "nonlikelyScript".equals(attributes.getValue("localeRules")); + } } } break; diff --git a/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java b/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java index 9655e08016c..673e9829488 100644 --- a/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java +++ b/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -312,9 +312,6 @@ private static int makeSpecialCaseEntry(String currencyInfo) throws Exception { checkCurrencyCode(newCurrency); String timeString = currencyInfo.substring(4, length - 4); long time = format.parse(timeString).getTime(); - if (Math.abs(time - System.currentTimeMillis()) > ((long) 10) * 365 * 24 * 60 * 60 * 1000) { - throw new RuntimeException("time is more than 10 years from present: " + time); - } specialCaseCutOverTimes[specialCaseCount] = time; specialCaseOldCurrencies[specialCaseCount] = oldCurrency; specialCaseOldCurrenciesDefaultFractionDigits[specialCaseCount] = getDefaultFractionDigits(oldCurrency); diff --git a/make/modules/jdk.internal.le/Lib.gmk b/make/modules/jdk.internal.le/Lib.gmk deleted file mode 100644 index e81137e0d53..00000000000 --- a/make/modules/jdk.internal.le/Lib.gmk +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. Oracle designates this -# particular file as subject to the "Classpath" exception as provided -# by Oracle in the LICENSE file that accompanied this code. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# - -include LibCommon.gmk - -ifeq ($(call isTargetOs, aix), false) - ############################################################################## - ## Build lible - ############################################################################## - - $(eval $(call SetupJdkLibrary, BUILD_LIBLE, \ - NAME := le, \ - LINK_TYPE := C++, \ - OPTIMIZATION := LOW, \ - EXTRA_HEADER_DIRS := \ - java.base:libjava \ - java.base:libjvm, \ - LD_SET_ORIGIN := false, \ - LIBS_windows := user32.lib, \ - )) - - TARGETS += $(BUILD_LIBLE) -endif diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index 9bd8c6b8e9f..615c8e19ac8 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -1212,7 +1212,8 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { arrayOopDesc::base_offset_in_bytes(op->type()), array_element_size(op->type()), op->klass()->as_register(), - *op->stub()->entry()); + *op->stub()->entry(), + op->zero_array()); } __ bind(*op->stub()->continuation()); } @@ -2504,7 +2505,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ call_VM_leaf(entry, 3); } - __ bind(*stub->continuation()); + if (stub != nullptr) { + __ bind(*stub->continuation()); + } } diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp index 568be65e144..8f1260feba3 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp @@ -878,7 +878,13 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) { assert(x->number_of_arguments() == 5, "wrong type"); // Make all state_for calls early since they can emit code - CodeEmitInfo* info = state_for(x, x->state()); + 
CodeEmitInfo* info = nullptr; + if (x->state_before() != nullptr && x->state_before()->force_reexecute()) { + info = state_for(x, x->state_before()); + info->set_force_reexecute(); + } else { + info = state_for(x, x->state()); + } LIRItem src(x->argument_at(0), this); LIRItem src_pos(x->argument_at(1), this); @@ -911,6 +917,9 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) { int flags; ciArrayKlass* expected_type; arraycopy_helper(x, &flags, &expected_type); + if (x->check_flag(Instruction::OmitChecksFlag)) { + flags = 0; + } __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint } @@ -1132,7 +1141,13 @@ void LIRGenerator::do_NewInstance(NewInstance* x) { } void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { - CodeEmitInfo* info = state_for(x, x->state()); + CodeEmitInfo* info = nullptr; + if (x->state_before() != nullptr && x->state_before()->force_reexecute()) { + info = state_for(x, x->state_before()); + info->set_force_reexecute(); + } else { + info = state_for(x, x->state()); + } LIRItem length(x->length(), this); length.load_item_force(FrameMap::r19_opr); @@ -1149,7 +1164,7 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg); CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); - __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path); + __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array()); LIR_Opr result = rlock_result(x); __ move(reg, result); diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp index e48d64d9069..c0455ad1bff 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp @@ -272,7 +272,7 @@ void 
C1_MacroAssembler::initialize_object(Register obj, Register klass, Register verify_oop(obj); } -void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) { +void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case, bool zero_array) { assert_different_registers(obj, len, t1, t2, klass); // determine alignment mask @@ -297,7 +297,9 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, // following the length field in initialize_header(). int base_offset = align_up(base_offset_in_bytes, BytesPerWord); // clear rest of allocated space - initialize_body(obj, arr_size, base_offset, t1, t2); + if (zero_array) { + initialize_body(obj, arr_size, base_offset, t1, t2); + } if (Compilation::current()->bailed_out()) { return; } diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp index 3a4c868744c..fc8e83d706b 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp @@ -100,7 +100,8 @@ using MacroAssembler::null_check; // base_offset_in_bytes: offset of first array element, in bytes // f : element scale factor // slow_case : exit to slow case implementation if fast allocation fails - void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case); + // zero_array : zero the allocated array or not + void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case, bool zero_array); int rsp_offset() const { return _rsp_offset; } void set_rsp_offset(int n) { _rsp_offset = n; } diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp 
b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index b90b5862ee5..faba321afc7 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -5535,12 +5535,22 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, Label DONE, SAME; Register tmp1 = rscratch1; Register tmp2 = rscratch2; - Register cnt2 = tmp2; // cnt2 only used in array length compare int elem_per_word = wordSize/elem_size; int log_elem_size = exact_log2(elem_size); + int klass_offset = arrayOopDesc::klass_offset_in_bytes(); int length_offset = arrayOopDesc::length_offset_in_bytes(); int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); + // When the length offset is not aligned to 8 bytes, + // then we align it down. This is valid because the new + // offset will always be the klass which is the same + // for type arrays. + int start_offset = align_down(length_offset, BytesPerWord); + int extra_length = base_offset - start_offset; + assert(start_offset == length_offset || start_offset == klass_offset, + "start offset must be 8-byte-aligned or be the klass offset"); + assert(base_offset != start_offset, "must include the length field"); + extra_length = extra_length / elem_size; // We count in elements, not bytes. int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); assert(elem_size == 1 || elem_size == 2, "must be char or byte"); @@ -5574,11 +5584,10 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, // return false; bind(A_IS_NOT_NULL); ldrw(cnt1, Address(a1, length_offset)); - ldrw(cnt2, Address(a2, length_offset)); - eorw(tmp5, cnt1, cnt2); - cbnzw(tmp5, DONE); - lea(a1, Address(a1, base_offset)); - lea(a2, Address(a2, base_offset)); + // Increase loop counter by diff between base- and actual start-offset. 
+ addw(cnt1, cnt1, extra_length); + lea(a1, Address(a1, start_offset)); + lea(a2, Address(a2, start_offset)); // Check for short strings, i.e. smaller than wordSize. subs(cnt1, cnt1, elem_per_word); br(Assembler::LT, SHORT); @@ -5641,18 +5650,18 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, cbz(a1, DONE); ldrw(cnt1, Address(a1, length_offset)); cbz(a2, DONE); - ldrw(cnt2, Address(a2, length_offset)); + // Increase loop counter by diff between base- and actual start-offset. + addw(cnt1, cnt1, extra_length); + // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's // faster to perform another branch before comparing a1 and a2 cmp(cnt1, (u1)elem_per_word); br(LE, SHORT); // short or same - ldr(tmp3, Address(pre(a1, base_offset))); + ldr(tmp3, Address(pre(a1, start_offset))); subs(zr, cnt1, stubBytesThreshold); br(GE, STUB); - ldr(tmp4, Address(pre(a2, base_offset))); + ldr(tmp4, Address(pre(a2, start_offset))); sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); - cmp(cnt2, cnt1); - br(NE, DONE); // Main 16 byte comparison loop with 2 exits bind(NEXT_DWORD); { @@ -5684,9 +5693,7 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, b(LAST_CHECK); bind(STUB); - ldr(tmp4, Address(pre(a2, base_offset))); - cmp(cnt2, cnt1); - br(NE, DONE); + ldr(tmp4, Address(pre(a2, start_offset))); if (elem_size == 2) { // convert to byte counter lsl(cnt1, cnt1, 1); } @@ -5707,12 +5714,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, mov(result, a2); b(DONE); bind(SHORT); - cmp(cnt2, cnt1); - br(NE, DONE); - cbz(cnt1, SAME); sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); - ldr(tmp3, Address(a1, base_offset)); - ldr(tmp4, Address(a2, base_offset)); + ldr(tmp3, Address(a1, start_offset)); + ldr(tmp4, Address(a2, start_offset)); bind(LAST_CHECK); eor(tmp4, tmp3, tmp4); lslv(tmp5, tmp4, tmp5); diff --git a/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp 
b/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp index 5424f0d9c75..332f2499693 100644 --- a/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp @@ -32,7 +32,7 @@ #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { +void Relocation::pd_set_data_value(address x, bool verify_only) { if (verify_only) return; diff --git a/src/hotspot/cpu/arm/relocInfo_arm.cpp b/src/hotspot/cpu/arm/relocInfo_arm.cpp index fc5b2981ce6..fb112cdcfc0 100644 --- a/src/hotspot/cpu/arm/relocInfo_arm.cpp +++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,13 +30,13 @@ #include "oops/oop.hpp" #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { +void Relocation::pd_set_data_value(address x, bool verify_only) { NativeMovConstReg* ni = nativeMovConstReg_at(addr()); if (verify_only) { - guarantee(ni->data() == (intptr_t)(x + o), "instructions must match"); + guarantee(ni->data() == (intptr_t)x, "instructions must match"); } else { - ni->set_data((intptr_t)(x + o)); + ni->set_data((intptr_t)x); } } diff --git a/src/hotspot/cpu/ppc/relocInfo_ppc.cpp b/src/hotspot/cpu/ppc/relocInfo_ppc.cpp index 39e50465231..5d0d2785bf4 100644 --- a/src/hotspot/cpu/ppc/relocInfo_ppc.cpp +++ b/src/hotspot/cpu/ppc/relocInfo_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -32,10 +32,7 @@ #include "oops/oop.hpp" #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { - // Currently we don't support splitting of relocations. - assert(o == 0, "tried to split relocations"); - +void Relocation::pd_set_data_value(address x, bool verify_only) { if (!verify_only) { if (format() != 1) { nativeMovConstReg_at(addr())->set_data_plain(((intptr_t)x), code()); diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp index 614e4ee3d18..ff55951bd7d 100644 --- a/src/hotspot/cpu/riscv/assembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp @@ -1869,15 +1869,10 @@ enum Nf { // Vector Bit-manipulation used in Cryptography (Zvkb) Extension INSN(vandn_vv, 0b1010111, 0b000, 0b000001); - INSN(vandn_vx, 0b1010111, 0b100, 0b000001); INSN(vclmul_vv, 0b1010111, 0b010, 0b001100); - INSN(vclmul_vx, 0b1010111, 0b110, 0b001100); INSN(vclmulh_vv, 0b1010111, 0b010, 0b001101); - INSN(vclmulh_vx, 0b1010111, 0b110, 0b001101); INSN(vror_vv, 0b1010111, 0b000, 0b010100); - INSN(vror_vx, 0b1010111, 0b100, 0b010100); INSN(vrol_vv, 0b1010111, 0b000, 0b010101); - INSN(vrol_vx, 0b1010111, 0b100, 0b010101); #undef INSN @@ -1891,6 +1886,9 @@ enum Nf { INSN(vbrev8_v, 0b1010111, 0b010, 0b01000, 0b010010); // reverse bits in every byte of element INSN(vrev8_v, 0b1010111, 0b010, 0b01001, 0b010010); // reverse bytes in every elememt + INSN(vclz_v, 0b1010111, 0b010, 0b01100, 0b010010); // count leading zeros + INSN(vctz_v, 0b1010111, 0b010, 0b01101, 0b010010); // count trailing zeros + #undef INSN #define INSN(NAME, op, funct3, vm, funct6) \ diff --git a/src/hotspot/cpu/riscv/relocInfo_riscv.cpp b/src/hotspot/cpu/riscv/relocInfo_riscv.cpp index 0e50d0b0796..b3cdb93a979 100644 --- a/src/hotspot/cpu/riscv/relocInfo_riscv.cpp +++ b/src/hotspot/cpu/riscv/relocInfo_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -31,7 +31,7 @@ #include "oops/oop.inline.hpp" #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { +void Relocation::pd_set_data_value(address x, bool verify_only) { if (verify_only) { return; } diff --git a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad index 863d3772379..3d2cbbe5cf4 100644 --- a/src/hotspot/cpu/riscv/riscv_v.ad +++ b/src/hotspot/cpu/riscv/riscv_v.ad @@ -73,6 +73,8 @@ source %{ return false; } break; + case Op_CountTrailingZerosV: + case Op_CountLeadingZerosV: case Op_ReverseBytesV: case Op_PopCountVL: case Op_PopCountVI: @@ -3759,14 +3761,14 @@ instruct vsignum_reg(vReg dst, vReg zero, vReg one, vRegMask_V0 v0) %{ // -------------------------------- Reverse Bytes Vector Operations ------------------------ -instruct vreverse_bytes_masked(vReg dst, vReg src, vRegMask_V0 v0) %{ - match(Set dst (ReverseBytesV src v0)); - format %{ "vreverse_bytes_masked $dst, $src, v0" %} +instruct vreverse_bytes_masked(vReg dst_src, vRegMask_V0 v0) %{ + match(Set dst_src (ReverseBytesV dst_src v0)); + format %{ "vreverse_bytes_masked $dst_src, $dst_src, v0" %} ins_encode %{ BasicType bt = Matcher::vector_element_basic_type(this); uint vlen = Matcher::vector_length(this); __ vsetvli_helper(bt, vlen); - __ vrev8_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), Assembler::v0_t); + __ vrev8_v(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), Assembler::v0_t); %} ins_pipe(pipe_slow); %} @@ -3817,16 +3819,16 @@ instruct vconvF2HF(vReg dst, vReg src, vReg vtmp, vRegMask_V0 v0, iRegINoSp tmp) // ------------------------------ Popcount vector ------------------------------ -instruct 
vpopcount_masked(vReg dst, vReg src, vRegMask_V0 v0) %{ - match(Set dst (PopCountVI src v0)); - match(Set dst (PopCountVL src v0)); +instruct vpopcount_masked(vReg dst_src, vRegMask_V0 v0) %{ + match(Set dst_src (PopCountVI dst_src v0)); + match(Set dst_src (PopCountVL dst_src v0)); ins_cost(VEC_COST); - format %{ "vcpop_v $dst, $src, $v0\t# vcpop_v with mask" %} + format %{ "vcpop_v $dst_src, $dst_src, $v0\t# vcpop_v with mask" %} ins_encode %{ BasicType bt = Matcher::vector_element_basic_type(this); uint vlen = Matcher::vector_length(this); __ vsetvli_helper(bt, vlen); - __ vcpop_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), Assembler::v0_t); + __ vcpop_v(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), Assembler::v0_t); %} ins_pipe(pipe_slow); %} @@ -3845,6 +3847,62 @@ instruct vpopcount(vReg dst, vReg src) %{ ins_pipe(pipe_slow); %} +// ------------------------------ CountLeadingZerosV -------------------------- + +instruct vcountLeadingZeros_masked(vReg dst_src, vRegMask_V0 v0) %{ + match(Set dst_src (CountLeadingZerosV dst_src v0)); + ins_cost(VEC_COST); + format %{ "vcount_leading_zeros_masked $dst_src, $dst_src, v0" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + uint vlen = Matcher::vector_length(this); + __ vsetvli_helper(bt, vlen); + __ vclz_v(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vcountLeadingZeros(vReg dst, vReg src) %{ + match(Set dst (CountLeadingZerosV src)); + ins_cost(VEC_COST); + format %{ "vcount_leading_zeros $dst, $src" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + uint vlen = Matcher::vector_length(this); + __ vsetvli_helper(bt, vlen); + __ vclz_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); + %} + ins_pipe(pipe_slow); +%} + +// ------------------------------ CountTrailingZerosV -------------------------- + +instruct 
vcountTrailingZeros_masked(vReg dst_src, vRegMask_V0 v0) %{ + match(Set dst_src (CountTrailingZerosV dst_src v0)); + ins_cost(VEC_COST); + format %{ "vcount_trailing_zeros_masked $dst_src, $dst_src, v0" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + uint vlen = Matcher::vector_length(this); + __ vsetvli_helper(bt, vlen); + __ vctz_v(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vcountTrailingZeros(vReg dst, vReg src) %{ + match(Set dst (CountTrailingZerosV src)); + ins_cost(VEC_COST); + format %{ "vcount_trailing_zeros $dst, $src" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + uint vlen = Matcher::vector_length(this); + __ vsetvli_helper(bt, vlen); + __ vctz_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); + %} + ins_pipe(pipe_slow); +%} + // ------------------------------ Vector Load Gather --------------------------- instruct gather_load(vReg dst, indirect mem, vReg idx) %{ diff --git a/src/hotspot/cpu/s390/assembler_s390.hpp b/src/hotspot/cpu/s390/assembler_s390.hpp index 91cc7e611bf..cf80d164faf 100644 --- a/src/hotspot/cpu/s390/assembler_s390.hpp +++ b/src/hotspot/cpu/s390/assembler_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -352,10 +352,6 @@ class AddressLiteral { relocInfo::relocType rtype() const { return _rspec.type(); } const RelocationHolder& rspec() const { return _rspec; } - - RelocationHolder rspec(int offset) const { - return offset == 0 ? 
_rspec : _rspec.plus(offset); - } }; // Convenience classes diff --git a/src/hotspot/cpu/s390/relocInfo_s390.cpp b/src/hotspot/cpu/s390/relocInfo_s390.cpp index 747ae9c535d..9e352be20a3 100644 --- a/src/hotspot/cpu/s390/relocInfo_s390.cpp +++ b/src/hotspot/cpu/s390/relocInfo_s390.cpp @@ -30,27 +30,25 @@ #include "oops/oop.inline.hpp" #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { - // we don't support splitting of relocations, so o must be zero: - assert(o == 0, "tried to split relocations"); +void Relocation::pd_set_data_value(address x, bool verify_only) { if (!verify_only) { switch (format()) { case relocInfo::uncompressed_format: - nativeMovConstReg_at(addr())->set_data_plain(((intptr_t)x) + o, code()); + nativeMovConstReg_at(addr())->set_data_plain(((intptr_t)x), code()); break; case relocInfo::compressed_format: if (type() == relocInfo::metadata_type) - nativeMovConstReg_at(addr())->set_narrow_klass(((intptr_t)x) + o); + nativeMovConstReg_at(addr())->set_narrow_klass(((intptr_t)x)); else if (type() == relocInfo::oop_type) - nativeMovConstReg_at(addr())->set_narrow_oop(((intptr_t)x) + o); + nativeMovConstReg_at(addr())->set_narrow_oop(((intptr_t)x)); else guarantee(false, "bad relocInfo type for relocInfo::narrow_oop_format"); break; case relocInfo::pcrel_addr_format: // patch target location - nativeMovConstReg_at(addr())->set_pcrel_addr(((intptr_t)x) + o, code()); + nativeMovConstReg_at(addr())->set_pcrel_addr(((intptr_t)x), code()); break; case relocInfo::pcrel_data_format: // patch data at target location - nativeMovConstReg_at(addr())->set_pcrel_data(((intptr_t)x) + o, code()); + nativeMovConstReg_at(addr())->set_pcrel_data(((intptr_t)x), code()); break; default: assert(false, "not a valid relocInfo format"); diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index c279e3073af..978708d03e6 100644 --- 
a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -1621,7 +1621,8 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { arrayOopDesc::base_offset_in_bytes(op->type()), array_element_size(op->type()), op->klass()->as_register(), - *op->stub()->entry()); + *op->stub()->entry(), + op->zero_array()); } __ bind(*op->stub()->continuation()); } @@ -3453,7 +3454,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); __ call_VM_leaf(entry, 0); - __ bind(*stub->continuation()); + if (stub != nullptr) { + __ bind(*stub->continuation()); + } } void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp index 7088cf33cf6..d3add6975b4 100644 --- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp @@ -1004,7 +1004,13 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) { assert(x->number_of_arguments() == 5, "wrong type"); // Make all state_for calls early since they can emit code - CodeEmitInfo* info = state_for(x, x->state()); + CodeEmitInfo* info = nullptr; + if (x->state_before() != nullptr && x->state_before()->force_reexecute()) { + info = state_for(x, x->state_before()); + info->set_force_reexecute(); + } else { + info = state_for(x, x->state()); + } LIRItem src(x->argument_at(0), this); LIRItem src_pos(x->argument_at(1), this); @@ -1016,6 +1022,13 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) { // LinearScan will fail allocation (because arraycopy always needs a // call) + int flags; + ciArrayKlass* expected_type; + arraycopy_helper(x, &flags, &expected_type); + if (x->check_flag(Instruction::OmitChecksFlag)) { + flags = 0; + } + #ifndef _LP64 src.load_item_force (FrameMap::rcx_oop_opr); src_pos.load_item_force (FrameMap::rdx_opr); @@ -1023,6 +1036,11 @@ void 
LIRGenerator::do_ArrayCopy(Intrinsic* x) { dst_pos.load_item_force (FrameMap::rbx_opr); length.load_item_force (FrameMap::rdi_opr); LIR_Opr tmp = (FrameMap::rsi_opr); + + if (expected_type != nullptr && flags == 0) { + FrameMap* f = Compilation::current()->frame_map(); + f->update_reserved_argument_area_size(3 * BytesPerWord); + } #else // The java calling convention will give us enough registers @@ -1044,10 +1062,6 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) { set_no_result(x); - int flags; - ciArrayKlass* expected_type; - arraycopy_helper(x, &flags, &expected_type); - __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint } @@ -1310,7 +1324,13 @@ void LIRGenerator::do_NewInstance(NewInstance* x) { void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { - CodeEmitInfo* info = state_for(x, x->state()); + CodeEmitInfo* info = nullptr; + if (x->state_before() != nullptr && x->state_before()->force_reexecute()) { + info = state_for(x, x->state_before()); + info->set_force_reexecute(); + } else { + info = state_for(x, x->state()); + } LIRItem length(x->length(), this); length.load_item_force(FrameMap::rbx_opr); @@ -1327,7 +1347,7 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg); CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); - __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path); + __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array()); LIR_Opr result = rlock_result(x); __ move(reg, result); diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index caca3a15282..2374324ca7c 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -278,7 +278,7 @@ void 
C1_MacroAssembler::initialize_object(Register obj, Register klass, Register verify_oop(obj); } -void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case) { +void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case, bool zero_array) { assert(obj == rax, "obj must be in rax, for cmpxchg"); assert_different_registers(obj, len, t1, t2, klass); @@ -300,11 +300,13 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, initialize_header(obj, klass, len, t1, t2); // clear rest of allocated space - const Register len_zero = len; - // Align-up to word boundary, because we clear the 4 bytes potentially - // following the length field in initialize_header(). - int base_offset = align_up(base_offset_in_bytes, BytesPerWord); - initialize_body(obj, arr_size, base_offset, len_zero); + if (zero_array) { + const Register len_zero = len; + // Align-up to word boundary, because we clear the 4 bytes potentially + // following the length field in initialize_header(). 
+ int base_offset = align_up(base_offset_in_bytes, BytesPerWord); + initialize_body(obj, arr_size, base_offset, len_zero); + } if (CURRENT_ENV->dtrace_alloc_probes()) { assert(obj == rax, "must be"); diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp index a705dd70efd..6344a7b6ef1 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp @@ -89,7 +89,8 @@ // base_offset_in_bytes: offset of the first array element, in bytes // f : element scale factor // slow_case : exit to slow case implementation if fast allocation fails - void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case); + // zero_array : zero the allocated array or not + void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case, bool zero_array); int rsp_offset() const { return _rsp_offset; } void set_rsp_offset(int n) { _rsp_offset = n; } diff --git a/src/hotspot/cpu/x86/relocInfo_x86.cpp b/src/hotspot/cpu/x86/relocInfo_x86.cpp index db806419b51..d7fddf838ac 100644 --- a/src/hotspot/cpu/x86/relocInfo_x86.cpp +++ b/src/hotspot/cpu/x86/relocInfo_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,9 +36,8 @@ #include "utilities/checkedCast.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { +void Relocation::pd_set_data_value(address x, bool verify_only) { #ifdef AMD64 - x += o; typedef Assembler::WhichOperand WhichOperand; WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm, call32, narrow oop assert(which == Assembler::disp32_operand || @@ -80,9 +79,9 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { } #else if (verify_only) { - guarantee(*pd_address_in_code() == (x + o), "instructions must match"); + guarantee(*pd_address_in_code() == x, "instructions must match"); } else { - *pd_address_in_code() = x + o; + *pd_address_in_code() = x; } #endif // AMD64 } diff --git a/src/hotspot/cpu/zero/relocInfo_zero.cpp b/src/hotspot/cpu/zero/relocInfo_zero.cpp index 1e9fdc1081d..b926f20cfe7 100644 --- a/src/hotspot/cpu/zero/relocInfo_zero.cpp +++ b/src/hotspot/cpu/zero/relocInfo_zero.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -30,7 +30,7 @@ #include "oops/oop.inline.hpp" #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { +void Relocation::pd_set_data_value(address x, bool verify_only) { ShouldNotCallThis(); } diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp index bd1d7174c12..29825a9eab2 100644 --- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp +++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp @@ -103,7 +103,7 @@ bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const { assert(is_aligned(length, os::vm_page_size()), "Invalid length"); log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - untype(offset) / M, untype(offset) + length / M, length / M); + untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); const uintptr_t addr = _base + untype(offset); const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); @@ -150,7 +150,7 @@ size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const { assert(is_aligned(length, os::vm_page_size()), "Invalid length"); log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - untype(offset) / M, untype(offset) + length / M, length / M); + untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); const uintptr_t start = _base + untype(offset); const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp index ff891509365..76f9d90cd71 100644 --- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp +++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp @@ 
-597,7 +597,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zoffset offset, size_t bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const { log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - untype(offset) / M, untype(offset + length) / M, length / M); + untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); retry: const ZErrno err = fallocate(false /* punch_hole */, offset, length); @@ -697,7 +697,7 @@ size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const { size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const { log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - untype(offset) / M, untype(offset + length) / M, length / M); + untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); const ZErrno err = fallocate(true /* punch_hole */, offset, length); if (err) { diff --git a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp index 9a38352d1d7..181d0fada59 100644 --- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp @@ -225,14 +225,14 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) { log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - untype(offset) / M, (untype(offset) + length) / M, length / M); + untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); return _impl->commit(offset, length); } size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) { log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - untype(offset) / M, (untype(offset) + length) / M, length / M); + 
untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); return _impl->uncommit(offset, length); } diff --git a/src/hotspot/share/c1/c1_Compiler.cpp b/src/hotspot/share/c1/c1_Compiler.cpp index bdbeb39f89a..6e518b0213b 100644 --- a/src/hotspot/share/c1/c1_Compiler.cpp +++ b/src/hotspot/share/c1/c1_Compiler.cpp @@ -235,6 +235,9 @@ bool Compiler::is_intrinsic_supported(vmIntrinsics::ID id) { case vmIntrinsics::_counterTime: #endif case vmIntrinsics::_getObjectSize: +#if defined(X86) || defined(AARCH64) + case vmIntrinsics::_clone: +#endif break; case vmIntrinsics::_blackhole: break; diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp index a361f3da9c2..db025883b78 100644 --- a/src/hotspot/share/c1/c1_GraphBuilder.cpp +++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp @@ -2026,8 +2026,11 @@ void GraphBuilder::invoke(Bytecodes::Code code) { int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); receiver = state()->stack_at(index); ciType* type = receiver->exact_type(); - if (type != nullptr && type->is_loaded() && - type->is_instance_klass() && !type->as_instance_klass()->is_interface()) { + if (type != nullptr && type->is_loaded()) { + assert(!type->is_instance_klass() || !type->as_instance_klass()->is_interface(), "Must not be an interface"); + // Detects non-interface instances, primitive arrays, and some object arrays. + // Array receivers can only call Object methods, so we should be able to allow + // all object arrays here too, even those with unloaded types. 
receiver_klass = (ciInstanceKlass*) type; type_is_exact = true; } @@ -2243,7 +2246,7 @@ void GraphBuilder::new_instance(int klass_index) { void GraphBuilder::new_type_array() { ValueStack* state_before = copy_state_exhandling(); - apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before))); + apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true))); } @@ -3650,9 +3653,13 @@ void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_retur case vmIntrinsics::_getAndSetReference : append_unsafe_get_and_set(callee, false); return; case vmIntrinsics::_getCharStringU : append_char_access(callee, false); return; case vmIntrinsics::_putCharStringU : append_char_access(callee, true); return; + case vmIntrinsics::_clone : append_alloc_array_copy(callee); return; default: break; } + if (_inline_bailout_msg != nullptr) { + return; + } // create intrinsic node const bool has_receiver = !callee->is_static(); @@ -3714,6 +3721,9 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) { } } build_graph_for_intrinsic(callee, ignore_return); + if (_inline_bailout_msg != nullptr) { + return false; + } return true; } @@ -4427,6 +4437,43 @@ void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) { } } +void GraphBuilder::append_alloc_array_copy(ciMethod* callee) { + const int args_base = state()->stack_size() - callee->arg_size(); + ciType* receiver_type = state()->stack_at(args_base)->exact_type(); + if (receiver_type == nullptr) { + inline_bailout("must have a receiver"); + return; + } + if (!receiver_type->is_type_array_klass()) { + inline_bailout("clone array not primitive"); + return; + } + + ValueStack* state_before = copy_state_before(); + state_before->set_force_reexecute(); + Value src = apop(); + BasicType basic_type = src->exact_type()->as_array_klass()->element_type()->basic_type(); + Value length = append(new ArrayLength(src, state_before)); 
+ Value new_array = append_split(new NewTypeArray(length, basic_type, state_before, false)); + + ValueType* result_type = as_ValueType(callee->return_type()); + vmIntrinsics::ID id = vmIntrinsics::_arraycopy; + Values* args = new Values(5); + args->push(src); + args->push(append(new Constant(new IntConstant(0)))); + args->push(new_array); + args->push(append(new Constant(new IntConstant(0)))); + args->push(length); + const bool has_receiver = true; + Intrinsic* array_copy = new Intrinsic(result_type, id, + args, has_receiver, state_before, + vmIntrinsics::preserves_state(id), + vmIntrinsics::can_trap(id)); + array_copy->set_flag(Instruction::OmitChecksFlag, true); + append_split(array_copy); + apush(new_array); +} + void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) { CompileLog* log = compilation()->log(); if (log != nullptr) { diff --git a/src/hotspot/share/c1/c1_GraphBuilder.hpp b/src/hotspot/share/c1/c1_GraphBuilder.hpp index 42233455d4c..92b9a518a20 100644 --- a/src/hotspot/share/c1/c1_GraphBuilder.hpp +++ b/src/hotspot/share/c1/c1_GraphBuilder.hpp @@ -379,6 +379,7 @@ class GraphBuilder { void append_unsafe_CAS(ciMethod* callee); void append_unsafe_get_and_set(ciMethod* callee, bool is_add); void append_char_access(ciMethod* callee, bool is_store); + void append_alloc_array_copy(ciMethod* callee); void print_inlining(ciMethod* callee, const char* msg, bool success = true); diff --git a/src/hotspot/share/c1/c1_Instruction.hpp b/src/hotspot/share/c1/c1_Instruction.hpp index 8f7fd698e79..32ff3d9f61c 100644 --- a/src/hotspot/share/c1/c1_Instruction.hpp +++ b/src/hotspot/share/c1/c1_Instruction.hpp @@ -364,6 +364,7 @@ class Instruction: public CompilationResourceObj { InWorkListFlag, DeoptimizeOnException, KillsMemoryFlag, + OmitChecksFlag, InstructionLastFlag }; @@ -1327,16 +1328,19 @@ BASE(NewArray, StateSplit) LEAF(NewTypeArray, NewArray) private: BasicType _elt_type; + bool _zero_array; public: // creation - NewTypeArray(Value 
length, BasicType elt_type, ValueStack* state_before) + NewTypeArray(Value length, BasicType elt_type, ValueStack* state_before, bool zero_array) : NewArray(length, state_before) , _elt_type(elt_type) + , _zero_array(zero_array) {} // accessors BasicType elt_type() const { return _elt_type; } + bool zero_array() const { return _zero_array; } ciType* exact_type() const; }; diff --git a/src/hotspot/share/c1/c1_LIR.cpp b/src/hotspot/share/c1/c1_LIR.cpp index dee208c11be..4017a5324b5 100644 --- a/src/hotspot/share/c1/c1_LIR.cpp +++ b/src/hotspot/share/c1/c1_LIR.cpp @@ -353,7 +353,15 @@ LIR_OpArrayCopy::LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_ , _tmp(tmp) , _expected_type(expected_type) , _flags(flags) { +#if defined(X86) || defined(AARCH64) + if (expected_type != nullptr && flags == 0) { + _stub = nullptr; + } else { + _stub = new ArrayCopyStub(this); + } +#else _stub = new ArrayCopyStub(this); +#endif } LIR_OpUpdateCRC32::LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) @@ -999,7 +1007,10 @@ void LIR_OpLabel::emit_code(LIR_Assembler* masm) { void LIR_OpArrayCopy::emit_code(LIR_Assembler* masm) { masm->emit_arraycopy(this); - masm->append_code_stub(stub()); + ArrayCopyStub* code_stub = stub(); + if (code_stub != nullptr) { + masm->append_code_stub(code_stub); + } } void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) { @@ -1365,7 +1376,7 @@ void LIR_List::allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, stub)); } -void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub) { +void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array) { append(new LIR_OpAllocArray( klass, len, @@ -1375,7 +1386,8 @@ void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, L t3, t4, type, - stub)); + stub, + zero_array)); } 
void LIR_List::shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp) { diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp index 6f527135fbe..c69d29f8d61 100644 --- a/src/hotspot/share/c1/c1_LIR.hpp +++ b/src/hotspot/share/c1/c1_LIR.hpp @@ -1750,9 +1750,10 @@ class LIR_OpAllocArray : public LIR_Op { LIR_Opr _tmp4; BasicType _type; CodeStub* _stub; + bool _zero_array; public: - LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub) + LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub, bool zero_array) : LIR_Op(lir_alloc_array, result, nullptr) , _klass(klass) , _len(len) @@ -1761,7 +1762,8 @@ class LIR_OpAllocArray : public LIR_Op { , _tmp3(t3) , _tmp4(t4) , _type(type) - , _stub(stub) {} + , _stub(stub) + , _zero_array(zero_array) {} LIR_Opr klass() const { return _klass; } LIR_Opr len() const { return _len; } @@ -1772,6 +1774,7 @@ class LIR_OpAllocArray : public LIR_Op { LIR_Opr tmp4() const { return _tmp4; } BasicType type() const { return _type; } CodeStub* stub() const { return _stub; } + bool zero_array() const { return _zero_array; } virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpAllocArray * as_OpAllocArray () { return this; } @@ -2302,7 +2305,7 @@ class LIR_List: public CompilationResourceObj { void irem(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info); void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub); - void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub); + void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array = 
true); // jump is an unconditional branch void jump(BlockBegin* block) { diff --git a/src/hotspot/share/c1/c1_ValueStack.hpp b/src/hotspot/share/c1/c1_ValueStack.hpp index 0a75fa39bf6..bb0c475585c 100644 --- a/src/hotspot/share/c1/c1_ValueStack.hpp +++ b/src/hotspot/share/c1/c1_ValueStack.hpp @@ -58,6 +58,7 @@ class ValueStack: public CompilationResourceObj { Values _locals; // the locals Values _stack; // the expression stack Values* _locks; // the monitor stack (holding the locked values) + bool _force_reexecute; // force the reexecute flag on, used for patching stub Value check(ValueTag tag, Value t) { assert(tag == t->type()->tag() || (tag == objectTag && t->type()->tag() == addressTag), "types must correspond"); @@ -225,6 +226,9 @@ class ValueStack: public CompilationResourceObj { void setup_phi_for_stack(BlockBegin* b, int index); void setup_phi_for_local(BlockBegin* b, int index); + bool force_reexecute() const { return _force_reexecute; } + void set_force_reexecute() { _force_reexecute = true; } + // debugging void print() PRODUCT_RETURN; void verify() PRODUCT_RETURN; diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp index 51399f03434..a98fd04ba68 100644 --- a/src/hotspot/share/cds/archiveBuilder.cpp +++ b/src/hotspot/share/cds/archiveBuilder.cpp @@ -146,7 +146,7 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) { } ArchiveBuilder::ArchiveBuilder() : - _current_dump_space(nullptr), + _current_dump_region(nullptr), _buffer_bottom(nullptr), _last_verified_top(nullptr), _num_dump_regions_used(0), @@ -341,10 +341,10 @@ address ArchiveBuilder::reserve_buffer() { _buffer_bottom = buffer_bottom; _last_verified_top = buffer_bottom; - _current_dump_space = &_rw_region; + _current_dump_region = &_rw_region; _num_dump_regions_used = 1; _other_region_used_bytes = 0; - _current_dump_space->init(&_shared_rs, &_shared_vs); + _current_dump_region->init(&_shared_rs, &_shared_vs); 
ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs); @@ -560,21 +560,21 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref } } -void ArchiveBuilder::start_dump_space(DumpRegion* next) { +void ArchiveBuilder::start_dump_region(DumpRegion* next) { address bottom = _last_verified_top; - address top = (address)(current_dump_space()->top()); + address top = (address)(current_dump_region()->top()); _other_region_used_bytes += size_t(top - bottom); - current_dump_space()->pack(next); - _current_dump_space = next; + current_dump_region()->pack(next); + _current_dump_region = next; _num_dump_regions_used ++; - _last_verified_top = (address)(current_dump_space()->top()); + _last_verified_top = (address)(current_dump_region()->top()); } void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) { address bottom = _last_verified_top; - address top = (address)(current_dump_space()->top()); + address top = (address)(current_dump_region()->top()); size_t used = size_t(top - bottom) + _other_region_used_bytes; int diff = int(estimate) - int(used); @@ -630,7 +630,7 @@ void ArchiveBuilder::dump_ro_metadata() { ResourceMark rm; log_info(cds)("Allocating RO objects ... 
"); - start_dump_space(&_ro_region); + start_dump_region(&_ro_region); make_shallow_copies(&_ro_region, &_ro_src_objs); #if INCLUDE_CDS_JAVA_HEAP diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp index dab369265b0..cbde5a7e02c 100644 --- a/src/hotspot/share/cds/archiveBuilder.hpp +++ b/src/hotspot/share/cds/archiveBuilder.hpp @@ -91,7 +91,7 @@ const int SharedSpaceObjectAlignment = KlassAlignmentInBytes; // class ArchiveBuilder : public StackObj { protected: - DumpRegion* _current_dump_space; + DumpRegion* _current_dump_region; address _buffer_bottom; // for writing the contents of rw/ro regions address _last_verified_top; int _num_dump_regions_used; @@ -114,7 +114,7 @@ class ArchiveBuilder : public StackObj { intx _buffer_to_requested_delta; - DumpRegion* current_dump_space() const { return _current_dump_space; } + DumpRegion* current_dump_region() const { return _current_dump_region; } public: enum FollowMode { @@ -278,17 +278,17 @@ class ArchiveBuilder : public StackObj { size_t estimate_archive_size(); - void start_dump_space(DumpRegion* next); + void start_dump_region(DumpRegion* next); void verify_estimate_size(size_t estimate, const char* which); public: address reserve_buffer(); - address buffer_bottom() const { return _buffer_bottom; } - address buffer_top() const { return (address)current_dump_space()->top(); } - address requested_static_archive_bottom() const { return _requested_static_archive_bottom; } - address mapped_static_archive_bottom() const { return _mapped_static_archive_bottom; } - intx buffer_to_requested_delta() const { return _buffer_to_requested_delta; } + address buffer_bottom() const { return _buffer_bottom; } + address buffer_top() const { return (address)current_dump_region()->top(); } + address requested_static_archive_bottom() const { return _requested_static_archive_bottom; } + address mapped_static_archive_bottom() const { return _mapped_static_archive_bottom; } + intx 
buffer_to_requested_delta() const { return _buffer_to_requested_delta; } bool is_in_buffer_space(address p) const { return (buffer_bottom() <= p && p < buffer_top()); diff --git a/src/hotspot/share/cds/archiveHeapWriter.cpp b/src/hotspot/share/cds/archiveHeapWriter.cpp index a86996d3b1f..d526a961b7f 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.cpp +++ b/src/hotspot/share/cds/archiveHeapWriter.cpp @@ -67,8 +67,10 @@ ArchiveHeapWriter::BufferOffsetToSourceObjectTable* ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr; -typedef ResourceHashtable FillersTable; static FillersTable* _fillers; @@ -361,12 +363,12 @@ void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) { array_length, fill_bytes, _buffer_used); HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes); _buffer_used = filler_end; - _fillers->put((address)filler, fill_bytes); + _fillers->put(buffered_address_to_offset((address)filler), fill_bytes); } } size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) { - size_t* p = _fillers->get(buffered_addr); + size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr)); if (p != nullptr) { assert(*p > 0, "filler must be larger than zero bytes"); return *p; @@ -530,10 +532,8 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s fake_oop->set_narrow_klass(nk); // We need to retain the identity_hash, because it may have been used by some hashtables - // in the shared heap. This also has the side effect of pre-initializing the - // identity_hash for all shared objects, so they are less likely to be written - // into during run time, increasing the potential of memory sharing. - if (src_obj != nullptr) { + // in the shared heap. 
+ if (src_obj != nullptr && !src_obj->fast_no_hash_check()) { intptr_t src_hash = src_obj->identity_hash(); fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash)); assert(fake_oop->mark().is_unlocked(), "sanity"); diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp index 5ba36960c55..8fd20e20267 100644 --- a/src/hotspot/share/cds/archiveUtils.cpp +++ b/src/hotspot/share/cds/archiveUtils.cpp @@ -310,18 +310,11 @@ void WriteClosure::do_ptr(void** p) { if (ptr != nullptr && !ArchiveBuilder::current()->is_in_buffer_space(ptr)) { ptr = ArchiveBuilder::current()->get_buffered_addr(ptr); } - _dump_region->append_intptr_t((intptr_t)ptr, true); -} - -void WriteClosure::do_region(u_char* start, size_t size) { - assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment"); - assert(size % sizeof(intptr_t) == 0, "bad size"); - do_tag((int)size); - while (size > 0) { - do_ptr((void**)start); - start += sizeof(intptr_t); - size -= sizeof(intptr_t); + // null pointers do not need to be converted to offsets + if (ptr != nullptr) { + ptr = (address)ArchiveBuilder::current()->buffer_to_offset(ptr); } + _dump_region->append_intptr_t((intptr_t)ptr, false); } void ReadClosure::do_ptr(void** p) { @@ -329,7 +322,7 @@ void ReadClosure::do_ptr(void** p) { intptr_t obj = nextPtr(); assert((intptr_t)obj >= 0 || (intptr_t)obj < -100, "hit tag while initializing ptrs."); - *p = (void*)obj; + *p = (void*)obj != nullptr ? 
(void*)(SharedBaseAddress + obj) : (void*)obj; } void ReadClosure::do_u4(u4* p) { @@ -355,17 +348,6 @@ void ReadClosure::do_tag(int tag) { FileMapInfo::assert_mark(tag == old_tag); } -void ReadClosure::do_region(u_char* start, size_t size) { - assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment"); - assert(size % sizeof(intptr_t) == 0, "bad size"); - do_tag((int)size); - while (size > 0) { - *(intptr_t*)start = nextPtr(); - start += sizeof(intptr_t); - size -= sizeof(intptr_t); - } -} - void ArchiveUtils::log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) { if (ClassListWriter::is_enabled()) { if (SystemDictionaryShared::is_supported_invokedynamic(bootstrap_specifier)) { diff --git a/src/hotspot/share/cds/archiveUtils.hpp b/src/hotspot/share/cds/archiveUtils.hpp index efe5a468b93..32cef97886f 100644 --- a/src/hotspot/share/cds/archiveUtils.hpp +++ b/src/hotspot/share/cds/archiveUtils.hpp @@ -215,7 +215,10 @@ class WriteClosure : public SerializeClosure { _dump_region->append_intptr_t((intptr_t)tag); } - void do_region(u_char* start, size_t size); + char* region_top() { + return _dump_region->top(); + } + bool reading() const { return false; } }; @@ -238,8 +241,8 @@ class ReadClosure : public SerializeClosure { void do_int(int* p); void do_bool(bool *p); void do_tag(int tag); - void do_region(u_char* start, size_t size); bool reading() const { return true; } + char* region_top() { return nullptr; } }; class ArchiveUtils { diff --git a/src/hotspot/share/cds/classListParser.cpp b/src/hotspot/share/cds/classListParser.cpp index b17da725deb..4e49cd3d881 100644 --- a/src/hotspot/share/cds/classListParser.cpp +++ b/src/hotspot/share/cds/classListParser.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -50,30 +50,24 @@ #include "runtime/javaCalls.hpp" #include "utilities/defaultStream.hpp" #include "utilities/macros.hpp" +#include "utilities/utf8.hpp" volatile Thread* ClassListParser::_parsing_thread = nullptr; ClassListParser* ClassListParser::_instance = nullptr; -ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2klass_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE) { +ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : + _classlist_file(file), + _id2klass_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE), + _file_input(do_open(file), /* need_close=*/true), + _input_stream(&_file_input) { log_info(cds)("Parsing %s%s", file, (parse_mode == _parse_lambda_forms_invokers_only) ? " (lambda form invokers only)" : ""); - _classlist_file = file; - _file = nullptr; - // Use os::open() because neither fopen() nor os::fopen() - // can handle long path name on Windows. - int fd = os::open(file, O_RDONLY, S_IREAD); - if (fd != -1) { - // Obtain a File* from the file descriptor so that fgets() - // can be used in parse_one_line() - _file = os::fdopen(fd, "r"); - } - if (_file == nullptr) { + if (!_file_input.is_open()) { char errmsg[JVM_MAXPATHLEN]; os::lasterror(errmsg, JVM_MAXPATHLEN); vm_exit_during_initialization("Loading classlist failed", errmsg); } - _line_no = 0; - _token = _line; + _token = _line = nullptr; _interfaces = new (mtClass) GrowableArray(10, mtClass); _indy_items = new (mtClass) GrowableArray(9, mtClass); _parse_mode = parse_mode; @@ -84,14 +78,24 @@ ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2k Atomic::store(&_parsing_thread, Thread::current()); } +FILE* ClassListParser::do_open(const char* file) { + // Use os::open() because neither fopen() nor os::fopen() + // can handle long path name on Windows. 
(See JDK-8216184) + int fd = os::open(file, O_RDONLY, S_IREAD); + FILE* fp = nullptr; + if (fd != -1) { + // Obtain a FILE* from the file descriptor so that _input_stream + // can be used in ClassListParser::parse() + fp = os::fdopen(fd, "r"); + } + return fp; +} + bool ClassListParser::is_parsing_thread() { return Atomic::load(&_parsing_thread) == Thread::current(); } ClassListParser::~ClassListParser() { - if (_file != nullptr) { - fclose(_file); - } Atomic::store(&_parsing_thread, (Thread*)nullptr); delete _indy_items; delete _interfaces; @@ -101,7 +105,15 @@ ClassListParser::~ClassListParser() { int ClassListParser::parse(TRAPS) { int class_count = 0; - while (parse_one_line()) { + for (; !_input_stream.done(); _input_stream.next()) { + _line = _input_stream.current_line(); + if (*_line == '#') { // comment + continue; + } + if (!parse_one_line()) { + break; + } + if (lambda_form_line()) { // The current line is "@lambda-form-invoker ...". It has been recorded in LambdaFormInvokers, // and will be processed later. @@ -112,6 +124,7 @@ int ClassListParser::parse(TRAPS) { continue; } + check_class_name(_class_name); TempNewSymbol class_name_symbol = SymbolTable::new_symbol(_class_name); if (_indy_items->length() > 0) { // The current line is "@lambda-proxy class_name". Load the proxy class. 
@@ -165,43 +178,26 @@ int ClassListParser::parse(TRAPS) { } bool ClassListParser::parse_one_line() { - for (;;) { - if (fgets(_line, sizeof(_line), _file) == nullptr) { - return false; - } - ++ _line_no; - _line_len = (int)strlen(_line); - if (_line_len > _max_allowed_line_len) { - error("input line too long (must be no longer than %d chars)", _max_allowed_line_len); - } - if (*_line == '#') { // comment - continue; - } - - { - int len = (int)strlen(_line); - int i; - // Replace \t\r\n\f with ' ' - for (i=0; i 0) { - if (_line[len-1] == ' ') { - _line[len-1] = '\0'; - len --; - } else { - break; - } + // Remove trailing newline/space + while (len > 0) { + if (_line[len-1] == ' ') { + _line[len-1] = '\0'; + len --; + } else { + break; } - _line_len = len; } - - // valid line - break; + _line_len = len; } _class_name = _line; @@ -286,7 +282,7 @@ int ClassListParser::split_at_tag_from_line() { _token = _line; char* ptr; if ((ptr = strchr(_line, ' ')) == nullptr) { - error("Too few items following the @ tag \"%s\" line #%d", _line, _line_no); + error("Too few items following the @ tag \"%s\" line #%zu", _line, lineno()); return 0; } *ptr++ = '\0'; @@ -304,7 +300,7 @@ bool ClassListParser::parse_at_tags() { if (strcmp(_token, LAMBDA_PROXY_TAG) == 0) { split_tokens_by_whitespace(offset); if (_indy_items->length() < 2) { - error("Line with @ tag has too few items \"%s\" line #%d", _token, _line_no); + error("Line with @ tag has too few items \"%s\" line #%zu", _token, lineno()); return false; } // set the class name @@ -315,7 +311,7 @@ bool ClassListParser::parse_at_tags() { _lambda_form_line = true; return true; } else { - error("Invalid @ tag at the beginning of line \"%s\" line #%d", _token, _line_no); + error("Invalid @ tag at the beginning of line \"%s\" line #%zu", _token, lineno()); return false; } } @@ -423,8 +419,8 @@ void ClassListParser::error(const char* msg, ...) 
{ } jio_fprintf(defaultStream::error_stream(), - "An error has occurred while processing class list file %s %d:%d.\n", - _classlist_file, _line_no, (error_index + 1)); + "An error has occurred while processing class list file %s %zu:%d.\n", + _classlist_file, lineno(), (error_index + 1)); jio_vfprintf(defaultStream::error_stream(), msg, ap); if (_line_len <= 0) { @@ -450,6 +446,25 @@ void ClassListParser::error(const char* msg, ...) { va_end(ap); } +void ClassListParser::check_class_name(const char* class_name) { + const char* err = nullptr; + size_t len = strlen(class_name); + if (len > (size_t)Symbol::max_length()) { + err = "class name too long"; + } else { + assert(Symbol::max_length() < INT_MAX && len < INT_MAX, "must be"); + if (!UTF8::is_legal_utf8((const unsigned char*)class_name, (int)len, /*version_leq_47*/false)) { + err = "class name is not valid UTF8"; + } + } + if (err != nullptr) { + jio_fprintf(defaultStream::error_stream(), + "An error has occurred while processing class list file %s:%zu %s\n", + _classlist_file, lineno(), err); + vm_exit_during_initialization("class list format error.", nullptr); + } +} + // This function is used for loading classes for customized class loaders // during archive dumping. InstanceKlass* ClassListParser::load_class_from_source(Symbol* class_name, TRAPS) { diff --git a/src/hotspot/share/cds/classListParser.hpp b/src/hotspot/share/cds/classListParser.hpp index 74a2ff10515..a383e04d8e6 100644 --- a/src/hotspot/share/cds/classListParser.hpp +++ b/src/hotspot/share/cds/classListParser.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/growableArray.hpp" +#include "utilities/istream.hpp" #include "utilities/resizeableResourceHash.hpp" #define LAMBDA_PROXY_TAG "@lambda-proxy" @@ -80,14 +81,6 @@ class ClassListParser : public StackObj { enum { _unspecified = -999, - - // Max number of bytes allowed per line in the classlist. - // Theoretically Java class names could be 65535 bytes in length. Also, an input line - // could have a very long path name up to JVM_MAXPATHLEN bytes in length. In reality, - // 4K bytes is more than enough. - _max_allowed_line_len = 4096, - _line_buf_extra = 10, // for detecting input too long - _line_buf_size = _max_allowed_line_len + _line_buf_extra }; // Use a small initial size in debug build to test resizing logic @@ -96,16 +89,14 @@ class ClassListParser : public StackObj { static volatile Thread* _parsing_thread; // the thread that created _instance static ClassListParser* _instance; // the singleton. const char* _classlist_file; - FILE* _file; ID2KlassTable _id2klass_table; - // The following field contains information from the *current* line being - // parsed. - char _line[_line_buf_size]; // The buffer that holds the current line. Some characters in + FileInput _file_input; + inputStream _input_stream; + char* _line; // The buffer that holds the current line. Some characters in // the buffer may be overwritten by '\0' during parsing. int _line_len; // Original length of the input line. 
- int _line_no; // Line number for current line being parsed const char* _class_name; GrowableArray* _indy_items; // items related to invoke dynamic for archiving lambda proxy classes int _id; @@ -132,6 +123,8 @@ class ClassListParser : public StackObj { bool parse_one_line(); Klass* load_current_class(Symbol* class_name_symbol, TRAPS); + size_t lineno() { return _input_stream.lineno(); } + FILE* do_open(const char* file); ClassListParser(const char* file, ParseMode _parse_mode); ~ClassListParser(); @@ -183,6 +176,7 @@ class ClassListParser : public StackObj { error("%s id %d is not yet loaded", which, id); } } + void check_class_name(const char* class_name); const char* current_class_name() { return _class_name; diff --git a/src/hotspot/share/cds/cppVtables.cpp b/src/hotspot/share/cds/cppVtables.cpp index c339ce9c0de..f17d94a82fd 100644 --- a/src/hotspot/share/cds/cppVtables.cpp +++ b/src/hotspot/share/cds/cppVtables.cpp @@ -213,23 +213,30 @@ void CppVtableCloner::init_orig_cpp_vtptr(int kind) { // the following holds true: // _index[ConstantPool_Kind]->cloned_vtable() == ((intptr_t**)cp)[0] // _index[InstanceKlass_Kind]->cloned_vtable() == ((intptr_t**)ik)[0] -CppVtableInfo** CppVtables::_index = nullptr; +static CppVtableInfo* _index[_num_cloned_vtable_kinds]; -char* CppVtables::dumptime_init(ArchiveBuilder* builder) { +// Vtables are all fixed offsets from ArchiveBuilder::current()->mapped_base() +// E.g. ConstantPool is at offset 0x58. 
We can archive these offsets in the +// RO region and use them to calculate their location at runtime without storing +// the pointers in the RW region +char* CppVtables::_vtables_serialized_base = nullptr; + +void CppVtables::dumptime_init(ArchiveBuilder* builder) { assert(CDSConfig::is_dumping_static_archive(), "cpp tables are only dumped into static archive"); - size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(CppVtableInfo*); - _index = (CppVtableInfo**)builder->rw_region()->allocate(vtptrs_bytes); CPP_VTABLE_TYPES_DO(ALLOCATE_AND_INITIALIZE_VTABLE); size_t cpp_tables_size = builder->rw_region()->top() - builder->rw_region()->base(); builder->alloc_stats()->record_cpp_vtables((int)cpp_tables_size); - - return (char*)_index; } void CppVtables::serialize(SerializeClosure* soc) { - soc->do_ptr(&_index); + if (!soc->reading()) { + _vtables_serialized_base = (char*)ArchiveBuilder::current()->buffer_top(); + } + for (int i = 0; i < _num_cloned_vtable_kinds; i++) { + soc->do_ptr(&_index[i]); + } if (soc->reading()) { CPP_VTABLE_TYPES_DO(INITIALIZE_VTABLE); } diff --git a/src/hotspot/share/cds/cppVtables.hpp b/src/hotspot/share/cds/cppVtables.hpp index 5318a9de2ba..973502909dd 100644 --- a/src/hotspot/share/cds/cppVtables.hpp +++ b/src/hotspot/share/cds/cppVtables.hpp @@ -36,13 +36,14 @@ class CppVtableInfo; // Support for C++ vtables in CDS archive. 
class CppVtables : AllStatic { - static CppVtableInfo** _index; + static char* _vtables_serialized_base; public: - static char* dumptime_init(ArchiveBuilder* builder); + static void dumptime_init(ArchiveBuilder* builder); static void zero_archived_vtables(); static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj); static void serialize(SerializeClosure* sc); static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false); + static char* vtables_serialized_base() { return _vtables_serialized_base; } }; #endif // SHARE_CDS_CPPVTABLES_HPP diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp index cd5dd88b099..f255b337d14 100644 --- a/src/hotspot/share/cds/dynamicArchive.cpp +++ b/src/hotspot/share/cds/dynamicArchive.cpp @@ -137,7 +137,7 @@ class DynamicArchiveBuilder : public ArchiveBuilder { // Note that these tables still point to the *original* objects, so // they would need to call DynamicArchive::original_to_target() to // get the correct addresses. - assert(current_dump_space() == ro_region(), "Must be RO space"); + assert(current_dump_region() == ro_region(), "Must be RO space"); SymbolTable::write_to_archive(symbols()); ArchiveBuilder::OtherROAllocMark mark; diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp index 202042e622e..b9f5d963c85 100644 --- a/src/hotspot/share/cds/heapShared.cpp +++ b/src/hotspot/share/cds/heapShared.cpp @@ -278,12 +278,6 @@ bool HeapShared::archive_object(oop obj) { } else { count_allocation(obj->size()); ArchiveHeapWriter::add_source_obj(obj); - - // The archived objects are discovered in a predictable order. Compute - // their identity_hash() as soon as we see them. This ensures that the - // the identity_hash in the object header will have a predictable value, - // making the archive reproducible. 
- obj->identity_hash(); CachedOopInfo info = make_cached_oop_info(obj); archived_object_cache()->put_when_absent(obj, info); archived_object_cache()->maybe_grow(); diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp index c7d14f83d03..93c5c1a1e3b 100644 --- a/src/hotspot/share/cds/metaspaceShared.cpp +++ b/src/hotspot/share/cds/metaspaceShared.cpp @@ -511,7 +511,7 @@ void VM_PopulateDumpSharedSpace::doit() { builder.gather_source_objs(); builder.reserve_buffer(); - char* cloned_vtables = CppVtables::dumptime_init(&builder); + CppVtables::dumptime_init(&builder); builder.sort_metadata_objs(); builder.dump_rw_metadata(); @@ -542,7 +542,7 @@ void VM_PopulateDumpSharedSpace::doit() { FileMapInfo* mapinfo = new FileMapInfo(static_archive, true); mapinfo->populate_header(MetaspaceShared::core_region_alignment()); mapinfo->set_serialized_data(serialized_data); - mapinfo->set_cloned_vtables(cloned_vtables); + mapinfo->set_cloned_vtables(CppVtables::vtables_serialized_base()); mapinfo->open_for_write(); builder.write_archive(mapinfo, &_heap_info); diff --git a/src/hotspot/share/cds/serializeClosure.hpp b/src/hotspot/share/cds/serializeClosure.hpp index 275009286cb..3d401407f37 100644 --- a/src/hotspot/share/cds/serializeClosure.hpp +++ b/src/hotspot/share/cds/serializeClosure.hpp @@ -48,8 +48,20 @@ class SerializeClosure : public StackObj { // Read/write the bool pointed to by p. virtual void do_bool(bool* p) = 0; - // Read/write the region specified. 
- virtual void do_region(u_char* start, size_t size) = 0; + // Iterate on the pointers from p[0] through p[num_pointers-1] + void do_ptrs(void** p, size_t size) { + assert((intptr_t)p % sizeof(intptr_t) == 0, "bad alignment"); + assert(size % sizeof(intptr_t) == 0, "bad size"); + do_tag((int)size); + while (size > 0) { + do_ptr(p); + p++; + size -= sizeof(intptr_t); + } + } + + // Address of the first element being written (write only) + virtual char* region_top() = 0; // Check/write the tag. If reading, then compare the tag against // the passed in value and fail is they don't match. This allows diff --git a/src/hotspot/share/classfile/vmSymbols.cpp b/src/hotspot/share/classfile/vmSymbols.cpp index 05cd4767e9a..172d074255b 100644 --- a/src/hotspot/share/classfile/vmSymbols.cpp +++ b/src/hotspot/share/classfile/vmSymbols.cpp @@ -205,9 +205,9 @@ void vmSymbols::metaspace_pointers_do(MetaspaceClosure *closure) { } void vmSymbols::serialize(SerializeClosure* soc) { - soc->do_region((u_char*)&Symbol::_vm_symbols[FIRST_SID], + soc->do_ptrs((void**)&Symbol::_vm_symbols[FIRST_SID], (SID_LIMIT - FIRST_SID) * sizeof(Symbol::_vm_symbols[0])); - soc->do_region((u_char*)_type_signatures, sizeof(_type_signatures)); + soc->do_ptrs((void**)_type_signatures, sizeof(_type_signatures)); } #ifndef PRODUCT diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp index 69ff4bc78d6..d0f732edac4 100644 --- a/src/hotspot/share/code/relocInfo.cpp +++ b/src/hotspot/share/code/relocInfo.cpp @@ -277,30 +277,6 @@ DEFINE_COPY_INTO_AUX(Relocation) #undef DEFINE_COPY_INTO_AUX #undef DEFINE_COPY_INTO -//////// Methods for RelocationHolder - -RelocationHolder RelocationHolder::plus(int offset) const { - if (offset != 0) { - switch (type()) { - case relocInfo::none: - break; - case relocInfo::oop_type: - { - oop_Relocation* r = (oop_Relocation*)reloc(); - return oop_Relocation::spec(r->oop_index(), r->offset() + offset); - } - case relocInfo::metadata_type: - { - 
metadata_Relocation* r = (metadata_Relocation*)reloc(); - return metadata_Relocation::spec(r->metadata_index(), r->offset() + offset); - } - default: - ShouldNotReachHere(); - } - } - return (*this); -} - //////// Methods for flyweight Relocation types // some relocations can compute their own values @@ -402,24 +378,24 @@ void CallRelocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer void oop_Relocation::pack_data_to(CodeSection* dest) { short* p = (short*) dest->locs_end(); - p = pack_2_ints_to(p, _oop_index, _offset); + p = pack_1_int_to(p, _oop_index); dest->set_locs_end((relocInfo*) p); } void oop_Relocation::unpack_data() { - unpack_2_ints(_oop_index, _offset); + _oop_index = unpack_1_int(); } void metadata_Relocation::pack_data_to(CodeSection* dest) { short* p = (short*) dest->locs_end(); - p = pack_2_ints_to(p, _metadata_index, _offset); + p = pack_1_int_to(p, _metadata_index); dest->set_locs_end((relocInfo*) p); } void metadata_Relocation::unpack_data() { - unpack_2_ints(_metadata_index, _offset); + _metadata_index = unpack_1_int(); } @@ -855,8 +831,8 @@ void RelocIterator::print_current() { raw_oop = *oop_addr; oop_value = r->oop_value(); } - tty->print(" | [oop_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT " offset=%d]", - p2i(oop_addr), p2i(raw_oop), r->offset()); + tty->print(" | [oop_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT "]", + p2i(oop_addr), p2i(raw_oop)); // Do not print the oop by default--we want this routine to // work even during GC or other inconvenient times. 
if (WizardMode && oop_value != nullptr) { @@ -878,8 +854,8 @@ void RelocIterator::print_current() { raw_metadata = *metadata_addr; metadata_value = r->metadata_value(); } - tty->print(" | [metadata_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT " offset=%d]", - p2i(metadata_addr), p2i(raw_metadata), r->offset()); + tty->print(" | [metadata_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT "]", + p2i(metadata_addr), p2i(raw_metadata)); if (metadata_value != nullptr) { tty->print("metadata_value=" INTPTR_FORMAT ": ", p2i(metadata_value)); metadata_value->print_value_on(tty); diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp index 9f1db9f4684..6d0907d97de 100644 --- a/src/hotspot/share/code/relocInfo.hpp +++ b/src/hotspot/share/code/relocInfo.hpp @@ -129,12 +129,7 @@ class nmethod; // Value: an oop, or else the address (handle) of an oop // Instruction types: memory (load), set (load address) // Data: [] an oop stored in 4 bytes of instruction -// [n] n is the index of an oop in the CodeBlob's oop pool -// [[N]n l] and l is a byte offset to be applied to the oop -// [Nn Ll] both index and offset may be 32 bits if necessary -// Here is a special hack, used only by the old compiler: -// [[N]n 00] the value is the __address__ of the nth oop in the pool -// (Note that the offset allows optimal references to class variables.) +// [[N]n] the index of an oop in the CodeBlob's oop pool // // relocInfo::internal_word_type -- an address within the same CodeBlob // relocInfo::section_word_type -- same, but can refer to another section @@ -515,9 +510,6 @@ class RelocationHolder { Relocation* reloc() const { return (Relocation*)_relocbuf; } inline relocInfo::relocType type() const; - // Add a constant offset to a relocation. Helper for class Address. - RelocationHolder plus(int offset) const; - // Return a holder containing a relocation of type Reloc, constructed using args. template static RelocationHolder construct(const Args&... 
args) { @@ -788,8 +780,8 @@ class Relocation { void const_set_data_value (address x); void const_verify_data_value (address x); // platform-dependent utilities for decoding and patching instructions - void pd_set_data_value (address x, intptr_t off, bool verify_only = false); // a set or mem-ref - void pd_verify_data_value (address x, intptr_t off) { pd_set_data_value(x, off, true); } + void pd_set_data_value (address x, bool verify_only = false); // a set or mem-ref + void pd_verify_data_value (address x) { pd_set_data_value(x, true); } address pd_call_destination (address orig_addr = nullptr); void pd_set_call_destination (address x); @@ -895,41 +887,28 @@ relocInfo::relocType RelocationHolder::type() const { // A DataRelocation always points at a memory or load-constant instruction.. // It is absolute on most machines, and the constant is split on RISCs. // The specific subtypes are oop, external_word, and internal_word. -// By convention, the "value" does not include a separately reckoned "offset". 
class DataRelocation : public Relocation { public: DataRelocation(relocInfo::relocType type) : Relocation(type) {} - bool is_data() override { return true; } + bool is_data() override { return true; } - // both target and offset must be computed somehow from relocation data - virtual int offset() { return 0; } - address value() override = 0; - void set_value(address x) override { set_value(x, offset()); } - void set_value(address x, intptr_t o) { - if (addr_in_const()) + // target must be computed somehow from relocation data + address value() override = 0; + void set_value(address x) override { + if (addr_in_const()) { const_set_data_value(x); - else - pd_set_data_value(x, o); + } else { + pd_set_data_value(x); + } } - void verify_value(address x) { - if (addr_in_const()) + void verify_value(address x) { + if (addr_in_const()) { const_verify_data_value(x); - else - pd_verify_data_value(x, offset()); + } else { + pd_verify_data_value(x); + } } - - // The "o" (displacement) argument is relevant only to split relocations - // on RISC machines. In some CPUs (SPARC), the set-hi and set-lo ins'ns - // can encode more than 32 bits between them. This allows compilers to - // share set-hi instructions between addresses that differ by a small - // offset (e.g., different static variables in the same class). - // On such machines, the "x" argument to set_value on all set-lo - // instructions must be the same as the "x" argument for the - // corresponding set-hi instructions. The "o" arguments for the - // set-hi instructions are ignored, and must not affect the high-half - // immediate constant. The "o" arguments for the set-lo instructions are - // added into the low-half immediate constant, and must not overflow it. 
}; class post_call_nop_Relocation : public Relocation { @@ -976,40 +955,36 @@ class CallRelocation : public Relocation { class oop_Relocation : public DataRelocation { public: - // encode in one of these formats: [] [n] [n l] [Nn l] [Nn Ll] - // an oop in the CodeBlob's oop pool - static RelocationHolder spec(int oop_index, int offset = 0) { + // an oop in the CodeBlob's oop pool; encoded as [n] or [Nn] + static RelocationHolder spec(int oop_index) { assert(oop_index > 0, "must be a pool-resident oop"); - return RelocationHolder::construct(oop_index, offset); + return RelocationHolder::construct(oop_index); } - // an oop in the instruction stream + // an oop in the instruction stream; encoded as [] static RelocationHolder spec_for_immediate() { // If no immediate oops are generated, we can skip some walks over nmethods. // Assert that they don't get generated accidentally! assert(relocInfo::mustIterateImmediateOopsInCode(), "Must return true so we will search for oops as roots etc. in the code."); const int oop_index = 0; - const int offset = 0; // if you want an offset, use the oop pool - return RelocationHolder::construct(oop_index, offset); + return RelocationHolder::construct(oop_index); } void copy_into(RelocationHolder& holder) const override; private: jint _oop_index; // if > 0, index into CodeBlob::oop_at - jint _offset; // byte offset to apply to the oop itself - oop_Relocation(int oop_index, int offset) - : DataRelocation(relocInfo::oop_type), _oop_index(oop_index), _offset(offset) { } + oop_Relocation(int oop_index) + : DataRelocation(relocInfo::oop_type), _oop_index(oop_index) { } friend class RelocationHolder; oop_Relocation() : DataRelocation(relocInfo::oop_type) {} public: int oop_index() { return _oop_index; } - int offset() override { return _offset; } - // data is packed in "2_ints" format: [i o] or [Ii Oo] + // oop_index is packed in "1_int" format: [n] or [Nn] void pack_data_to(CodeSection* dest) override; void unpack_data() override; @@ 
-1031,27 +1006,24 @@ class oop_Relocation : public DataRelocation { class metadata_Relocation : public DataRelocation { public: - // encode in one of these formats: [] [n] [n l] [Nn l] [Nn Ll] - // an metadata in the CodeBlob's metadata pool - static RelocationHolder spec(int metadata_index, int offset = 0) { + // an metadata in the CodeBlob's metadata pool; encoded as [n] or [Nn] + static RelocationHolder spec(int metadata_index) { assert(metadata_index > 0, "must be a pool-resident metadata"); - return RelocationHolder::construct(metadata_index, offset); + return RelocationHolder::construct(metadata_index); } - // an metadata in the instruction stream + // an metadata in the instruction stream; encoded as [] static RelocationHolder spec_for_immediate() { const int metadata_index = 0; - const int offset = 0; // if you want an offset, use the metadata pool - return RelocationHolder::construct(metadata_index, offset); + return RelocationHolder::construct(metadata_index); } void copy_into(RelocationHolder& holder) const override; private: jint _metadata_index; // if > 0, index into nmethod::metadata_at - jint _offset; // byte offset to apply to the metadata itself - metadata_Relocation(int metadata_index, int offset) - : DataRelocation(relocInfo::metadata_type), _metadata_index(metadata_index), _offset(offset) { } + metadata_Relocation(int metadata_index) + : DataRelocation(relocInfo::metadata_type), _metadata_index(metadata_index) { } friend class RelocationHolder; metadata_Relocation() : DataRelocation(relocInfo::metadata_type) { } @@ -1063,9 +1035,8 @@ class metadata_Relocation : public DataRelocation { public: int metadata_index() { return _metadata_index; } - int offset() override { return _offset; } - // data is packed in "2_ints" format: [i o] or [Ii Oo] + // metadata_index is packed in "1_int" format: [n] or [Nn] void pack_data_to(CodeSection* dest) override; void unpack_data() override; diff --git a/src/hotspot/share/compiler/compilerOracle.cpp 
b/src/hotspot/share/compiler/compilerOracle.cpp index 87d879feac0..a81d60c9fc4 100644 --- a/src/hotspot/share/compiler/compilerOracle.cpp +++ b/src/hotspot/share/compiler/compilerOracle.cpp @@ -40,6 +40,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/jniHandles.hpp" #include "runtime/os.hpp" +#include "utilities/istream.hpp" #include "utilities/parseInteger.hpp" // Default compile commands, if defined, are parsed before any of the @@ -1081,55 +1082,29 @@ bool CompilerOracle::parse_from_file() { return true; } - char token[1024]; - int pos = 0; - int c = getc(stream); - bool success = true; - while(c != EOF && pos < (int)(sizeof(token)-1)) { - if (c == '\n') { - token[pos++] = '\0'; - if (!parse_from_line(token)) { - success = false; - } - pos = 0; - } else { - token[pos++] = c; - } - c = getc(stream); - } - token[pos++] = '\0'; - if (!parse_from_line(token)) { - success = false; - } - fclose(stream); - return success; + FileInput input(stream, /*need_close=*/ true); + return parse_from_input(&input, parse_from_line); } -bool CompilerOracle::parse_from_string(const char* str, bool (*parse_line)(char*)) { - char token[1024]; - int pos = 0; - const char* sp = str; - int c = *sp++; +bool CompilerOracle::parse_from_input(inputStream::Input* input, + CompilerOracle:: + parse_from_line_fn_t* parse_from_line) { bool success = true; - while (c != '\0' && pos < (int)(sizeof(token)-1)) { - if (c == '\n') { - token[pos++] = '\0'; - if (!parse_line(token)) { - success = false; - } - pos = 0; - } else { - token[pos++] = c; + for (inputStream in(input); !in.done(); in.next()) { + if (!parse_from_line(in.current_line())) { + success = false; } - c = *sp++; - } - token[pos++] = '\0'; - if (!parse_line(token)) { - success = false; } return success; } +bool CompilerOracle::parse_from_string(const char* str, + CompilerOracle:: + parse_from_line_fn_t* parse_from_line) { + MemoryInput input(str, strlen(str)); + return parse_from_input(&input, parse_from_line); +} + bool 
compilerOracle_init() { bool success = true; // Register default compile commands first - any commands specified via CompileCommand will diff --git a/src/hotspot/share/compiler/compilerOracle.hpp b/src/hotspot/share/compiler/compilerOracle.hpp index f330b3b6075..1a85e0629f9 100644 --- a/src/hotspot/share/compiler/compilerOracle.hpp +++ b/src/hotspot/share/compiler/compilerOracle.hpp @@ -27,6 +27,7 @@ #include "memory/allStatic.hpp" #include "oops/oopsHierarchy.hpp" +#include "utilities/istream.hpp" class methodHandle; @@ -120,11 +121,18 @@ enum class MemStatAction { }; class CompilerOracle : AllStatic { + public: + typedef bool parse_from_line_fn_t(char*); + private: static bool _quiet; static void print_parse_error(char* error_msg, char* original_line); static void print_command(CompileCommandEnum option, const char* name, enum OptionType type); + // The core parser. + static bool parse_from_input(inputStream::Input* input, + parse_from_line_fn_t* parse_from_line); + public: // True if the command file has been specified or is implicit static bool has_command_file(); @@ -177,7 +185,8 @@ class CompilerOracle : AllStatic { static bool option_matches_type(CompileCommandEnum option, T& value); // Reads from string instead of file - static bool parse_from_string(const char* option_string, bool (*parser)(char*)); + static bool parse_from_string(const char* option_string, + parse_from_line_fn_t* parser); static bool parse_from_line(char* line); static bool parse_from_line_quietly(char* line); static bool parse_compile_only(char* line); diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp index dd38d33b5d3..531559545d7 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,6 @@ #define SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_HPP #include "gc/g1/g1CollectionSetCandidates.hpp" -#include "gc/g1/g1_globals.hpp" #include "gc/shared/gc_globals.hpp" #include "gc/shared/workerThread.hpp" #include "memory/allocation.hpp" diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp index a176b847eab..ba309c97ca5 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" #include "gc/g1/g1HeapRegionManager.inline.hpp" -#include "gc/g1/g1_globals.hpp" +#include "gc/shared/gc_globals.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/shared/workerThread.hpp" #include "logging/log.hpp" diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp index 586766a8c8f..bf46e4a3351 100644 --- a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp +++ b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -27,7 +27,7 @@ #define SHARE_GC_G1_G1MONOTONICARENA_HPP #include "gc/shared/freeListAllocator.hpp" -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/lockFreeStack.hpp" diff --git a/src/hotspot/share/gc/g1/g1MonotonicArenaFreeMemoryTask.cpp b/src/hotspot/share/gc/g1/g1MonotonicArenaFreeMemoryTask.cpp index 337acfe2405..59a1afe6f34 100644 --- a/src/hotspot/share/gc/g1/g1MonotonicArenaFreeMemoryTask.cpp +++ b/src/hotspot/share/gc/g1/g1MonotonicArenaFreeMemoryTask.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,9 +29,8 @@ #include "gc/g1/g1HeapRegionRemSet.hpp" #include "gc/g1/g1MonotonicArenaFreeMemoryTask.hpp" #include "gc/g1/g1MonotonicArenaFreePool.hpp" -#include "gc/g1/g1_globals.hpp" -#include "gc/shared/gc_globals.hpp" #include "gc/shared/gcTraceTime.inline.hpp" +#include "gc/shared/gc_globals.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "runtime/os.hpp" diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp index 72f7a0b84e7..3f8f8e22a99 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,9 +29,9 @@ #include "gc/g1/g1RedirtyCardsQueue.hpp" #include "gc/g1/g1OopClosures.hpp" #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp" -#include "gc/g1/g1_globals.hpp" #include "gc/shared/ageTable.hpp" #include "gc/shared/copyFailedInfo.hpp" +#include "gc/shared/gc_globals.hpp" #include "gc/shared/partialArrayTaskStepper.hpp" #include "gc/shared/preservedMarks.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index a47fd379a70..63f48a15e8d 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -42,10 +42,10 @@ #include "gc/g1/g1Policy.hpp" #include "gc/g1/g1RootClosures.hpp" #include "gc/g1/g1RemSet.hpp" -#include "gc/g1/g1_globals.hpp" #include "gc/shared/bufferNode.hpp" #include "gc/shared/bufferNodeList.hpp" #include "gc/shared/gcTraceTime.inline.hpp" +#include "gc/shared/gc_globals.hpp" #include "jfr/jfrEvents.hpp" #include "memory/iterator.hpp" #include "memory/resourceArea.hpp" diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index bf1c8b738f8..720cf3a3629 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -49,10 +49,10 @@ #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp" #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp" #include "gc/g1/g1YoungGCPreEvacuateTasks.hpp" -#include "gc/g1/g1_globals.hpp" #include "gc/shared/concurrentGCBreakpoints.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/gcTimer.hpp" +#include "gc/shared/gc_globals.hpp" #include "gc/shared/preservedMarks.hpp" #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/weakProcessor.inline.hpp" diff --git a/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.cpp 
b/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.cpp index 60b7e0872f2..9bbb6bddb31 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp" -#include "gc/g1/g1_globals.hpp" +#include "gc/shared/gc_globals.hpp" #if ALLOCATION_FAILURE_INJECTOR diff --git a/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.hpp b/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.hpp index fb7e8cd3ad3..44677fe4710 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.hpp +++ b/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_GC_G1_G1YOUNGGCALLOCATIONFAILUREINJECTOR_HPP #define SHARE_GC_G1_G1YOUNGGCALLOCATIONFAILUREINJECTOR_HPP -#include "gc/g1/g1_globals.hpp" +#include "gc/shared/gc_globals.hpp" #include "memory/allStatic.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp b/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp index 7c71eb14d52..cfa047ea0ce 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp +++ b/src/hotspot/share/gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,8 +27,8 @@ #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp" -#include "gc/g1/g1_globals.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/shared/gc_globals.hpp" #if ALLOCATION_FAILURE_INJECTOR diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp index 42d9e028b5e..1440f788e18 100644 --- a/src/hotspot/share/gc/shared/gc_globals.hpp +++ b/src/hotspot/share/gc/shared/gc_globals.hpp @@ -118,7 +118,7 @@ product(bool, UseZGC, false, \ "Use the Z garbage collector") \ \ - product(bool, ZGenerational, false, \ + product(bool, ZGenerational, true, \ "Use the generational version of ZGC") \ \ product(bool, UseShenandoahGC, false, \ diff --git a/src/hotspot/share/gc/shared/oopStorageSet.hpp b/src/hotspot/share/gc/shared/oopStorageSet.hpp index 89cdde4b969..26e0e9f5a77 100644 --- a/src/hotspot/share/gc/shared/oopStorageSet.hpp +++ b/src/hotspot/share/gc/shared/oopStorageSet.hpp @@ -1,5 +1,5 @@ /* - * 
Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_GC_SHARED_OOPSTORAGESET_HPP #define SHARE_GC_SHARED_OOPSTORAGESET_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "utilities/debug.hpp" #include "utilities/enumIterator.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp index 23311a2868d..aab2f5d3123 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,8 +34,8 @@ #include "gc/shared/stringdedup/stringDedupStorageUse.hpp" #include "gc/shared/stringdedup/stringDedupTable.hpp" #include "logging/log.hpp" -#include "memory/allocation.hpp" #include "memory/iterator.hpp" +#include "nmt/memflags.hpp" #include "oops/access.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/cpuTimeCounters.hpp" diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp index 90b51160e7a..50f18a8c73f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved. + * Copyright (c) 2016, 2024, Red Hat, Inc. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "gc/shared/taskTerminator.hpp" #include "gc/shared/taskqueue.hpp" #include "gc/shenandoah/shenandoahPadding.hpp" -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "runtime/atomic.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutex.hpp" diff --git a/src/hotspot/share/gc/x/xArguments.cpp b/src/hotspot/share/gc/x/xArguments.cpp index 60e78d2c756..13cb302d14a 100644 --- a/src/hotspot/share/gc/x/xArguments.cpp +++ b/src/hotspot/share/gc/x/xArguments.cpp @@ -42,6 +42,8 @@ void XArguments::initialize_heap_flags_and_sizes() { } void XArguments::initialize() { + warning("Non-generational ZGC is deprecated."); + // Check mark stack size const size_t mark_stack_space_limit = XAddressSpaceLimit::mark_stack(); if (ZMarkStackSpaceLimit > mark_stack_space_limit) { diff --git a/src/hotspot/share/gc/x/xInitialize.cpp b/src/hotspot/share/gc/x/xInitialize.cpp index 01b79f3ffd7..156be17971f 100644 --- a/src/hotspot/share/gc/x/xInitialize.cpp +++ b/src/hotspot/share/gc/x/xInitialize.cpp @@ -41,7 +41,7 @@ XInitialize::XInitialize(XBarrierSet* barrier_set) { log_info(gc, init)("Version: %s (%s)", VM_Version::vm_release(), VM_Version::jdk_debug_level()); - log_info(gc, init)("Using legacy single-generation mode"); + log_info(gc, init)("Using deprecated non-generational mode"); // Early initialization XAddress::initialize(); diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp index e4de7a37040..2ec87a76156 100644 --- a/src/hotspot/share/gc/z/zArray.inline.hpp +++ b/src/hotspot/share/gc/z/zArray.inline.hpp @@ -96,7 +96,7 @@ ZActivatedArray::ZActivatedArray(bool locked) _array() {} template -ZActivatedArray::~ZActivatedArray() { +ZActivatedArray::~ZActivatedArray() { FreeHeap(_lock); } diff --git a/src/hotspot/share/gc/z/zNMT.cpp 
b/src/hotspot/share/gc/z/zNMT.cpp index 9a54c0f5963..7d1719aa6f0 100644 --- a/src/hotspot/share/gc/z/zNMT.cpp +++ b/src/hotspot/share/gc/z/zNMT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "gc/z/zGlobals.hpp" #include "gc/z/zNMT.hpp" #include "gc/z/zVirtualMemory.hpp" -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "nmt/memTracker.hpp" #include "utilities/nativeCallStack.hpp" diff --git a/src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp b/src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp index d786a87e3b8..8f1d2b4d5b1 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_JFR_LEAKPROFILER_JFRBITSET_HPP #define SHARE_JFR_LEAKPROFILER_JFRBITSET_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "utilities/objectBitSet.inline.hpp" typedef ObjectBitSet JFRBitSet; diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp index 9627259e264..57b29a09d01 100644 --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -248,12 +248,6 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) { ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_stack_variable, jt); if (jt->has_last_Java_frame()) { - // Traverse the monitor chunks - MonitorChunk* chunk = jt->monitor_chunks(); - for (; chunk != nullptr; chunk = chunk->next()) { - chunk->oops_do(&rcl); - } - if (rcl.complete()) { return true; } diff --git a/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.hpp b/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.hpp index 710198efddd..c5a476c78a8 100644 --- a/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.hpp +++ b/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_JFR_PERIODIC_JFRNATIVEMEMORYEVENT_HPP #define SHARE_JFR_PERIODIC_JFRNATIVEMEMORYEVENT_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "nmt/nmtUsage.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ticks.hpp" diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp index 50bb2e0c68b..495ca66d867 100644 --- a/src/hotspot/share/memory/allocation.hpp +++ b/src/hotspot/share/memory/allocation.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #define SHARE_MEMORY_ALLOCATION_HPP #include "memory/allStatic.hpp" +#include "nmt/memflags.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -99,63 +100,6 @@ typedef AllocFailStrategy::AllocFailEnum AllocFailType; // void FreeHeap(void* p); // -#define MEMORY_TYPES_DO(f) \ - /* Memory type by sub systems. It occupies lower byte. */ \ - f(mtJavaHeap, "Java Heap") /* Java heap */ \ - f(mtClass, "Class") /* Java classes */ \ - f(mtThread, "Thread") /* thread objects */ \ - f(mtThreadStack, "Thread Stack") \ - f(mtCode, "Code") /* generated code */ \ - f(mtGC, "GC") \ - f(mtGCCardSet, "GCCardSet") /* G1 card set remembered set */ \ - f(mtCompiler, "Compiler") \ - f(mtJVMCI, "JVMCI") \ - f(mtInternal, "Internal") /* memory used by VM, but does not belong to */ \ - /* any of above categories, and not used by */ \ - /* NMT */ \ - f(mtOther, "Other") /* memory not used by VM */ \ - f(mtSymbol, "Symbol") \ - f(mtNMT, "Native Memory Tracking") /* memory used by NMT */ \ - f(mtClassShared, "Shared class space") /* class data sharing */ \ - f(mtChunk, "Arena Chunk") /* chunk that holds content of arenas */ \ - f(mtTest, "Test") /* Test type for verifying NMT */ \ - f(mtTracing, "Tracing") \ - f(mtLogging, "Logging") \ - f(mtStatistics, "Statistics") \ - f(mtArguments, "Arguments") \ - f(mtModule, "Module") \ - f(mtSafepoint, "Safepoint") \ - f(mtSynchronizer, "Synchronization") \ - f(mtServiceability, "Serviceability") \ - f(mtMetaspace, "Metaspace") \ - f(mtStringDedup, "String Deduplication") \ - f(mtObjectMonitor, "Object Monitors") \ - f(mtNone, "Unknown") \ - //end - -#define MEMORY_TYPE_DECLARE_ENUM(type, human_readable) \ - type, - -/* - * Memory types - */ -enum class MEMFLAGS : uint8_t { - MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_ENUM) - mt_number_of_types 
// number of memory types (mtDontTrack - // is not included as validate type) -}; -// Extra insurance that MEMFLAGS truly has the same size as uint8_t. -STATIC_ASSERT(sizeof(MEMFLAGS) == sizeof(uint8_t)); - -#define MEMORY_TYPE_SHORTNAME(type, human_readable) \ - constexpr MEMFLAGS type = MEMFLAGS::type; - -// Generate short aliases for the enum values. E.g. mtGC instead of MEMFLAGS::mtGC. -MEMORY_TYPES_DO(MEMORY_TYPE_SHORTNAME) - -// Make an int version of the sentinel end value. -constexpr int mt_number_of_types = static_cast<int>(MEMFLAGS::mt_number_of_types); - extern bool NMT_track_callsite; class NativeCallStack; diff --git a/src/hotspot/share/memory/guardedMemory.cpp b/src/hotspot/share/memory/guardedMemory.cpp index 25a50f8ffb3..91adb016c65 100644 --- a/src/hotspot/share/memory/guardedMemory.cpp +++ b/src/hotspot/share/memory/guardedMemory.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,9 +22,8 @@ * */ #include "precompiled.hpp" -#include "memory/allocation.hpp" -#include "memory/allocation.inline.hpp" #include "memory/guardedMemory.hpp" +#include "nmt/memflags.hpp" #include "runtime/os.hpp" void* GuardedMemory::wrap_copy(const void* ptr, const size_t len, const void* tag) { diff --git a/src/hotspot/share/memory/padded.hpp b/src/hotspot/share/memory/padded.hpp index 0597dbd9f3c..bca1d168cb5 100644 --- a/src/hotspot/share/memory/padded.hpp +++ b/src/hotspot/share/memory/padded.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_MEMORY_PADDED_HPP #define SHARE_MEMORY_PADDED_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "utilities/align.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/nmt/allocationSite.hpp b/src/hotspot/share/nmt/allocationSite.hpp index 5093bb39f0c..022fb6f4390 100644 --- a/src/hotspot/share/nmt/allocationSite.hpp +++ b/src/hotspot/share/nmt/allocationSite.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_NMT_ALLOCATIONSITE_HPP #define SHARE_NMT_ALLOCATIONSITE_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "utilities/nativeCallStack.hpp" // Allocation site represents a code path that makes a memory diff --git a/src/hotspot/share/nmt/mallocHeader.cpp b/src/hotspot/share/nmt/mallocHeader.cpp index 57d1c4a313a..a125ba416b1 100644 --- a/src/hotspot/share/nmt/mallocHeader.cpp +++ b/src/hotspot/share/nmt/mallocHeader.cpp @@ -26,11 +26,17 @@ #include "nmt/mallocHeader.inline.hpp" #include "nmt/mallocSiteTable.hpp" +#include "nmt/memflags.hpp" #include "runtime/os.hpp" +#include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/nativeCallStack.hpp" #include "utilities/ostream.hpp" +// The malloc header, as well as the coming VMATree implementation, rely on MEMFLAGS +// fitting into eight bits. 
+STATIC_ASSERT(sizeof(MEMFLAGS) == sizeof(uint8_t)); + void MallocHeader::print_block_on_error(outputStream* st, address bad_address) const { assert(bad_address >= (address)this, "sanity"); diff --git a/src/hotspot/share/nmt/mallocHeader.hpp b/src/hotspot/share/nmt/mallocHeader.hpp index c0fae5803ce..6d847a22de3 100644 --- a/src/hotspot/share/nmt/mallocHeader.hpp +++ b/src/hotspot/share/nmt/mallocHeader.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,7 +26,7 @@ #ifndef SHARE_NMT_MALLOCHEADER_HPP #define SHARE_NMT_MALLOCHEADER_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/nativeCallStack.hpp" diff --git a/src/hotspot/share/nmt/mallocTracker.hpp b/src/hotspot/share/nmt/mallocTracker.hpp index f3b38b43ef0..5d755f60ae6 100644 --- a/src/hotspot/share/nmt/mallocTracker.hpp +++ b/src/hotspot/share/nmt/mallocTracker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -26,8 +26,8 @@ #ifndef SHARE_NMT_MALLOCTRACKER_HPP #define SHARE_NMT_MALLOCTRACKER_HPP -#include "memory/allocation.hpp" #include "nmt/mallocHeader.hpp" +#include "nmt/memflags.hpp" #include "nmt/nmtCommon.hpp" #include "runtime/atomic.hpp" #include "runtime/threadCritical.hpp" diff --git a/src/hotspot/share/nmt/memFlagBitmap.hpp b/src/hotspot/share/nmt/memFlagBitmap.hpp index 87815536980..0464179948b 100644 --- a/src/hotspot/share/nmt/memFlagBitmap.hpp +++ b/src/hotspot/share/nmt/memFlagBitmap.hpp @@ -1,5 +1,6 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Red Hat, Inc. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +26,8 @@ #ifndef SHARE_NMT_MEMFLAGBITMAP_HPP #define SHARE_NMT_MEMFLAGBITMAP_HPP -#include "memory/allocation.hpp" // for mt_number_of_types +#include "nmt/memflags.hpp" +#include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" class MemFlagBitmap { diff --git a/src/hotspot/share/nmt/memMapPrinter.cpp b/src/hotspot/share/nmt/memMapPrinter.cpp index fdf9674f290..ec5003c562e 100644 --- a/src/hotspot/share/nmt/memMapPrinter.cpp +++ b/src/hotspot/share/nmt/memMapPrinter.cpp @@ -29,8 +29,8 @@ #include "logging/logAsyncWriter.hpp" #include "gc/shared/collectedHeap.hpp" -#include "memory/allocation.hpp" #include "memory/universe.hpp" +#include "nmt/memflags.hpp" #include "runtime/nonJavaThread.hpp" #include "runtime/osThread.hpp" #include "runtime/thread.hpp" diff --git a/src/hotspot/share/nmt/memMapPrinter.hpp b/src/hotspot/share/nmt/memMapPrinter.hpp index 09e1cea113b..67706c4d4d7 100644 --- a/src/hotspot/share/nmt/memMapPrinter.hpp +++ b/src/hotspot/share/nmt/memMapPrinter.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. 
- * Copyright (c) 2023, Red Hat, Inc. and/or its affiliates. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Red Hat, Inc. and/or its affiliates. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,8 @@ #ifndef SHARE_SERVICES_MEMMAPPRINTER_HPP #define SHARE_SERVICES_MEMMAPPRINTER_HPP -#include "memory/allocation.hpp" #include "memory/allStatic.hpp" +#include "nmt/memflags.hpp" #include "utilities/globalDefinitions.hpp" #ifdef LINUX diff --git a/src/hotspot/share/nmt/memReporter.cpp b/src/hotspot/share/nmt/memReporter.cpp index 964911bb595..edb289c2f35 100644 --- a/src/hotspot/share/nmt/memReporter.cpp +++ b/src/hotspot/share/nmt/memReporter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,10 +23,10 @@ */ #include "precompiled.hpp" #include "cds/filemap.hpp" -#include "memory/allocation.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceUtils.hpp" #include "nmt/mallocTracker.hpp" +#include "nmt/memflags.hpp" #include "nmt/memReporter.hpp" #include "nmt/threadStackTracker.hpp" #include "nmt/virtualMemoryTracker.hpp" diff --git a/src/hotspot/share/nmt/memTracker.cpp b/src/hotspot/share/nmt/memTracker.cpp index a63e754b9f5..5504f436b7d 100644 --- a/src/hotspot/share/nmt/memTracker.cpp +++ b/src/hotspot/share/nmt/memTracker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -146,12 +146,17 @@ void MemTracker::report(bool summary_only, outputStream* output, size_t scale) { void MemTracker::tuning_statistics(outputStream* out) { // NMT statistics out->print_cr("Native Memory Tracking Statistics:"); - out->print_cr("State: %s", NMTUtil::tracking_level_to_string(_tracking_level)); - out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets()); - out->print_cr(" Tracking stack depth: %d", NMT_TrackingStackDepth); - out->cr(); - MallocSiteTable::print_tuning_statistics(out); - out->cr(); + out->print_cr("State: %s", + NMTUtil::tracking_level_to_string(_tracking_level)); + if (_tracking_level == NMT_detail) { + out->print_cr("Malloc allocation site table size: %d", + MallocSiteTable::hash_buckets()); + out->print_cr(" Tracking stack depth: %d", + NMT_TrackingStackDepth); + out->cr(); + MallocSiteTable::print_tuning_statistics(out); + out->cr(); + } out->print_cr("Preinit state:"); NMTPreInit::print_state(out); MallocLimitHandler::print_on(out); diff --git a/src/hotspot/share/nmt/memflags.hpp b/src/hotspot/share/nmt/memflags.hpp new file mode 100644 index 00000000000..530c9ae9d95 --- /dev/null +++ b/src/hotspot/share/nmt/memflags.hpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_NMT_MEMFLAGS_HPP +#define SHARE_NMT_MEMFLAGS_HPP + +#include "utilities/globalDefinitions.hpp" + +#define MEMORY_TYPES_DO(f) \ + /* Memory type by sub systems. It occupies lower byte. */ \ + f(mtJavaHeap, "Java Heap") /* Java heap */ \ + f(mtClass, "Class") /* Java classes */ \ + f(mtThread, "Thread") /* thread objects */ \ + f(mtThreadStack, "Thread Stack") \ + f(mtCode, "Code") /* generated code */ \ + f(mtGC, "GC") \ + f(mtGCCardSet, "GCCardSet") /* G1 card set remembered set */ \ + f(mtCompiler, "Compiler") \ + f(mtJVMCI, "JVMCI") \ + f(mtInternal, "Internal") /* memory used by VM, but does not belong to */ \ + /* any of above categories, and not used by */ \ + /* NMT */ \ + f(mtOther, "Other") /* memory not used by VM */ \ + f(mtSymbol, "Symbol") \ + f(mtNMT, "Native Memory Tracking") /* memory used by NMT */ \ + f(mtClassShared, "Shared class space") /* class data sharing */ \ + f(mtChunk, "Arena Chunk") /* chunk that holds content of arenas */ \ + f(mtTest, "Test") /* Test type for verifying NMT */ \ + f(mtTracing, "Tracing") \ + f(mtLogging, "Logging") \ + f(mtStatistics, "Statistics") \ + f(mtArguments, "Arguments") \ + f(mtModule, "Module") \ + f(mtSafepoint, "Safepoint") \ + f(mtSynchronizer, "Synchronization") \ + f(mtServiceability, "Serviceability") \ + f(mtMetaspace, "Metaspace") \ + f(mtStringDedup, "String Deduplication") \ + f(mtObjectMonitor, "Object Monitors") \ + f(mtNone, "Unknown") \ + //end + +#define MEMORY_TYPE_DECLARE_ENUM(type, human_readable) \ + type, + +enum class MEMFLAGS : uint8_t { + 
MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_ENUM) + mt_number_of_types // number of memory types (mtDontTrack + // is not included as validate type) +}; + +#define MEMORY_TYPE_SHORTNAME(type, human_readable) \ + constexpr MEMFLAGS type = MEMFLAGS::type; + +// Generate short aliases for the enum values. E.g. mtGC instead of MEMFLAGS::mtGC. +MEMORY_TYPES_DO(MEMORY_TYPE_SHORTNAME) + +// Make an int version of the sentinel end value. +constexpr int mt_number_of_types = static_cast<int>(MEMFLAGS::mt_number_of_types); + +#endif // SHARE_NMT_MEMFLAGS_HPP diff --git a/src/hotspot/share/nmt/nmtCommon.hpp b/src/hotspot/share/nmt/nmtCommon.hpp index c6d1bcddf5e..8ca0965b3d3 100644 --- a/src/hotspot/share/nmt/nmtCommon.hpp +++ b/src/hotspot/share/nmt/nmtCommon.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -27,7 +27,8 @@ #ifndef SHARE_NMT_NMTCOMMON_HPP #define SHARE_NMT_NMTCOMMON_HPP -#include "memory/allocation.hpp" // for MEMFLAGS only +#include "memory/allStatic.hpp" +#include "nmt/memflags.hpp" #include "utilities/align.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/nmt/nmtDCmd.cpp b/src/hotspot/share/nmt/nmtDCmd.cpp index 80b2ed6141a..cb42212ba6b 100644 --- a/src/hotspot/share/nmt/nmtDCmd.cpp +++ b/src/hotspot/share/nmt/nmtDCmd.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -137,8 +137,10 @@ void NMTDCmd::execute(DCmdSource source, TRAPS) { output()->print_cr("No detail baseline for comparison"); } } else if (_statistics.value()) { - if (check_detail_tracking_level(output())) { + if (MemTracker::enabled()) { MemTracker::tuning_statistics(output()); + } else { + output()->print_cr("Native memory tracking is not enabled"); } } else { ShouldNotReachHere(); diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp index c82eaad50e1..4b328806122 100644 --- a/src/hotspot/share/opto/escape.cpp +++ b/src/hotspot/share/opto/escape.cpp @@ -548,8 +548,8 @@ bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const { if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) { NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());) return false; - } else if (nesting > 0 && load_type->isa_narrowklass()) { - NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Nested NarrowKlass Load: %s", n->_idx, _invocation, use_use->Name());) + } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) { + NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. 
[Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());) return false; } } @@ -2184,7 +2184,8 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 || strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 || - strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0) + strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 || + strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0) ))) { call->dump(); fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp index cd9b76eb44d..f41e8a7b8b3 100644 --- a/src/hotspot/share/opto/superword.cpp +++ b/src/hotspot/share/opto/superword.cpp @@ -45,12 +45,13 @@ SuperWord::SuperWord(const VLoopAnalyzer &vloop_analyzer) : _arena(mtCompiler), _node_info(arena(), _vloop.estimated_body_length(), 0, SWNodeInfo::initial), // info needed per node _clone_map(phase()->C->clone_map()), // map of nodes created in cloning - _align_to_ref(nullptr), // memory reference to align vectors to _pairset(&_arena, _vloop_analyzer), _packset(&_arena, _vloop_analyzer NOT_PRODUCT(COMMA is_trace_superword_packset()) NOT_PRODUCT(COMMA is_trace_superword_rejections()) ), + _mem_ref_for_main_loop_alignment(nullptr), + _aw_for_main_loop_alignment(0), _do_vector_loop(phase()->C->do_vector_loop()), // whether to do vectorization/simd style _num_work_vecs(0), // amount of vector work we have _num_reductions(0) // amount of reduction work we have @@ -516,22 +517,12 @@ void SuperWord::find_adjacent_refs() { int max_idx; - // Take the first mem_ref as the reference to align to. The pre-loop trip count is - // modified to align this reference to a vector-aligned address. If strict alignment - // is required, we may change the reference later (see filter_packs_for_alignment()). 
- MemNode* align_to_mem_ref = nullptr; - while (memops.size() != 0) { // Find a memory reference to align to. MemNode* mem_ref = find_align_to_ref(memops, max_idx); if (mem_ref == nullptr) break; int iv_adjustment = get_iv_adjustment(mem_ref); - if (align_to_mem_ref == nullptr) { - align_to_mem_ref = mem_ref; - set_align_to_ref(align_to_mem_ref); - } - const VPointer& align_to_ref_p = vpointer(mem_ref); // Set alignment relative to "align_to_ref" for all related memory operations. for (int i = memops.size() - 1; i >= 0; i--) { @@ -573,9 +564,6 @@ void SuperWord::find_adjacent_refs() { } } // while (memops.size() != 0) - assert(_pairset.is_empty() || align_to_mem_ref != nullptr, - "pairset empty or we find the alignment reference"); - #ifndef PRODUCT if (is_trace_superword_packset()) { tty->print_cr("\nAfter Superword::find_adjacent_refs"); @@ -1723,7 +1711,11 @@ void SuperWord::filter_packs_for_alignment() { if (current->is_constrained()) { // Solution is constrained (not trivial) // -> must change pre-limit to achieve alignment - set_align_to_ref(current->as_constrained()->mem_ref()); + MemNode const* mem = current->as_constrained()->mem_ref(); + Node_List* pack = get_pack(mem); + assert(pack != nullptr, "memop of final solution must still be packed"); + _mem_ref_for_main_loop_alignment = mem; + _aw_for_main_loop_alignment = pack->size() * mem->memory_size(); } } @@ -3397,6 +3389,32 @@ LoadNode::ControlDependency SuperWord::control_dependency(Node_List* p) { return dep; } +// Find the memop pack with the maximum vector width, unless they were already +// determined by SuperWord::filter_packs_for_alignment(). 
+void SuperWord::determine_mem_ref_and_aw_for_main_loop_alignment() { + if (_mem_ref_for_main_loop_alignment != nullptr) { + assert(vectors_should_be_aligned(), "mem_ref only set if filtered for alignment"); + return; + } + + MemNode const* mem_ref = nullptr; + int max_aw = 0; + for (int i = 0; i < _packset.length(); i++) { + Node_List* pack = _packset.at(i); + MemNode* first = pack->at(0)->isa_Mem(); + if (first == nullptr) { continue; } + + int vw = first->memory_size() * pack->size(); + if (vw > max_aw) { + max_aw = vw; + mem_ref = first; + } + } + assert(mem_ref != nullptr && max_aw > 0, "found mem_ref and aw"); + _mem_ref_for_main_loop_alignment = mem_ref; + _aw_for_main_loop_alignment = max_aw; +} + #define TRACE_ALIGN_VECTOR_NODE(node) { \ DEBUG_ONLY( \ if (is_trace_align_vector()) { \ @@ -3407,11 +3425,14 @@ LoadNode::ControlDependency SuperWord::control_dependency(Node_List* p) { } \ // Ensure that the main loop vectors are aligned by adjusting the pre loop limit. We memory-align -// the address of "align_to_ref" to the maximal possible vector width. We adjust the pre-loop -// iteration count by adjusting the pre-loop limit. +// the address of "_mem_ref_for_main_loop_alignment" to "_aw_for_main_loop_alignment", which is a +// sufficiently large alignment width. We adjust the pre-loop iteration count by adjusting the +// pre-loop limit. 
void SuperWord::adjust_pre_loop_limit_to_align_main_loop_vectors() { - const MemNode* align_to_ref = _align_to_ref; - assert(align_to_ref != nullptr, "align_to_ref must be set"); + determine_mem_ref_and_aw_for_main_loop_alignment(); + const MemNode* align_to_ref = _mem_ref_for_main_loop_alignment; + const int aw = _aw_for_main_loop_alignment; + assert(align_to_ref != nullptr && aw > 0, "must have alignment reference and aw"); assert(cl()->is_main_loop(), "can only do alignment for main loop"); // The opaque node for the limit, where we adjust the input @@ -3556,10 +3577,7 @@ void SuperWord::adjust_pre_loop_limit_to_align_main_loop_vectors() { // = MIN(new_limit, orig_limit) (15a, stride > 0) // constrained_limit = MAX(old_limit - adjust_pre_iter, orig_limit) // = MAX(new_limit, orig_limit) (15a, stride < 0) - - // We chose an aw that is the maximal possible vector width for the type of - // align_to_ref. - const int aw = vector_width_in_bytes(align_to_ref); + // const int stride = iv_stride(); const int scale = align_to_ref_p.scale_in_bytes(); const int offset = align_to_ref_p.offset_in_bytes(); diff --git a/src/hotspot/share/opto/superword.hpp b/src/hotspot/share/opto/superword.hpp index 7d1ba1131f3..159032d94b9 100644 --- a/src/hotspot/share/opto/superword.hpp +++ b/src/hotspot/share/opto/superword.hpp @@ -411,11 +411,15 @@ class SuperWord : public ResourceObj { GrowableArray<SWNodeInfo> _node_info; // Info needed per node CloneMap& _clone_map; // map of nodes created in cloning - MemNode const* _align_to_ref; // Memory reference that pre-loop will align to PairSet _pairset; PackSet _packset; + // Memory reference, and the alignment width (aw) for which we align the main-loop, + // by adjusting the pre-loop limit. 
+ MemNode const* _mem_ref_for_main_loop_alignment; + int _aw_for_main_loop_alignment; + public: SuperWord(const VLoopAnalyzer &vloop_analyzer); @@ -563,8 +567,6 @@ class SuperWord : public ResourceObj { Arena* arena() { return &_arena; } int get_vw_bytes_special(MemNode* s); - const MemNode* align_to_ref() const { return _align_to_ref; } - void set_align_to_ref(const MemNode* m) { _align_to_ref = m; } // Ensure node_info contains element "i" void grow_node_info(int i) { if (i >= _node_info.length()) _node_info.at_put_grow(i, SWNodeInfo::initial); } @@ -670,6 +672,7 @@ class SuperWord : public ResourceObj { // Alignment within a vector memory reference int memory_alignment(MemNode* s, int iv_adjust); // Ensure that the main loop vectors are aligned by adjusting the pre loop limit. + void determine_mem_ref_and_aw_for_main_loop_alignment(); void adjust_pre_loop_limit_to_align_main_loop_vectors(); }; diff --git a/src/hotspot/share/opto/vectorIntrinsics.cpp b/src/hotspot/share/opto/vectorIntrinsics.cpp index 2039e9c27be..807912327e6 100644 --- a/src/hotspot/share/opto/vectorIntrinsics.cpp +++ b/src/hotspot/share/opto/vectorIntrinsics.cpp @@ -59,6 +59,17 @@ static bool check_vbox(const TypeInstPtr* vbox_type) { } #endif +#define log_if_needed(...) \ + if (C->print_intrinsics()) { \ + tty->print_cr(__VA_ARGS__); \ + } + +#ifndef PRODUCT +#define non_product_log_if_needed(...) log_if_needed(__VA_ARGS__) +#else +#define non_product_log_if_needed(...) 
+#endif + static bool is_vector_mask(ciKlass* klass) { return klass->is_subclass_of(ciEnv::current()->vector_VectorMask_klass()); } @@ -85,12 +96,8 @@ bool LibraryCallKit::arch_supports_vector_rotate(int opc, int num_elem, BasicTyp if ((mask_use_type & VecMaskUseLoad) != 0) { if (!Matcher::match_rule_supported_vector(Op_VectorLoadMask, num_elem, elem_bt) || !Matcher::match_rule_supported_vector(Op_LoadVector, num_elem, T_BOOLEAN)) { - #ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected vector mask loading (%s,%s,%d) because architecture does not support it", - NodeClassNames[Op_VectorLoadMask], type2name(elem_bt), num_elem); - } - #endif + non_product_log_if_needed(" ** Rejected vector mask loading (%s,%s,%d) because architecture does not support it", + NodeClassNames[Op_VectorLoadMask], type2name(elem_bt), num_elem); return false; } } @@ -98,12 +105,8 @@ bool LibraryCallKit::arch_supports_vector_rotate(int opc, int num_elem, BasicTyp if ((mask_use_type & VecMaskUsePred) != 0) { if (!Matcher::has_predicated_vectors() || !Matcher::match_rule_supported_vector_masked(opc, num_elem, elem_bt)) { - #ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr("Rejected vector mask predicate using (%s,%s,%d) because architecture does not support it", - NodeClassNames[opc], type2name(elem_bt), num_elem); - } - #endif + non_product_log_if_needed("Rejected vector mask predicate using (%s,%s,%d) because architecture does not support it", + NodeClassNames[opc], type2name(elem_bt), num_elem); return false; } } @@ -185,43 +188,27 @@ Node* GraphKit::vector_shift_count(Node* cnt, int shift_op, BasicType bt, int nu bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type, VectorMaskUseType mask_use_type, bool has_scalar_args) { // Check that the operation is valid. 
if (sopc <= 0) { -#ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected intrinsification because no valid vector op could be extracted"); - } -#endif + non_product_log_if_needed(" ** Rejected intrinsification because no valid vector op could be extracted"); return false; } if (VectorNode::is_vector_rotate(sopc)) { if(!arch_supports_vector_rotate(sopc, num_elem, type, mask_use_type, has_scalar_args)) { -#ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected vector op (%s,%s,%d) because architecture does not support variable vector shifts", - NodeClassNames[sopc], type2name(type), num_elem); - } -#endif + non_product_log_if_needed(" ** Rejected vector op (%s,%s,%d) because architecture does not support variable vector shifts", + NodeClassNames[sopc], type2name(type), num_elem); return false; } } else if (VectorNode::is_vector_integral_negate(sopc)) { if (!VectorNode::is_vector_integral_negate_supported(sopc, num_elem, type, false)) { -#ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected vector op (%s,%s,%d) because architecture does not support integral vector negate", - NodeClassNames[sopc], type2name(type), num_elem); - } -#endif + non_product_log_if_needed(" ** Rejected vector op (%s,%s,%d) because architecture does not support integral vector negate", + NodeClassNames[sopc], type2name(type), num_elem); return false; } } else { // Check that architecture supports this op-size-type combination. 
if (!Matcher::match_rule_supported_vector(sopc, num_elem, type)) { -#ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected vector op (%s,%s,%d) because architecture does not support it", - NodeClassNames[sopc], type2name(type), num_elem); - } -#endif + non_product_log_if_needed(" ** Rejected vector op (%s,%s,%d) because architecture does not support it", + NodeClassNames[sopc], type2name(type), num_elem); return false; } else { assert(Matcher::match_rule_supported(sopc), "must be supported"); @@ -230,23 +217,15 @@ bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type if (num_elem == 1) { if (mask_use_type != VecMaskNotUsed) { -#ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected vector mask op (%s,%s,%d) because architecture does not support it", - NodeClassNames[sopc], type2name(type), num_elem); - } -#endif + non_product_log_if_needed(" ** Rejected vector mask op (%s,%s,%d) because architecture does not support it", + NodeClassNames[sopc], type2name(type), num_elem); return false; } if (sopc != 0) { if (sopc != Op_LoadVector && sopc != Op_StoreVector) { -#ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr(" ** Not a svml call or load/store vector op (%s,%s,%d)", - NodeClassNames[sopc], type2name(type), num_elem); - } -#endif + non_product_log_if_needed(" ** Not a svml call or load/store vector op (%s,%s,%d)", + NodeClassNames[sopc], type2name(type), num_elem); return false; } } @@ -254,10 +233,8 @@ bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type if (!has_scalar_args && VectorNode::is_vector_shift(sopc) && Matcher::supports_vector_variable_shifts() == false) { - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected vector op (%s,%s,%d) because architecture does not support variable vector shifts", - NodeClassNames[sopc], type2name(type), num_elem); - } + log_if_needed(" ** Rejected vector op (%s,%s,%d) because architecture does not support 
variable vector shifts", + NodeClassNames[sopc], type2name(type), num_elem); return false; } @@ -267,12 +244,8 @@ bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type if ((mask_use_type & VecMaskUseLoad) != 0) { if (!Matcher::match_rule_supported_vector(Op_VectorLoadMask, num_elem, type) || !Matcher::match_rule_supported_vector(Op_LoadVector, num_elem, T_BOOLEAN)) { - #ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected vector mask loading (%s,%s,%d) because architecture does not support it", - NodeClassNames[Op_VectorLoadMask], type2name(type), num_elem); - } - #endif + non_product_log_if_needed(" ** Rejected vector mask loading (%s,%s,%d) because architecture does not support it", + NodeClassNames[Op_VectorLoadMask], type2name(type), num_elem); return false; } } @@ -283,12 +256,8 @@ bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type if ((mask_use_type & VecMaskUseStore) != 0) { if (!Matcher::match_rule_supported_vector(Op_VectorStoreMask, num_elem, type) || !Matcher::match_rule_supported_vector(Op_StoreVector, num_elem, T_BOOLEAN)) { - #ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr("Rejected vector mask storing (%s,%s,%d) because architecture does not support it", - NodeClassNames[Op_VectorStoreMask], type2name(type), num_elem); - } - #endif + non_product_log_if_needed("Rejected vector mask storing (%s,%s,%d) because architecture does not support it", + NodeClassNames[Op_VectorStoreMask], type2name(type), num_elem); return false; } } @@ -305,12 +274,8 @@ bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type is_supported |= Matcher::supports_vector_predicate_op_emulation(sopc, num_elem, type); if (!is_supported) { - #ifndef PRODUCT - if (C->print_intrinsics()) { - tty->print_cr("Rejected vector mask predicate using (%s,%s,%d) because architecture does not support it", - NodeClassNames[sopc], type2name(type), num_elem); - } - #endif + 
non_product_log_if_needed("Rejected vector mask predicate using (%s,%s,%d) because architecture does not support it", + NodeClassNames[sopc], type2name(type), num_elem); return false; } } @@ -360,27 +325,21 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { if (opr == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || !opr->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(3)->Opcode()], NodeClassNames[argument(4)->Opcode()]); - } return false; // not enough info for intrinsification } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } @@ -390,23 +349,17 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]); - } + log_if_needed(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]); return false; // not enough info for intrinsification } if (!is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - 
tty->print_cr(" ** mask klass argument not initialized"); - } + log_if_needed(" ** mask klass argument not initialized"); return false; } if (vmask_type->maybe_null()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** null mask values are not allowed for masked op"); - } + log_if_needed(" ** null mask values are not allowed for masked op"); return false; } } @@ -416,17 +369,13 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { int opc = VectorSupport::vop2ideal(opr->get_con(), elem_bt); int sopc = VectorNode::opcode(opc, elem_bt); if ((opc != Op_CallLeafVector) && (sopc == 0)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** operation not supported: opc=%s bt=%s", NodeClassNames[opc], type2name(elem_bt)); - } + log_if_needed(" ** operation not supported: opc=%s bt=%s", NodeClassNames[opc], type2name(elem_bt)); return false; // operation not supported } if (num_elem == 1) { if (opc != Op_CallLeafVector || elem_bt != T_DOUBLE) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a svml call: arity=%d opc=%d vlen=%d etype=%s", + log_if_needed(" ** not a svml call: arity=%d opc=%d vlen=%d etype=%s", n, opc, num_elem, type2name(elem_bt)); - } return false; } } @@ -439,22 +388,16 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { if (opc == Op_CallLeafVector) { if (!UseVectorStubs) { - if (C->print_intrinsics()) { - tty->print_cr(" ** vector stubs support is disabled"); - } + log_if_needed(" ** vector stubs support is disabled"); return false; } if (!Matcher::supports_vector_calling_convention()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** no vector calling conventions supported"); - } + log_if_needed(" ** no vector calling conventions supported"); return false; } if (!Matcher::vector_size_supported(elem_bt, num_elem)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** vector size (vlen=%d, etype=%s) is not supported", + log_if_needed(" ** vector size (vlen=%d, etype=%s) is not supported", num_elem, type2name(elem_bt)); - 
} return false; } } @@ -463,21 +406,17 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { VectorMaskUseType mask_use_type = is_vector_mask(vbox_klass) ? VecMaskUseAll : is_masked_op ? VecMaskUseLoad : VecMaskNotUsed; if ((sopc != 0) && !arch_supports_vector(sopc, num_elem, elem_bt, mask_use_type)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d opc=%d vlen=%d etype=%s ismask=%d is_masked_op=%d", + log_if_needed(" ** not supported: arity=%d opc=%d vlen=%d etype=%s ismask=%d is_masked_op=%d", n, sopc, num_elem, type2name(elem_bt), is_vector_mask(vbox_klass) ? 1 : 0, is_masked_op ? 1 : 0); - } return false; // not supported } // Return true if current platform has implemented the masked operation with predicate feature. bool use_predicate = is_masked_op && sopc != 0 && arch_supports_vector(sopc, num_elem, elem_bt, VecMaskUsePred); if (is_masked_op && !use_predicate && !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d opc=%d vlen=%d etype=%s ismask=0 is_masked_op=1", + log_if_needed(" ** not supported: arity=%d opc=%d vlen=%d etype=%s ismask=0 is_masked_op=1", n, sopc, num_elem, type2name(elem_bt)); - } return false; } @@ -486,10 +425,8 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { case 3: { opd3 = unbox_vector(argument(7), vbox_type, elem_bt, num_elem); if (opd3 == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed v3=%s", + log_if_needed(" ** unbox failed v3=%s", NodeClassNames[argument(7)->Opcode()]); - } return false; } // fall-through @@ -497,10 +434,8 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { case 2: { opd2 = unbox_vector(argument(6), vbox_type, elem_bt, num_elem); if (opd2 == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed v2=%s", + log_if_needed(" ** unbox failed v2=%s", NodeClassNames[argument(6)->Opcode()]); - } return false; } // 
fall-through @@ -508,10 +443,8 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { case 1: { opd1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem); if (opd1 == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed v1=%s", + log_if_needed(" ** unbox failed v1=%s", NodeClassNames[argument(5)->Opcode()]); - } return false; } break; @@ -526,10 +459,8 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(argument(n + 5), mbox_type, elem_bt, num_elem); if (mask == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed mask=%s", + log_if_needed(" ** unbox failed mask=%s", NodeClassNames[argument(n + 5)->Opcode()]); - } return false; } } @@ -539,12 +470,10 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { assert(UseVectorStubs, "sanity"); operation = gen_call_to_svml(opr->get_con(), elem_bt, num_elem, opd1, opd2); if (operation == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** svml call failed for %s_%s_%d", + log_if_needed(" ** svml call failed for %s_%s_%d", (elem_bt == T_FLOAT)?"float":"double", VectorSupport::svmlname[opr->get_con() - VectorSupport::VECTOR_OP_SVML_START], num_elem * type2aelembytes(elem_bt)); - } return false; } } else { @@ -599,9 +528,7 @@ bool LibraryCallKit::inline_vector_shuffle_iota() { } if (!is_klass_initialized(shuffle_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } @@ -708,9 +635,7 @@ bool LibraryCallKit::inline_vector_mask_operation() { } if (!is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } @@ -720,10 +645,8 @@ bool LibraryCallKit::inline_vector_mask_operation() { int mopc = 
VectorSupport::vop2ideal(oper->get_con(), elem_bt); if (!arch_supports_vector(mopc, num_elem, elem_bt, VecMaskUseLoad)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s", + log_if_needed(" ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s", mopc, num_elem, type2name(elem_bt)); - } return false; // not supported } @@ -732,10 +655,8 @@ bool LibraryCallKit::inline_vector_mask_operation() { const TypeInstPtr* mask_box_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); Node* mask_vec = unbox_vector(mask, mask_box_type, elem_bt, num_elem, true); if (mask_vec == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed mask=%s", + log_if_needed(" ** unbox failed mask=%s", NodeClassNames[argument(4)->Opcode()]); - } return false; } @@ -774,9 +695,7 @@ bool LibraryCallKit::inline_vector_shuffle_to_vector() { return false; // not enough info for intrinsification } if (!is_klass_initialized(shuffle_klass) || !is_klass_initialized(vector_klass) ) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } @@ -791,10 +710,8 @@ bool LibraryCallKit::inline_vector_shuffle_to_vector() { int cast_vopc = VectorCastNode::opcode(-1, T_BYTE); // from shuffle of type T_BYTE // Make sure that cast is implemented to particular type/size combination. 
if (!arch_supports_vector(cast_vopc, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s", + log_if_needed(" ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s", cast_vopc, num_elem, type2name(elem_bt)); - } return false; } @@ -838,27 +755,21 @@ bool LibraryCallKit::inline_vector_frombits_coerced() { if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || mode == nullptr || bits_type == nullptr || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con() || !mode->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s bitwise=%s", + log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s bitwise=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(5)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } BasicType elem_bt = elem_type->basic_type(); @@ -872,12 +783,10 @@ bool LibraryCallKit::inline_vector_frombits_coerced() { int opc = bcast_mode == VectorSupport::MODE_BITS_COERCED_LONG_TO_MASK ? 
Op_VectorLongToMask : Op_Replicate; if (!arch_supports_vector(opc, num_elem, elem_bt, checkFlags, true /*has_scalar_args*/)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=0 op=broadcast vlen=%d etype=%s ismask=%d bcast_mode=%d", + log_if_needed(" ** not supported: arity=0 op=broadcast vlen=%d etype=%s ismask=%d bcast_mode=%d", num_elem, type2name(elem_bt), is_mask ? 1 : 0, bcast_mode); - } return false; // not supported } @@ -973,27 +882,21 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || !from_ms->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s from_ms=%s", + log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s from_ms=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(6)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } BasicType elem_bt = elem_type->basic_type(); @@ -1001,11 +904,9 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { // TODO When mask usage is supported, VecMaskNotUsed needs to be VecMaskUseLoad. if (!arch_supports_vector(is_store ? 
Op_StoreVector : Op_LoadVector, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s ismask=no", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d etype=%s ismask=no", is_store, is_store ? "store" : "load", num_elem, type2name(elem_bt)); - } return false; // not supported } @@ -1046,18 +947,14 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { arr_type->elem()->array_element_basic_type() != elem_bt); BasicType mem_elem_bt = mismatched_ms ? arr_type->elem()->array_element_basic_type() : elem_bt; if (!is_java_primitive(mem_elem_bt)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** non-primitive array element type"); - } + log_if_needed(" ** non-primitive array element type"); return false; } int mem_num_elem = mismatched_ms ? (num_elem * type2aelembytes(elem_bt)) / type2aelembytes(mem_elem_bt) : num_elem; if (arr_type != nullptr && !is_mask && !elem_consistent_with_arr(elem_bt, arr_type, mismatched_ms)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no", is_store, is_store ? 
"store" : "load", num_elem, type2name(elem_bt), type2name(arr_type->elem()->array_element_basic_type())); - } set_map(old_map); set_sp(old_sp); return false; @@ -1068,11 +965,9 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { if (is_store) { if (!arch_supports_vector(Op_StoreVector, num_elem, elem_bt, VecMaskNotUsed) || !arch_supports_vector(Op_VectorReinterpret, mem_num_elem, mem_elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d*8 etype=%s/8 ismask=no", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d*8 etype=%s/8 ismask=no", is_store, "store", num_elem, type2name(elem_bt)); - } set_map(old_map); set_sp(old_sp); return false; // not supported @@ -1080,11 +975,9 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { } else { if (!arch_supports_vector(Op_LoadVector, mem_num_elem, mem_elem_bt, VecMaskNotUsed) || !arch_supports_vector(Op_VectorReinterpret, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d*8 etype=%s/8 ismask=no", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d*8 etype=%s/8 ismask=no", is_store, "load", mem_num_elem, type2name(mem_elem_bt)); - } set_map(old_map); set_sp(old_sp); return false; // not supported @@ -1198,35 +1091,27 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { if (vector_klass == nullptr || mask_klass == nullptr || elem_klass == nullptr || vlen == nullptr || vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr || from_ms == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con() || !from_ms->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s from_ms=%s", + log_if_needed(" ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s from_ms=%s", NodeClassNames[argument(0)->Opcode()], 
NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(3)->Opcode()], NodeClassNames[argument(7)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } if (!is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** mask klass argument not initialized"); - } + log_if_needed(" ** mask klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } @@ -1248,11 +1133,9 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { BIG_ENDIAN_ONLY(if (mismatched_ms) return false;) // If there is no consistency between array and vector element types, it must be special byte array case if (arr_type != nullptr && !elem_consistent_with_arr(elem_bt, arr_type, mismatched_ms)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s", is_store, is_store ? "storeMasked" : "loadMasked", num_elem, type2name(elem_bt), type2name(arr_type->elem()->array_element_basic_type())); - } set_map(old_map); set_sp(old_sp); return false; @@ -1274,10 +1157,8 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { // Masked vector load with IOOBE always uses the predicated load. 
const TypeInt* offset_in_range = gvn().type(argument(9))->isa_int(); if (!offset_in_range->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: offsetInRange=%s", + log_if_needed(" ** missing constant: offsetInRange=%s", NodeClassNames[argument(8)->Opcode()]); - } set_map(old_map); set_sp(old_sp); return false; @@ -1286,11 +1167,9 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { } if (needs_predicate) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: op=%s vlen=%d etype=%s mismatched_ms=%d", + log_if_needed(" ** not supported: op=%s vlen=%d etype=%s mismatched_ms=%d", is_store ? "storeMasked" : "loadMasked", num_elem, type2name(elem_bt), mismatched_ms ? 1 : 0); - } set_map(old_map); set_sp(old_sp); return false; @@ -1301,10 +1180,8 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { // the normal vector load and blend operations are supported by backend. if (!supports_predicate && (!arch_supports_vector(Op_LoadVector, mem_num_elem, mem_elem_bt, VecMaskNotUsed) || !arch_supports_vector(Op_VectorBlend, mem_num_elem, mem_elem_bt, VecMaskUseLoad))) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: op=loadMasked vlen=%d etype=%s mismatched_ms=%d", + log_if_needed(" ** not supported: op=loadMasked vlen=%d etype=%s mismatched_ms=%d", num_elem, type2name(elem_bt), mismatched_ms ? 1 : 0); - } set_map(old_map); set_sp(old_sp); return false; @@ -1314,11 +1191,9 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { // with byte type is supported by backend. if (mismatched_ms) { if (!arch_supports_vector(Op_VectorReinterpret, mem_num_elem, T_BYTE, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s mismatched_ms=1", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d etype=%s mismatched_ms=1", is_store, is_store ? 
"storeMasked" : "loadMasked", num_elem, type2name(elem_bt)); - } set_map(old_map); set_sp(old_sp); return false; @@ -1328,11 +1203,9 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { // Since it needs to unbox the mask, we need to double check that the related load operations // for mask are supported by backend. if (!arch_supports_vector(Op_LoadVector, num_elem, elem_bt, VecMaskUseLoad)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d etype=%s", is_store, is_store ? "storeMasked" : "loadMasked", num_elem, type2name(elem_bt)); - } set_map(old_map); set_sp(old_sp); return false; @@ -1352,11 +1225,9 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { Node* mask = unbox_vector(is_store ? argument(9) : argument(8), mbox_type, elem_bt, num_elem); if (mask == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed mask=%s", + log_if_needed(" ** unbox failed mask=%s", is_store ? 
NodeClassNames[argument(9)->Opcode()] : NodeClassNames[argument(8)->Opcode()]); - } set_map(old_map); set_sp(old_sp); return false; @@ -1365,10 +1236,8 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { if (is_store) { Node* val = unbox_vector(argument(8), vbox_type, elem_bt, num_elem); if (val == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed vector=%s", + log_if_needed(" ** unbox failed vector=%s", NodeClassNames[argument(8)->Opcode()]); - } set_map(old_map); set_sp(old_sp); return false; // operand unboxing failed @@ -1460,28 +1329,22 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { if (vector_klass == nullptr || elem_klass == nullptr || vector_idx_klass == nullptr || vlen == nullptr || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || vector_idx_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s viclass=%s", + log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s viclass=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(3)->Opcode()], NodeClassNames[argument(4)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass) || !is_klass_initialized(vector_idx_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } @@ -1492,44 +1355,34 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool 
is_scatter) { bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(1)->Opcode()]); - } + log_if_needed(" ** missing constant: maskclass=%s", NodeClassNames[argument(1)->Opcode()]); return false; // not enough info for intrinsification } if (!is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** mask klass argument not initialized"); - } + log_if_needed(" ** mask klass argument not initialized"); return false; } if (vmask_type->maybe_null()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** null mask values are not allowed for masked op"); - } + log_if_needed(" ** null mask values are not allowed for masked op"); return false; } // Check whether the predicated gather/scatter node is supported by architecture. VectorMaskUseType mask = (VectorMaskUseType) (VecMaskUseLoad | VecMaskUsePred); if (!arch_supports_vector(is_scatter ? Op_StoreVectorScatterMasked : Op_LoadVectorGatherMasked, num_elem, elem_bt, mask)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s is_masked_op=1", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d etype=%s is_masked_op=1", is_scatter, is_scatter ? "scatterMasked" : "gatherMasked", num_elem, type2name(elem_bt)); - } return false; // not supported } } else { // Check whether the normal gather/scatter node is supported for non-masked operation. if (!arch_supports_vector(is_scatter ? Op_StoreVectorScatter : Op_LoadVectorGather, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s is_masked_op=0", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d etype=%s is_masked_op=0", is_scatter, is_scatter ? 
"scatter" : "gather", num_elem, type2name(elem_bt)); - } return false; // not supported } } @@ -1537,11 +1390,9 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { // Check that the vector holding indices is supported by architecture // For sub-word gathers expander receive index array. if (!is_subword_type(elem_bt) && !arch_supports_vector(Op_LoadVector, num_elem, T_INT, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s/loadindex vlen=%d etype=int is_masked_op=%d", + log_if_needed(" ** not supported: arity=%d op=%s/loadindex vlen=%d etype=int is_masked_op=%d", is_scatter, is_scatter ? "scatter" : "gather", num_elem, is_masked_op ? 1 : 0); - } return false; // not supported } @@ -1559,11 +1410,9 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { // The array must be consistent with vector type if (arr_type == nullptr || (arr_type != nullptr && !elem_consistent_with_arr(elem_bt, arr_type, false))) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no", + log_if_needed(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no", is_scatter, is_scatter ? "scatter" : "gather", num_elem, type2name(elem_bt), type2name(arr_type->elem()->array_element_basic_type())); - } set_map(old_map); set_sp(old_sp); return false; @@ -1595,11 +1444,9 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(is_scatter ? argument(10) : argument(9), mbox_type, elem_bt, num_elem); if (mask == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed mask=%s", + log_if_needed(" ** unbox failed mask=%s", is_scatter ? 
NodeClassNames[argument(10)->Opcode()] : NodeClassNames[argument(9)->Opcode()]); - } set_map(old_map); set_sp(old_sp); return false; @@ -1669,26 +1516,20 @@ bool LibraryCallKit::inline_vector_reduction() { if (opr == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || !opr->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(3)->Opcode()], NodeClassNames[argument(4)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } @@ -1696,23 +1537,17 @@ bool LibraryCallKit::inline_vector_reduction() { bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]); - } + log_if_needed(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]); return false; // not enough info for intrinsification } if (!is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** mask klass argument not initialized"); - } + 
log_if_needed(" ** mask klass argument not initialized"); return false; } if (vmask_type->maybe_null()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** null mask values are not allowed for masked op"); - } + log_if_needed(" ** null mask values are not allowed for masked op"); return false; } } @@ -1724,20 +1559,16 @@ bool LibraryCallKit::inline_vector_reduction() { // When using mask, mask use type needs to be VecMaskUseLoad. if (!arch_supports_vector(sopc, num_elem, elem_bt, is_masked_op ? VecMaskUseLoad : VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=%d/reduce vlen=%d etype=%s is_masked_op=%d", + log_if_needed(" ** not supported: arity=1 op=%d/reduce vlen=%d etype=%s is_masked_op=%d", sopc, num_elem, type2name(elem_bt), is_masked_op ? 1 : 0); - } return false; } // Return true if current platform has implemented the masked operation with predicate feature. bool use_predicate = is_masked_op && arch_supports_vector(sopc, num_elem, elem_bt, VecMaskUsePred); if (is_masked_op && !use_predicate && !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=%d/reduce vlen=%d etype=%s is_masked_op=1", + log_if_needed(" ** not supported: arity=1 op=%d/reduce vlen=%d etype=%s is_masked_op=1", sopc, num_elem, type2name(elem_bt)); - } return false; } @@ -1756,10 +1587,8 @@ bool LibraryCallKit::inline_vector_reduction() { const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(argument(6), mbox_type, elem_bt, num_elem); if (mask == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed mask=%s", + log_if_needed(" ** unbox failed mask=%s", NodeClassNames[argument(6)->Opcode()]); - } return false; } } @@ -1822,26 +1651,20 @@ bool LibraryCallKit::inline_vector_test() { if (cond == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || 
!cond->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: cond=%s vclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: cond=%s vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(3)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } BasicType elem_bt = elem_type->basic_type(); @@ -1851,11 +1674,9 @@ bool LibraryCallKit::inline_vector_test() { const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); if (!arch_supports_vector(Op_VectorTest, num_elem, elem_bt, is_vector_mask(vbox_klass) ? 
VecMaskUseLoad : VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=2 op=test/%d vlen=%d etype=%s ismask=%d", + log_if_needed(" ** not supported: arity=2 op=test/%d vlen=%d etype=%s ismask=%d", cond->get_con(), num_elem, type2name(elem_bt), is_vector_mask(vbox_klass)); - } return false; } @@ -1900,26 +1721,20 @@ bool LibraryCallKit::inline_vector_blend() { } if (mask_klass->const_oop() == nullptr || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(3)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass) || !is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } BasicType elem_bt = elem_type->basic_type(); @@ -1927,10 +1742,8 @@ bool LibraryCallKit::inline_vector_blend() { int num_elem = vlen->get_con(); if (!arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=2 op=blend vlen=%d etype=%s ismask=useload", + log_if_needed(" ** not supported: arity=2 op=blend vlen=%d etype=%s ismask=useload", num_elem, type2name(elem_bt)); - } return 
false; // not supported } ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass(); @@ -1974,27 +1787,21 @@ bool LibraryCallKit::inline_vector_compare() { } if (!cond->is_con() || vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: cond=%s vclass=%s mclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: cond=%s vclass=%s mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(3)->Opcode()], NodeClassNames[argument(4)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass) || !is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } @@ -2004,19 +1811,15 @@ bool LibraryCallKit::inline_vector_compare() { if ((cond->get_con() & BoolTest::unsigned_compare) != 0) { if (!Matcher::supports_vector_comparison_unsigned(num_elem, elem_bt)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: unsigned comparison op=comp/%d vlen=%d etype=%s ismask=usestore", + log_if_needed(" ** not supported: unsigned comparison op=comp/%d vlen=%d etype=%s ismask=usestore", cond->get_con() & (BoolTest::unsigned_compare - 1), num_elem, type2name(elem_bt)); - } return false; } } if (!arch_supports_vector(Op_VectorMaskCmp, num_elem, elem_bt, 
VecMaskUseStore)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore", + log_if_needed(" ** not supported: arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore", cond->get_con(), num_elem, type2name(elem_bt)); - } return false; } @@ -2032,19 +1835,15 @@ bool LibraryCallKit::inline_vector_compare() { bool is_masked_op = argument(7)->bottom_type() != TypePtr::NULL_PTR; Node* mask = is_masked_op ? unbox_vector(argument(7), mbox_type, elem_bt, num_elem) : nullptr; if (is_masked_op && mask == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: mask = null arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore is_masked_op=1", + log_if_needed(" ** not supported: mask = null arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore is_masked_op=1", cond->get_con(), num_elem, type2name(elem_bt)); - } return false; } bool use_predicate = is_masked_op && arch_supports_vector(Op_VectorMaskCmp, num_elem, elem_bt, VecMaskUsePred); if (is_masked_op && !use_predicate && !arch_supports_vector(Op_AndV, num_elem, elem_bt, VecMaskUseLoad)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore is_masked_op=1", + log_if_needed(" ** not supported: arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore is_masked_op=1", cond->get_con(), num_elem, type2name(elem_bt)); - } return false; } @@ -2097,27 +1896,21 @@ bool LibraryCallKit::inline_vector_rearrange() { vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s sclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: vclass=%s sclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(3)->Opcode()], NodeClassNames[argument(4)->Opcode()]); - } return false; // not enough info for 
intrinsification } if (!is_klass_initialized(vector_klass) || !is_klass_initialized(shuffle_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } BasicType elem_bt = elem_type->basic_type(); @@ -2125,10 +1918,8 @@ bool LibraryCallKit::inline_vector_rearrange() { int num_elem = vlen->get_con(); if (!arch_supports_vector(Op_VectorLoadShuffle, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=0 op=load/shuffle vlen=%d etype=%s ismask=no", + log_if_needed(" ** not supported: arity=0 op=load/shuffle vlen=%d etype=%s ismask=no", num_elem, type2name(elem_bt)); - } return false; // not supported } @@ -2138,9 +1929,7 @@ bool LibraryCallKit::inline_vector_rearrange() { (mask_klass == nullptr || mask_klass->const_oop() == nullptr || !is_klass_initialized(mask_klass))) { - if (C->print_intrinsics()) { - tty->print_cr(" ** mask_klass argument not initialized"); - } + log_if_needed(" ** mask_klass argument not initialized"); } VectorMaskUseType checkFlags = (VectorMaskUseType)(is_masked_op ? 
(VecMaskUseLoad | VecMaskUsePred) : VecMaskNotUsed); if (!arch_supports_vector(Op_VectorRearrange, num_elem, elem_bt, checkFlags)) { @@ -2149,10 +1938,8 @@ bool LibraryCallKit::inline_vector_rearrange() { (!arch_supports_vector(Op_VectorRearrange, num_elem, elem_bt, VecMaskNotUsed) || !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad) || !arch_supports_vector(Op_Replicate, num_elem, elem_bt, VecMaskNotUsed))) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=2 op=shuffle/rearrange vlen=%d etype=%s ismask=no", + log_if_needed(" ** not supported: arity=2 op=shuffle/rearrange vlen=%d etype=%s ismask=no", num_elem, type2name(elem_bt)); - } return false; // not supported } } @@ -2175,10 +1962,8 @@ bool LibraryCallKit::inline_vector_rearrange() { const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(argument(7), mbox_type, elem_bt, num_elem); if (mask == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=3 op=shuffle/rearrange vlen=%d etype=%s ismask=useload is_masked_op=1", + log_if_needed(" ** not supported: arity=3 op=shuffle/rearrange vlen=%d etype=%s ismask=useload is_masked_op=1", num_elem, type2name(elem_bt)); - } return false; } } @@ -2280,19 +2065,15 @@ bool LibraryCallKit::inline_vector_broadcast_int() { return false; // dead code } if (!opr->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(3)->Opcode()], NodeClassNames[argument(4)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" 
** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } @@ -2300,32 +2081,24 @@ bool LibraryCallKit::inline_vector_broadcast_int() { bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]); - } + log_if_needed(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]); return false; // not enough info for intrinsification } if (!is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** mask klass argument not initialized"); - } + log_if_needed(" ** mask klass argument not initialized"); return false; } if (vmask_type->maybe_null()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** null mask values are not allowed for masked op"); - } + log_if_needed(" ** null mask values are not allowed for masked op"); return false; } } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } @@ -2337,17 +2110,13 @@ bool LibraryCallKit::inline_vector_broadcast_int() { bool is_rotate = VectorNode::is_rotate_opcode(opc); if (opc == 0 || (!is_shift && !is_rotate)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** operation not supported: op=%d bt=%s", opr->get_con(), type2name(elem_bt)); - } + log_if_needed(" ** operation not supported: op=%d bt=%s", opr->get_con(), type2name(elem_bt)); return false; // operation not supported } int sopc = VectorNode::opcode(opc, elem_bt); if (sopc == 0) { - if (C->print_intrinsics()) { - tty->print_cr(" ** operation not supported: opc=%s bt=%s", 
NodeClassNames[opc], type2name(elem_bt)); - } + log_if_needed(" ** operation not supported: opc=%s bt=%s", NodeClassNames[opc], type2name(elem_bt)); return false; // operation not supported } @@ -2370,10 +2139,8 @@ bool LibraryCallKit::inline_vector_broadcast_int() { (!arch_supports_vector(sopc, num_elem, elem_bt, VecMaskNotUsed, has_scalar_args) || !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad))) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=0 op=int/%d vlen=%d etype=%s is_masked_op=%d", + log_if_needed(" ** not supported: arity=0 op=int/%d vlen=%d etype=%s is_masked_op=%d", sopc, num_elem, type2name(elem_bt), is_masked_op ? 1 : 0); - } return false; // not supported } } @@ -2404,9 +2171,7 @@ bool LibraryCallKit::inline_vector_broadcast_int() { const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(argument(7), mbox_type, elem_bt, num_elem); if (mask == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed mask=%s", NodeClassNames[argument(7)->Opcode()]); - } + log_if_needed(" ** unbox failed mask=%s", NodeClassNames[argument(7)->Opcode()]); return false; } } @@ -2456,8 +2221,7 @@ bool LibraryCallKit::inline_vector_convert() { if (!opr->is_con() || vector_klass_from->const_oop() == nullptr || elem_klass_from->const_oop() == nullptr || !vlen_from->is_con() || vector_klass_to->const_oop() == nullptr || elem_klass_to->const_oop() == nullptr || !vlen_to->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: opr=%s vclass_from=%s etype_from=%s vlen_from=%s vclass_to=%s etype_to=%s vlen_to=%s", + log_if_needed(" ** missing constant: opr=%s vclass_from=%s etype_from=%s vlen_from=%s vclass_to=%s etype_to=%s vlen_to=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], @@ -2465,13 +2229,10 @@ bool LibraryCallKit::inline_vector_convert() { 
NodeClassNames[argument(4)->Opcode()], NodeClassNames[argument(5)->Opcode()], NodeClassNames[argument(6)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass_from) || !is_klass_initialized(vector_klass_to)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } @@ -2508,11 +2269,9 @@ bool LibraryCallKit::inline_vector_convert() { num_elem_from, elem_bt_from, is_mask ? VecMaskUseAll : VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=%s/1 vlen1=%d etype1=%s ismask=%d", + log_if_needed(" ** not supported: arity=1 op=%s/1 vlen1=%d etype1=%s ismask=%d", is_cast ? "cast" : "reinterpret", num_elem_from, type2name(elem_bt_from), is_mask); - } return false; } @@ -2521,11 +2280,9 @@ bool LibraryCallKit::inline_vector_convert() { num_elem_to, elem_bt_to, is_mask ? VecMaskUseAll : VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=%s/2 vlen2=%d etype2=%s ismask=%d", + log_if_needed(" ** not supported: arity=1 op=%s/2 vlen2=%d etype2=%s ismask=%d", is_cast ? "cast" : "reinterpret", num_elem_to, type2name(elem_bt_to), is_mask); - } return false; } @@ -2565,10 +2322,8 @@ bool LibraryCallKit::inline_vector_convert() { // Make sure that vector cast is implemented to particular type/size combination if it is // not a mask casting. 
if (!is_mask && !arch_supports_vector(cast_vopc, num_elem_to, elem_bt_to, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s ismask=%d", + log_if_needed(" ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s ismask=%d", cast_vopc, num_elem_to, type2name(elem_bt_to), is_mask); - } return false; } @@ -2580,11 +2335,9 @@ bool LibraryCallKit::inline_vector_convert() { // It is possible that arch does not support this intermediate vector size // TODO More complex logic required here to handle this corner case for the sizes. if (!arch_supports_vector(cast_vopc, num_elem_for_cast, elem_bt_to, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=cast#%d/4 vlen1=%d etype2=%s ismask=%d", + log_if_needed(" ** not supported: arity=1 op=cast#%d/4 vlen1=%d etype2=%s ismask=%d", cast_vopc, num_elem_for_cast, type2name(elem_bt_to), is_mask); - } return false; } @@ -2602,10 +2355,8 @@ bool LibraryCallKit::inline_vector_convert() { num_elem_for_resize, elem_bt_from, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=cast/5 vlen2=%d etype1=%s ismask=%d", + log_if_needed(" ** not supported: arity=1 op=cast/5 vlen2=%d etype1=%s ismask=%d", num_elem_for_resize, type2name(elem_bt_from), is_mask); - } return false; } @@ -2616,10 +2367,8 @@ bool LibraryCallKit::inline_vector_convert() { if (is_mask) { // Make sure that cast for vector mask is implemented to particular type/size combination. 
if (!arch_supports_vector(Op_VectorMaskCast, num_elem_to, elem_bt_to, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=maskcast vlen2=%d etype2=%s ismask=%d", + log_if_needed(" ** not supported: arity=1 op=maskcast vlen2=%d etype2=%s ismask=%d", num_elem_to, type2name(elem_bt_to), is_mask); - } return false; } op = gvn().transform(new VectorMaskCastNode(op, dst_type)); @@ -2657,35 +2406,27 @@ bool LibraryCallKit::inline_vector_insert() { return false; // dead code } if (vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con() || !idx->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s idx=%s", + log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s idx=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(4)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } BasicType elem_bt = elem_type->basic_type(); int num_elem = vlen->get_con(); if (!arch_supports_vector(Op_VectorInsert, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=insert vlen=%d etype=%s ismask=no", + log_if_needed(" ** not supported: arity=1 op=insert vlen=%d etype=%s ismask=no", num_elem, type2name(elem_bt)); - } return false; // not 
supported } @@ -2751,25 +2492,19 @@ bool LibraryCallKit::inline_vector_extract() { return false; // dead code } if (vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } BasicType elem_bt = elem_type->basic_type(); @@ -2807,26 +2542,20 @@ bool LibraryCallKit::inline_vector_extract() { opd = gvn().transform(new URShiftLNode(opd, pos)); opd = gvn().transform(new AndLNode(opd, gvn().makecon(TypeLong::ONE))); } else { - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected mask extraction because architecture does not support it"); - } + log_if_needed(" ** Rejected mask extraction because architecture does not support it"); return false; // not supported } } else { // vbox_klass is vector. This is used for Vector.lane(int). 
if (!idx->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: idx=%s", NodeClassNames[argument(4)->Opcode()]); - } + log_if_needed(" ** missing constant: idx=%s", NodeClassNames[argument(4)->Opcode()]); return false; // not enough info for intrinsification } int vopc = ExtractNode::opcode(elem_bt); if (!arch_supports_vector(vopc, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: arity=1 op=extract vlen=%d etype=%s ismask=no", + log_if_needed(" ** not supported: arity=1 op=extract vlen=%d etype=%s ismask=no", num_elem, type2name(elem_bt)); - } return false; // not supported } @@ -2882,29 +2611,23 @@ bool LibraryCallKit::inline_vector_compress_expand() { if (vector_klass == nullptr || elem_klass == nullptr || mask_klass == nullptr || vlen == nullptr || vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con() || !opr->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: opr=%s vclass=%s mclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: opr=%s vclass=%s mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()], NodeClassNames[argument(3)->Opcode()], NodeClassNames[argument(4)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass) || !is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return 
false; // should be primitive type } @@ -2913,10 +2636,8 @@ bool LibraryCallKit::inline_vector_compress_expand() { int opc = VectorSupport::vop2ideal(opr->get_con(), elem_bt); if (!arch_supports_vector(opc, num_elem, elem_bt, VecMaskUseLoad)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: opc=%d vlen=%d etype=%s ismask=useload", + log_if_needed(" ** not supported: opc=%d vlen=%d etype=%s ismask=useload", opc, num_elem, type2name(elem_bt)); - } return false; // not supported } @@ -2927,10 +2648,8 @@ bool LibraryCallKit::inline_vector_compress_expand() { vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); opd1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem); if (opd1 == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed vector=%s", + log_if_needed(" ** unbox failed vector=%s", NodeClassNames[argument(5)->Opcode()]); - } return false; } } @@ -2941,10 +2660,8 @@ bool LibraryCallKit::inline_vector_compress_expand() { Node* mask = unbox_vector(argument(6), mbox_type, elem_bt, num_elem); if (mask == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed mask=%s", + log_if_needed(" ** unbox failed mask=%s", NodeClassNames[argument(6)->Opcode()]); - } return false; } @@ -2975,27 +2692,21 @@ bool LibraryCallKit::inline_index_vector() { if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || vector_klass->const_oop() == nullptr || !vlen->is_con() || elem_klass->const_oop() == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(vector_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + 
log_if_needed(" ** klass argument not initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } @@ -3004,9 +2715,7 @@ bool LibraryCallKit::inline_index_vector() { // Check whether the iota index generation op is supported by the current hardware if (!arch_supports_vector(Op_VectorLoadConst, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt)); - } + log_if_needed(" ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt)); return false; // not supported } @@ -3021,9 +2730,7 @@ bool LibraryCallKit::inline_index_vector() { } else { // Check whether the vector multiply op is supported by the current hardware if (!arch_supports_vector(vmul_op, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt)); - } + log_if_needed(" ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt)); return false; // not supported } @@ -3032,10 +2739,8 @@ bool LibraryCallKit::inline_index_vector() { int cast_op = elem_bt == T_LONG ? Op_ConvI2L : elem_bt == T_FLOAT? 
Op_ConvI2F : Op_ConvI2D; if (!Matcher::match_rule_supported(cast_op)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected op (%s) because architecture does not support it", + log_if_needed(" ** Rejected op (%s) because architecture does not support it", NodeClassNames[cast_op]); - } return false; // not supported } } @@ -3045,10 +2750,8 @@ bool LibraryCallKit::inline_index_vector() { const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); Node* opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem); if (opd == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** unbox failed vector=%s", + log_if_needed(" ** unbox failed vector=%s", NodeClassNames[argument(3)->Opcode()]); - } return false; } @@ -3061,9 +2764,7 @@ bool LibraryCallKit::inline_index_vector() { } else { // Check whether the vector addition op is supported by the current hardware if (!arch_supports_vector(vadd_op, num_elem, elem_bt, VecMaskNotUsed)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt)); - } + log_if_needed(" ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt)); return false; // not supported } } @@ -3124,27 +2825,21 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() { if (mask_klass == nullptr || elem_klass == nullptr || vlen == nullptr || mask_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** missing constant: mclass=%s etype=%s vlen=%s", + log_if_needed(" ** missing constant: mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], NodeClassNames[argument(1)->Opcode()], NodeClassNames[argument(2)->Opcode()]); - } return false; // not enough info for intrinsification } if (!is_klass_initialized(mask_klass)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** klass argument not initialized"); - } + log_if_needed(" ** klass argument not 
initialized"); return false; } ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type()); - } + log_if_needed(" ** not a primitive bt=%d", elem_type->basic_type()); return false; // should be primitive type } @@ -3157,9 +2852,7 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() { if (!arch_supports_vector(Op_VectorLoadConst, num_elem, elem_bt, VecMaskNotUsed) || !arch_supports_vector(Op_Replicate, num_elem, elem_bt, VecMaskNotUsed) || !arch_supports_vector(Op_VectorMaskCmp, num_elem, elem_bt, VecMaskUseStore)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt)); - } + log_if_needed(" ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt)); return false; // not supported } @@ -3168,10 +2861,8 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() { int cast_op = is_integral_type(elem_bt) ? Op_ConvL2I : (elem_bt == T_FLOAT ? 
Op_ConvL2F : Op_ConvL2D); if (!Matcher::match_rule_supported(cast_op)) { - if (C->print_intrinsics()) { - tty->print_cr(" ** Rejected op (%s) because architecture does not support it", + log_if_needed(" ** Rejected op (%s) because architecture does not support it", NodeClassNames[cast_op]); - } return false; // not supported } } @@ -3180,9 +2871,7 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() { Node* offset = argument(3); Node* limit = argument(5); if (offset == nullptr || limit == nullptr) { - if (C->print_intrinsics()) { - tty->print_cr(" ** offset or limit argument is null"); - } + log_if_needed(" ** offset or limit argument is null"); return false; // not supported } @@ -3238,3 +2927,6 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() { C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt)))); return true; } + +#undef non_product_log_if_needed +#undef log_if_needed diff --git a/src/hotspot/share/prims/jvmtiAgentList.hpp b/src/hotspot/share/prims/jvmtiAgentList.hpp index c4a497dfa9e..671def02681 100644 --- a/src/hotspot/share/prims/jvmtiAgentList.hpp +++ b/src/hotspot/share/prims/jvmtiAgentList.hpp @@ -25,7 +25,7 @@ #ifndef SHARE_PRIMS_JVMTIAGENTLIST_HPP #define SHARE_PRIMS_JVMTIAGENTLIST_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "prims/jvmtiAgent.hpp" #include "utilities/growableArray.hpp" diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp index 5a87e16c855..c2b6d27986b 100644 --- a/src/hotspot/share/prims/jvmtiExport.cpp +++ b/src/hotspot/share/prims/jvmtiExport.cpp @@ -929,7 +929,9 @@ class JvmtiClassFileLoadHookPoster : public StackObj { _cached_class_file_ptr = cache_ptr; _has_been_modified = false; - assert(!_thread->is_in_any_VTMS_transition(), "CFLH events are not allowed in any VTMS transition"); + if (_thread->is_in_any_VTMS_transition()) { + return; // no events should be posted if thread is in any VTMS 
transition + } _state = JvmtiExport::get_jvmti_thread_state(_thread); if (_state != nullptr) { _class_being_redefined = _state->get_class_being_redefined(); @@ -1366,10 +1368,9 @@ void JvmtiExport::post_class_load(JavaThread *thread, Klass* klass) { if (state == nullptr) { return; } - if (thread->is_in_tmp_VTMS_transition()) { - return; // skip ClassLoad events in tmp VTMS transition + if (thread->is_in_any_VTMS_transition()) { + return; // no events should be posted if thread is in any VTMS transition } - assert(!thread->is_in_any_VTMS_transition(), "class load events are not allowed in any VTMS transition"); EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_LOAD, ("[%s] Trg Class Load triggered", JvmtiTrace::safe_get_thread_name(thread))); @@ -1404,10 +1405,9 @@ void JvmtiExport::post_class_prepare(JavaThread *thread, Klass* klass) { if (state == nullptr) { return; } - if (thread->is_in_tmp_VTMS_transition()) { - return; // skip ClassPrepare events in tmp VTMS transition + if (thread->is_in_any_VTMS_transition()) { + return; // no events should be posted if thread is in any VTMS transition } - assert(!thread->is_in_any_VTMS_transition(), "class prepare events are not allowed in any VTMS transition"); EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("[%s] Trg Class Prepare triggered", JvmtiTrace::safe_get_thread_name(thread))); diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index ee8c7b24541..7a74ef6e248 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -496,6 +496,7 @@ static SpecialFlag const special_jvm_flags[] = { // --- Non-alias flags - sorted by obsolete_in then expired_in: { "AllowRedefinitionToAddDeleteMethods", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() }, { "FlightRecorder", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() }, + { "ZGenerational", JDK_Version::jdk(23), JDK_Version::undefined(), JDK_Version::undefined() }, { 
"DumpSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, { "DynamicDumpSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, { "RequireSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 03e62075808..53a0f2e9c3b 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -1736,7 +1736,7 @@ void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* ObjectSynchronizer::exit(src->obj(), src->lock(), thread); } } - array->element(i)->free_monitors(thread); + array->element(i)->free_monitors(); #ifdef ASSERT array->element(i)->set_removed_monitors(); #endif diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index 7ed67c4616b..97be5333413 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -430,8 +430,6 @@ JavaThread::JavaThread() : _active_handles(nullptr), _free_handle_block(nullptr), - _monitor_chunks(nullptr), - _suspend_flags(0), _thread_state(_thread_new), @@ -1050,13 +1048,7 @@ JavaThread* JavaThread::active() { bool JavaThread::is_lock_owned(address adr) const { assert(LockingMode != LM_LIGHTWEIGHT, "should not be called with new lightweight locking"); - if (Thread::is_lock_owned(adr)) return true; - - for (MonitorChunk* chunk = monitor_chunks(); chunk != nullptr; chunk = chunk->next()) { - if (chunk->contains(adr)) return true; - } - - return false; + return is_in_full_stack(adr); } oop JavaThread::exception_oop() const { @@ -1067,22 +1059,6 @@ void JavaThread::set_exception_oop(oop o) { Atomic::store(&_exception_oop, o); } -void JavaThread::add_monitor_chunk(MonitorChunk* chunk) { - chunk->set_next(monitor_chunks()); - set_monitor_chunks(chunk); -} - -void 
JavaThread::remove_monitor_chunk(MonitorChunk* chunk) { - guarantee(monitor_chunks() != nullptr, "must be non empty"); - if (monitor_chunks() == chunk) { - set_monitor_chunks(chunk->next()); - } else { - MonitorChunk* prev = monitor_chunks(); - while (prev->next() != chunk) prev = prev->next(); - prev->set_next(chunk->next()); - } -} - void JavaThread::handle_special_runtime_exit_condition() { if (is_obj_deopt_suspend()) { frame_anchor()->make_walkable(); @@ -1408,13 +1384,6 @@ void JavaThread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) { DEBUG_ONLY(verify_frame_info();) - if (has_last_Java_frame()) { - // Traverse the monitor chunks - for (MonitorChunk* chunk = monitor_chunks(); chunk != nullptr; chunk = chunk->next()) { - chunk->oops_do(f); - } - } - assert(vframe_array_head() == nullptr, "deopt in progress at a safepoint!"); // If we have deferred set_locals there might be oops waiting to be // written diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp index 37fd8981acd..2541aaded00 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -193,10 +193,6 @@ class JavaThread: public Thread { void pop_jni_handle_block(); private: - MonitorChunk* _monitor_chunks; // Contains the off stack monitors - // allocated during deoptimization - // and by JNI_MonitorEnter/Exit - enum SuspendFlags { // NOTE: avoid using the sign-bit as cc generates different test code // when the sign-bit is used, and sometimes incorrectly - see CR 6398077 @@ -679,7 +675,7 @@ class JavaThread: public Thread { return (_suspend_flags & (_obj_deopt JFR_ONLY(| _trace_flag))) != 0; } - // Fast-locking support + // Stack-locking support (not for LM_LIGHTWEIGHT) bool is_lock_owned(address adr) const; // Accessors for vframe array top @@ -881,13 +877,7 @@ class JavaThread: public Thread { int depth_first_number() { return _depth_first_number; } void set_depth_first_number(int dfn) { 
_depth_first_number = dfn; } - private: - void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; } - public: - MonitorChunk* monitor_chunks() const { return _monitor_chunks; } - void add_monitor_chunk(MonitorChunk* chunk); - void remove_monitor_chunk(MonitorChunk* chunk); bool in_deopt_handler() const { return _in_deopt_handler > 0; } void inc_in_deopt_handler() { _in_deopt_handler++; } void dec_in_deopt_handler() { diff --git a/src/hotspot/share/runtime/monitorChunk.cpp b/src/hotspot/share/runtime/monitorChunk.cpp index c54ad685cdb..d18fc21d78d 100644 --- a/src/hotspot/share/runtime/monitorChunk.cpp +++ b/src/hotspot/share/runtime/monitorChunk.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,6 @@ MonitorChunk::MonitorChunk(int number_on_monitors) { _number_of_monitors = number_on_monitors; _monitors = NEW_C_HEAP_ARRAY(BasicObjectLock, number_on_monitors, mtSynchronizer); - _next = nullptr; } diff --git a/src/hotspot/share/runtime/monitorChunk.hpp b/src/hotspot/share/runtime/monitorChunk.hpp index f16aa46fa64..5c804b5c595 100644 --- a/src/hotspot/share/runtime/monitorChunk.hpp +++ b/src/hotspot/share/runtime/monitorChunk.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,23 +38,17 @@ class MonitorChunk: public CHeapObj { int _number_of_monitors; BasicObjectLock* _monitors; BasicObjectLock* monitors() const { return _monitors; } - MonitorChunk* _next; public: // Constructor MonitorChunk(int number_on_monitors); ~MonitorChunk(); - // link operations - MonitorChunk* next() const { return _next; } - void set_next(MonitorChunk* next) { _next = next; } - // Returns the number of monitors int number_of_monitors() const { return _number_of_monitors; } // Returns the index'th monitor BasicObjectLock* at(int index) { assert(index >= 0 && index < number_of_monitors(), "out of bounds check"); return &monitors()[index]; } - // Memory management void oops_do(OopClosure* f); diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index 4bfcd9c2da5..971c3c884c4 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -606,7 +606,7 @@ class os: AllStatic { // multiple calls to naked_short_sleep. Only for use by non-JavaThreads. static void naked_sleep(jlong millis); // Never returns, use with CAUTION - ATTRIBUTE_NORETURN static void infinite_sleep(); + [[noreturn]] static void infinite_sleep(); static void naked_yield () ; static OSReturn set_priority(Thread* thread, ThreadPriority priority); static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority); @@ -630,26 +630,26 @@ class os: AllStatic { static int fork_and_exec(const char *cmd); // Call ::exit() on all platforms - ATTRIBUTE_NORETURN static void exit(int num); + [[noreturn]] static void exit(int num); // Call ::_exit() on all platforms. Similar semantics to die() except we never // want a core dump. - ATTRIBUTE_NORETURN static void _exit(int num); + [[noreturn]] static void _exit(int num); // Terminate the VM, but don't exit the process static void shutdown(); // Terminate with an error. 
Default is to generate a core file on platforms // that support such things. This calls shutdown() and then aborts. - ATTRIBUTE_NORETURN static void abort(bool dump_core, void *siginfo, const void *context); - ATTRIBUTE_NORETURN static void abort(bool dump_core = true); + [[noreturn]] static void abort(bool dump_core, void *siginfo, const void *context); + [[noreturn]] static void abort(bool dump_core = true); // Die immediately, no exit hook, no abort hook, no cleanup. // Dump a core file, if possible, for debugging. os::abort() is the // preferred means to abort the VM on error. os::die() should only // be called if something has gone badly wrong. CreateCoredumpOnCrash // is intentionally not honored by this function. - ATTRIBUTE_NORETURN static void die(); + [[noreturn]] static void die(); // File i/o operations static int open(const char *path, int oflag, int mode); diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index f8d4b323156..27b4163238a 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -1056,7 +1056,9 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) { } // Fall thru so we only have one place that installs the hash in // the ObjectMonitor. - } else if (LockingMode == LM_LEGACY && mark.has_locker() && current->is_lock_owned((address)mark.locker())) { + } else if (LockingMode == LM_LEGACY && mark.has_locker() + && current->is_Java_thread() + && JavaThread::cast(current)->is_lock_owned((address)mark.locker())) { // This is a stack-lock owned by the calling thread so fetch the // displaced markWord from the BasicLock on the stack. 
temp = mark.displaced_mark_helper(); diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index 396f349a885..d98fcf6f664 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -24,6 +24,7 @@ */ #include "precompiled.hpp" +#include "cds/cdsConfig.hpp" #include "classfile/javaClasses.hpp" #include "classfile/javaThreadStatus.hpp" #include "gc/shared/barrierSet.hpp" @@ -103,7 +104,10 @@ Thread::Thread() { _vm_error_callbacks = nullptr; // thread-specific hashCode stream generator state - Marsaglia shift-xor form - _hashStateX = os::random(); + // If we are dumping, keep ihashes constant. Note that during dumping we only + // ever run one java thread, and no other thread should generate ihashes either, + // so using a constant seed should work fine. + _hashStateX = CDSConfig::is_dumping_static_archive() ? 0x12345678 : os::random(); _hashStateY = 842502087; _hashStateZ = 0x8767; // (int)(3579807591LL & 0xffff) ; _hashStateW = 273326509; @@ -527,16 +531,6 @@ void Thread::print_owned_locks_on(outputStream* st) const { } #endif // ASSERT -// We had to move these methods here, because vm threads get into ObjectSynchronizer::enter -// However, there is a note in JavaThread::is_lock_owned() about the VM threads not being -// used for compilation in the future. If that change is made, the need for these methods -// should be revisited, and they should be removed if possible. 
- -bool Thread::is_lock_owned(address adr) const { - assert(LockingMode != LM_LIGHTWEIGHT, "should not be called with new lightweight locking"); - return is_in_full_stack(adr); -} - bool Thread::set_as_starting_thread() { assert(_starting_thread == nullptr, "already initialized: " "_starting_thread=" INTPTR_FORMAT, p2i(_starting_thread)); diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp index ebf1b590ebd..d0749b8101d 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -475,9 +475,6 @@ class Thread: public ThreadShadow { } public: - // Used by fast lock support - virtual bool is_lock_owned(address adr) const; - // Check if address is within the given range of this thread's // stack: stack_base() > adr >= limit bool is_in_stack_range_incl(address adr, address limit) const { diff --git a/src/hotspot/share/runtime/vframeArray.cpp b/src/hotspot/share/runtime/vframeArray.cpp index cf7b087887f..d3bbbc28399 100644 --- a/src/hotspot/share/runtime/vframeArray.cpp +++ b/src/hotspot/share/runtime/vframeArray.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/monitorChunk.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/vframe.hpp" #include "runtime/vframeArray.hpp" #include "runtime/vframe_hp.hpp" @@ -48,11 +49,10 @@ int vframeArrayElement:: bci(void) const { return (_bci == SynchronizationEntryBCI ? 
0 : _bci); } -void vframeArrayElement::free_monitors(JavaThread* jt) { +void vframeArrayElement::free_monitors() { if (_monitors != nullptr) { MonitorChunk* chunk = _monitors; _monitors = nullptr; - jt->remove_monitor_chunk(chunk); delete chunk; } } @@ -72,7 +72,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) { int index; { - Thread* current_thread = Thread::current(); + JavaThread* current_thread = JavaThread::current(); ResourceMark rm(current_thread); HandleMark hm(current_thread); @@ -85,7 +85,6 @@ void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) { // Allocate monitor chunk _monitors = new MonitorChunk(list->length()); - vf->thread()->add_monitor_chunk(_monitors); // Migrate the BasicLocks from the stack to the monitor chunk for (index = 0; index < list->length(); index++) { @@ -95,9 +94,16 @@ void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) { if (monitor->owner_is_scalar_replaced()) { dest->set_obj(nullptr); } else { - assert(monitor->owner() == nullptr || !monitor->owner()->is_unlocked(), "object must be null or locked"); + assert(monitor->owner() != nullptr, "monitor owner must not be null"); + assert(!monitor->owner()->is_unlocked(), "monitor must be locked"); dest->set_obj(monitor->owner()); + assert(ObjectSynchronizer::current_thread_holds_lock(current_thread, Handle(current_thread, dest->obj())), + "should be held, before move_to"); + monitor->lock()->move_to(monitor->owner(), dest->lock()); + + assert(ObjectSynchronizer::current_thread_holds_lock(current_thread, Handle(current_thread, dest->obj())), + "should be held, after move_to"); } } } @@ -308,7 +314,11 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, top = iframe()->previous_monitor_in_interpreter_frame(top); BasicObjectLock* src = _monitors->at(index); top->set_obj(src->obj()); + assert(src->obj() == nullptr || ObjectSynchronizer::current_thread_holds_lock(thread, Handle(thread, 
src->obj())), + "should be held, before move_to"); src->lock()->move_to(src->obj(), top->lock()); + assert(src->obj() == nullptr || ObjectSynchronizer::current_thread_holds_lock(thread, Handle(thread, src->obj())), + "should be held, after move_to"); } if (ProfileInterpreter) { iframe()->interpreter_frame_set_mdp(0); // clear out the mdp. @@ -649,9 +659,8 @@ void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller } void vframeArray::deallocate_monitor_chunks() { - JavaThread* jt = JavaThread::current(); for (int index = 0; index < frames(); index++ ) { - element(index)->free_monitors(jt); + element(index)->free_monitors(); } } diff --git a/src/hotspot/share/runtime/vframeArray.hpp b/src/hotspot/share/runtime/vframeArray.hpp index 734703a94ae..b270046252d 100644 --- a/src/hotspot/share/runtime/vframeArray.hpp +++ b/src/hotspot/share/runtime/vframeArray.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,7 +77,7 @@ class vframeArrayElement { MonitorChunk* monitors(void) const { return _monitors; } - void free_monitors(JavaThread* jt); + void free_monitors(); StackValueCollection* locals(void) const { return _locals; } diff --git a/src/hotspot/share/services/mallocLimit.cpp b/src/hotspot/share/services/mallocLimit.cpp index 392b20f61ef..de3e8e872ee 100644 --- a/src/hotspot/share/services/mallocLimit.cpp +++ b/src/hotspot/share/services/mallocLimit.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2023 SAP SE. All rights reserved. - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,8 +24,7 @@ */ #include "precompiled.hpp" - -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "nmt/nmtCommon.hpp" #include "runtime/java.hpp" #include "runtime/globals.hpp" diff --git a/src/hotspot/share/services/mallocLimit.hpp b/src/hotspot/share/services/mallocLimit.hpp index 3e5e9347629..281cfa51396 100644 --- a/src/hotspot/share/services/mallocLimit.hpp +++ b/src/hotspot/share/services/mallocLimit.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2023 SAP SE. All rights reserved. - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #ifndef SHARE_SERVICES_MALLOCLIMIT_HPP #define SHARE_SERVICES_MALLOCLIMIT_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp index bf9979fa3b4..09cb7ffb25a 100644 --- a/src/hotspot/share/services/threadService.cpp +++ b/src/hotspot/share/services/threadService.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,11 +28,11 @@ #include "classfile/vmClasses.hpp" #include "classfile/vmSymbols.hpp" #include "gc/shared/oopStorageSet.hpp" -#include "memory/allocation.hpp" #include "memory/heapInspection.hpp" #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "nmt/memflags.hpp" #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/objArrayKlass.hpp" diff --git a/src/hotspot/share/utilities/bitMap.hpp b/src/hotspot/share/utilities/bitMap.hpp index 6d83c5cdad9..0d592de7cd0 100644 --- a/src/hotspot/share/utilities/bitMap.hpp +++ b/src/hotspot/share/utilities/bitMap.hpp @@ -25,7 +25,7 @@ #ifndef SHARE_UTILITIES_BITMAP_HPP #define SHARE_UTILITIES_BITMAP_HPP -#include "memory/allocation.hpp" +#include "nmt/memflags.hpp" #include "runtime/atomic.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/utilities/chunkedList.hpp b/src/hotspot/share/utilities/chunkedList.hpp index 81898ac53b2..9a600e4ce1b 100644 --- a/src/hotspot/share/utilities/chunkedList.hpp +++ b/src/hotspot/share/utilities/chunkedList.hpp @@ -44,7 +44,7 @@ template class ChunkedList : public CHeapObj { } public: - ChunkedList() : _top(_values), _next_used(nullptr), _next_free(nullptr) {} + ChunkedList() : _top(_values), _next_used(nullptr), _next_free(nullptr) {} bool is_full() const { return _top == end(); diff --git a/src/hotspot/share/utilities/debug.hpp b/src/hotspot/share/utilities/debug.hpp index d21439c35ca..f4e97832a5d 100644 --- a/src/hotspot/share/utilities/debug.hpp +++ b/src/hotspot/share/utilities/debug.hpp @@ -25,7 +25,6 @@ #ifndef SHARE_UTILITIES_DEBUG_HPP #define SHARE_UTILITIES_DEBUG_HPP -#include "utilities/attributeNoreturn.hpp" #include "utilities/breakpoint.hpp" #include "utilities/compilerWarnings.hpp" #include "utilities/macros.hpp" @@ -254,32 +253,32 @@ enum VMErrorType : unsigned int { }; // error reporting 
helper functions -ATTRIBUTE_NORETURN +[[noreturn]] void report_vm_error(const char* file, int line, const char* error_msg); -ATTRIBUTE_NORETURN +[[noreturn]] ATTRIBUTE_PRINTF(4, 5) void report_vm_error(const char* file, int line, const char* error_msg, const char* detail_fmt, ...); -ATTRIBUTE_NORETURN +[[noreturn]] void report_vm_status_error(const char* file, int line, const char* error_msg, int status, const char* detail); -ATTRIBUTE_NORETURN +[[noreturn]] ATTRIBUTE_PRINTF(4, 5) void report_fatal(VMErrorType error_type, const char* file, int line, const char* detail_fmt, ...); -ATTRIBUTE_NORETURN +[[noreturn]] ATTRIBUTE_PRINTF(5, 6) void report_vm_out_of_memory(const char* file, int line, size_t size, VMErrorType vm_err_type, const char* detail_fmt, ...); -ATTRIBUTE_NORETURN void report_should_not_call(const char* file, int line); -ATTRIBUTE_NORETURN void report_should_not_reach_here(const char* file, int line); -ATTRIBUTE_NORETURN void report_unimplemented(const char* file, int line); +[[noreturn]] void report_should_not_call(const char* file, int line); +[[noreturn]] void report_should_not_reach_here(const char* file, int line); +[[noreturn]] void report_unimplemented(const char* file, int line); -// NOT ATTRIBUTE_NORETURN +// NOT [[noreturn]] void report_untested(const char* file, int line, const char* message); ATTRIBUTE_PRINTF(1, 2) diff --git a/src/hotspot/share/utilities/events.hpp b/src/hotspot/share/utilities/events.hpp index 0aefbbefd2b..4470002a1e3 100644 --- a/src/hotspot/share/utilities/events.hpp +++ b/src/hotspot/share/utilities/events.hpp @@ -99,7 +99,7 @@ template class EventLogBase : public EventLog { EventRecord* _records; public: - EventLogBase(const char* name, const char* handle, int length = LogEventsBufferEntries): + EventLogBase(const char* name, const char* handle, int length = LogEventsBufferEntries): _mutex(Mutex::event, name), _name(name), _handle(handle), diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp 
b/src/hotspot/share/utilities/globalDefinitions.hpp index 34f0a40a826..a15a4de3e93 100644 --- a/src/hotspot/share/utilities/globalDefinitions.hpp +++ b/src/hotspot/share/utilities/globalDefinitions.hpp @@ -25,7 +25,6 @@ #ifndef SHARE_UTILITIES_GLOBALDEFINITIONS_HPP #define SHARE_UTILITIES_GLOBALDEFINITIONS_HPP -#include "utilities/attributeNoreturn.hpp" #include "utilities/compilerWarnings.hpp" #include "utilities/debug.hpp" #include "utilities/macros.hpp" diff --git a/src/hotspot/share/utilities/istream.cpp b/src/hotspot/share/utilities/istream.cpp new file mode 100644 index 00000000000..cb082128c62 --- /dev/null +++ b/src/hotspot/share/utilities/istream.cpp @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "memory/allocation.inline.hpp" +#include "runtime/orderAccess.hpp" +#include "utilities/istream.hpp" +#include "utilities/ostream.hpp" +#include "utilities/xmlstream.hpp" + +#ifndef ASSERT +#define COV(casen) {} +#else //ASSERT +// Support for coverage testing. Used by the gtest. +/* $ sed < istream.cpp '/^.* COV(\([A-Z][^)]*\)).*$/!d;s//COV_FN(\1)/' | + tr '\12' ' ' | fold -sw72 | sed 's| $||;s|.*| & \\|' + */ +#define DO_COV_CASES(COV_FN) \ + COV_FN(NXT_L) COV_FN(NXT_N) COV_FN(FIB_P) COV_FN(FIB_E) COV_FN(FIB_N) \ + COV_FN(FIB_L) COV_FN(PFB_C) COV_FN(PFB_P) COV_FN(PFB_A) \ + COV_FN(PFB_G) COV_FN(PFB_H) COV_FN(SBC_C) COV_FN(SBC_B) COV_FN(SBC_N) \ + COV_FN(SBC_L) COV_FN(EXB_R) COV_FN(EXB_A) + /**/ +#define COV_COUNT(casename) coverage_case_##casename +#define DECLARE_COV_CASE(casename) static int COV_COUNT(casename); +DO_COV_CASES(DECLARE_COV_CASE) +#undef DECLARE_COV_CASE + +static int current_coverage_mode = 0; +#define COV(casename) { \ + if (current_coverage_mode != 0) { \ + COV_COUNT(casename)++; \ + } } +#endif //ASSERT + +bool inputStream::next() { + // We have to look at the current line first, just in case nobody + // actually called current_line() or done(). 
+ preload(); + if (definitely_done()) { + return false; // OK to call this->next() after done is true + } + // current line is at buffer[beg..end]; now skip past its '\0' + assert(have_current_line(), ""); + + set_buffer_content(_next, _content_end); + if (!need_to_read()) { // any next line was already in the buffer + COV(NXT_L); + assert(have_current_line(), ""); + return true; + } else { // go back to the source for more + COV(NXT_N); + return fill_buffer(); + } +} + +void inputStream::set_done() { + size_t end = _beg = _end = _content_end; + _next = end + NEXT_PHANTOM; + _line_ending = 0; + assert(definitely_done(), ""); +} + +void inputStream::set_error(bool error_condition) { + if (error_condition) { + set_done(); + _input_state = IState::ERR_STATE; + assert(error(), ""); + } else if (error()) { + _input_state = definitely_done() ? IState::EOF_STATE : IState::NTR_STATE; + } +} + +void inputStream::clear_buffer() { + _content_end = _beg = _end = _next = 0; + _line_ending = 0; +} + +const char* inputStream::next_content(size_t& next_content_length) const { + assert(is_sane(), ""); + size_t len = buffered_content_length(false); + next_content_length = len; + return len == 0 ? 
"" : &_buffer[_next]; +} + +void inputStream::set_input(inputStream::Input* input) { + clear_buffer(); + _input = input; + _input_state = IState::NTR_STATE; +} + +bool inputStream::fill_buffer() { + size_t fill_offset, fill_length; + assert(!definitely_done(), ""); // caller responsibility + while (need_to_read()) { + prepare_to_fill_buffer(fill_offset, fill_length); + if (error()) return false; + assert(fill_length > 0, ""); + assert(fill_offset < _buffer_size, ""); + assert(fill_offset + fill_length <= _buffer_size, ""); + size_t nr = 0; + if (_input != nullptr && _input_state == IState::NTR_STATE) { + nr = _input->read(&_buffer[fill_offset], fill_length); + if (nr == 0) _input_state = IState::EOF_STATE; // do not get EOF twice + } + bool last_partial = false; + if (nr > 0) { + fill_offset += nr; + } else if (_beg == _end) { // no partial line, so end it now + // we hit the end of the file (or there was never anything there) + COV(FIB_P); + assert(!definitely_done(), ""); + set_done(); + assert(definitely_done(), ""); + return false; + } else { + // pretend to read a newline, to complete the last partial line + COV(FIB_E); + _buffer[fill_offset++] = '\n'; // insert phantom newline + last_partial = true; + } + set_buffer_content(_beg, fill_offset); + assert(!definitely_done(), ""); + if (need_to_read()) { COV(FIB_N); } + else { COV(FIB_L); } + if (last_partial) { + assert(have_current_line(), ""); + _line_ending = 0; + _content_end -= 1; // reverse insertion of phantom newline + assert(_next == _content_end + NEXT_PHANTOM, ""); + assert(have_current_line(), ""); + } + } + return true; +} + +// Find some space in the buffer for reading. If there is already a +// partial line in the buffer, new space must follow it immediately. +// The partial line is between _beg and _end, and no other parts of +// the buffer are in use. 
+void inputStream::prepare_to_fill_buffer(size_t& fill_offset, + size_t& fill_length) { + assert(need_to_read(), ""); // _next pointer out of the way + size_t end = _content_end; + if (_beg == end) { // if no partial line present... + COV(PFB_C); + clear_buffer(); + fill_offset = 0; + fill_length = _buffer_size; + return; // use the whole buffer + } + // at this point we have a pending line that needs more input + if (_beg > 0 && (_input != nullptr || end == _buffer_size)) { + COV(PFB_P); + // compact the buffer by overwriting characters from previous lines + size_t shift_left = _beg; + ::memmove(_buffer, _buffer + shift_left, _content_end - _beg); + _beg -= shift_left; + _end -= shift_left; + _next -= shift_left; + _content_end -= shift_left; + end = _content_end; + } + if (end < _buffer_size) { + COV(PFB_A); + fill_offset = end; + fill_length = _buffer_size - end; + return; // use the whole buffer except partial line at the beginning + } + // the whole buffer contains a partial line, which means we must expand + COV(PFB_G); + size_t new_size = (_buffer_size < BIG_SIZE ? BIG_SIZE + : _buffer_size + _buffer_size / 2); + assert(new_size > _buffer_size, ""); + if (expand_buffer(new_size)) { + COV(PFB_H); + fill_offset = end; + fill_length = _buffer_size - end; + return; // use the expanded buffer, except the partial line + } + // no recovery from failed allocation; just set the error state and bail + set_error(); +} + +// The only buffer content is between the given offsets. +// Set _beg, _end, _next, and _content_end appropriately. 
+void inputStream::set_buffer_content(size_t content_start, + size_t content_end) { + assert(content_end <= _buffer_size, ""); + assert(content_start <= content_end + NEXT_PHANTOM, ""); + if (content_start >= content_end) { // empty content; clear buffer + COV(SBC_C); + clear_buffer(); + return; + } + COV(SBC_B); + size_t content_len = content_end - content_start; + _beg = content_start; + _content_end = content_end; + + // this is where we scan for newlines + char* nl = (char*) memchr(&_buffer[content_start], '\n', content_len); + if (nl == nullptr) { + COV(SBC_N); + _next = _end = content_end; + _line_ending = 0; + assert(need_to_read(), ""); + } else { + COV(SBC_L); + *nl = '\0'; // so that this->current_line() will work + ++_line_count; + size_t end = nl - &_buffer[0]; + _next = end + 1; + assert(_next != _content_end + NEXT_PHANTOM, ""); + if (end > content_start && nl[-1] == '\r') { // yuck + // again, for this->current_line(), remove '\r' before '\n' + nl[-1] = '\0'; + --end; + // Note: we could treat '\r' alone as a line ending on some + // platforms, but that is way too much work. Newline '\n' is + // supported everywhere, and some tools insist on accompanying + // it with return as well, so we remove that. But return '\r' + // by itself is an obsolete format, and also inconsistent with + // outputStream, which standardizes on '\n' and never emits '\r'. + // Postel's law suggests that we write '\n' only and grudgingly + // accept '\r' before '\n'. + } + _end = end; // now this->current_line() points to buf[beg..end] + _line_ending = (int)(_next - end); + assert(have_current_line(), ""); + assert(current_line() == &_buffer[_beg], ""); + assert(current_line_length() == _end - _beg, ""); + } +}
+bool inputStream::expand_buffer(size_t new_length) { + assert(new_length > _buffer_size, ""); + char* new_buf = nullptr; + assert(new_length > sizeof(_small_buffer), ""); + if (_buffer == &_small_buffer[0]) { + // fresh alloc from c-heap + COV(EXB_A); + new_buf = NEW_C_HEAP_ARRAY(char, new_length, mtInternal); + assert(new_buf != nullptr, "would have exited VM if OOM"); + if (_content_end > 0) { + assert(_content_end <= _buffer_size, ""); + ::memcpy(new_buf, _buffer, _content_end); // copy only the active content + } + } else { + // realloc + COV(EXB_R); + new_buf = REALLOC_C_HEAP_ARRAY(char, _buffer, new_length, mtInternal); + assert(new_buf != nullptr, "would have exited VM if OOM"); + } + + if (new_buf == nullptr) { + return false; // do not further update _buffer etc. + } + _buffer = new_buf; + _buffer_size = new_length; + return true; +} + +inputStream::~inputStream() { + if (has_c_heap_buffer()) { + FreeHeap(_buffer); + DEBUG_ONLY(_buffer = (char*)((uintptr_t)0xdeadbeef)); // sanity + } +} + +#ifdef ASSERT +void inputStream::dump(const char* what) { + int diff = (int)(_end - _beg); + if (!_buffer || _beg > _buffer_size || _end > _buffer_size) + diff = 0; + + bool ntr = (_next == _end), + hcl = (_beg < _content_end && _end < _next), + ddn = (_beg == _content_end && _next > _content_end); + tty->print_cr("%s%sistream %s%s%s%s%s [%d<%.*s>%d/%d..%d] LE=%d," + " B=%llx%s[%d], LN=%d, CH=%d", + what ? what : "", what ? ": " : "", + _buffer == nullptr ? "U" : "", + ntr ? "R" : "", + hcl ? "L" : "", + ddn ? "D" : "", + (_next < _content_end ? "" : + _next == _content_end ? "N" : "P"), + (int)_beg, + diff < 0 ? 0 : diff > 10 ? 10 : diff, + _buffer ? &_buffer[_beg] : "", + (int)_end, (int)_next, (int)_content_end, + _line_ending, + (unsigned long long)(intptr_t)_buffer, + _buffer == _small_buffer ? "(SB)" : "", + (int)_buffer_size, + (int)_line_count, + has_c_heap_buffer()); + assert(is_sane(), ""); +} +#endif + +#ifdef ASSERT +// More support for coverage testing. 
+int inputStream::coverage_mode(int start, + int& cases, int& total, int& zeroes) { + int old_mode = current_coverage_mode; + current_coverage_mode = start; + int num_cases = 0, zero_count = 0, case_count = 0; +#define COUNT_COV_CASE(casename) { \ + int tem = COV_COUNT(casename); \ + case_count += tem; \ + if (tem == 0) ++zero_count; \ + num_cases++; \ + } + DO_COV_CASES(COUNT_COV_CASE) +#undef COUNT_COV_CASE + if (start < 0) { + tty->print("istream coverage:"); + #define PRINT_COV_CASE(casename) \ + tty->print(" %s:%d", #casename, COV_COUNT(casename)); + DO_COV_CASES(PRINT_COV_CASE) + tty->cr(); + #undef PRINT_COV_CASE + if (zero_count != 0) { + case_count = -case_count; + #define ZERO_COV_CASE(casename) \ + if (COV_COUNT(casename) == 0) \ + tty->print_cr("%s: no coverage for %s", \ + __FILE__, #casename); \ + DO_COV_CASES(ZERO_COV_CASE) + #undef ZERO_COV_CASE + } + } + if (start >= 2 || start < 0) { + #define CLEAR_COV_CASE(casename) \ + COV_COUNT(casename) = 0; + DO_COV_CASES(CLEAR_COV_CASE) + #undef CLEAR_COV_CASE + } + cases = num_cases; + total = case_count; + zeroes = zero_count; + return old_mode; +} +#endif //ASSERT diff --git a/src/hotspot/share/utilities/istream.hpp b/src/hotspot/share/utilities/istream.hpp new file mode 100644 index 00000000000..b6a58055b93 --- /dev/null +++ b/src/hotspot/share/utilities/istream.hpp @@ -0,0 +1,386 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_UTILITIES_ISTREAM_HPP +#define SHARE_UTILITIES_ISTREAM_HPP + +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" +#include "utilities/ostream.hpp" + +// Input streams for reading line-oriented textual data. These streams +// treat newline '\n' very differently from all other bytes. Carriage +// return '\r' is just another bit of whitespace, although it is +// removed just before newline. +// +// Null '\0' is just a data byte, although it also terminates C +// strings; the `current_line` function adds a null after removing any +// line terminator but does not specially process any nulls embedded +// in the line. +// +// There are sizing access functions which allow lines to contain +// null, but the simpler function assumes null termination, and thus +// lines containing null will "look" shorter when viewed as C strings. +// Use the sizing access functions if you care about this. +// +// Formatting guidelines: +// +// Configuration data should be line-oriented. It should be readable +// by humans (though perhaps with difficulty). It should be easily +// processed by text editors and by widely available text processing +// tools such as grep, sed, and awk. +// +// Configuration data should not require "compilers" to generate, if +// possible. It should be editable by hand, if possible. 
In cases +// where binary data is strongly required, pick a binary format +// already native to Hotspot, such as classfile, jar, or jmod. +// +// Each line should be separately parseable; the parsing can be ad +// hoc. For constructs inherently larger than single lines (such as +// complex method configuration information), try to use a structuring +// principle that allows "leaf" data to be line-oriented, and delimits +// that data with markup lines of some sort. Try to pick a +// line-friendly version of a standard format like XML or Markdown. +// JSON is somewhat problematic because there is no line-friendly leaf +// syntax: everything at the leaves must be a quoted string in JSON. +// +// Use simple parsing via scanf-like formats for simple applications. +// But, keep in mind that these formats may lose data when applied to +// unusual strings, such as class names that contain spaces, or method +// names that contain punctuation. For more robust transmission of +// potentially unusual names, consider wrapping them in XML-flavored +// lines like . +// +// Note: Input streams are never MT-safe. + +class inputStream : public CHeapObjBase { + public: + class Input; + + private: + NONCOPYABLE(inputStream); + + static constexpr size_t SMALL_SIZE = 240 DEBUG_ONLY(*0 + 10); + static constexpr size_t BIG_SIZE = 2048 DEBUG_ONLY(*0 + 20); + + protected: + // Values for _input_state, to distinguish some phases of history: + // Do we need to read more input (NTR)? Did we see EOF already? + // Was there an error getting input or allocating buffer space? + enum class IState : int { NTR_STATE, EOF_STATE, ERR_STATE }; + + // Named offset for _next relative to _content_end, of phantom '\n'. 
+ static const int NEXT_PHANTOM = 1; + + Input* _input; // where the input comes from or else nullptr + IState _input_state; // one of {NTR,EOF,ERR}_STATE + char _line_ending; // one of {0,1,2} for "", "\n", "\r\n" + char* _buffer; // scratch buffer holding at least the current line + size_t _buffer_size; // allocated size of buffer + size_t _content_end; // offset to end of valid contents of buffer + size_t _beg; // offset in buffer to start of current line + size_t _end; // offset to end of known current line (else content_end) + size_t _next; // offset to known start of next line (else =end) + size_t _line_count; // increasing non-resettable count of lines read + char _small_buffer[SMALL_SIZE]; // stack-allocated buffer for holding lines; + // will switch to C_HEAP allocation when necessary. + + bool has_c_heap_buffer() { + return _buffer != &_small_buffer[0]; + } + + // Buffer states + // + // The current line (less any line ending) is always [beg..end). + // It is always the case that 0 <= beg <= end <= con_end <= buffer_size. + // When there is a current line buffered, end < next <= 1+con_end. + // In that case, the value of next is end + max(1, strlen(lend)), + // where lend is "\n", "\r\n", or (for a last partial line) "". + // But if next == end, we need to read more input, or observe an EOF. 
+ // + // beg ==end ==next == con_end => nothing buffered, we need to read + // beg <=end < next <= con_end => have current line, with terminator + // beg < end < next ==1+con_end => have partial current line (saw EOF) + // beg < end ==next == con_end => partial line, we need to read + // beg ==end < next ==1+con_end => definitely done; no more I/O + // + // These states are in three mutually exclusive groups: + // need_to_read() <= nothing or partial line in buffer + // have_current_line() <= beg/end point to valid line (partial only if EOF) + // definitely_done() <= consumed all lines && (hit EOF || hit error) + // These states are internal; the user can only look at next/done/error. + // + // Relative to these states, everything already read from the input + // before the first byte of the current line is logically present + // (but not accessible) before _beg, while everything not yet read + // from the input is after _content_end. The difference between + // these two pointers is constant, except when characters change + // from being in the current line to being (logically) before it, + // when next is called. + + bool is_sane() const { + assert(_buffer != nullptr, ""); + assert(_content_end <= _buffer_size, ""); + assert(_beg <= _end && _end <= _content_end, ""); + assert(_end <= _next && _next <= _content_end + NEXT_PHANTOM, ""); + assert(_buffer_size == 0 || _next <= _buffer_size, ""); + return true; + } + + bool need_to_read() const { + assert(is_sane(), ""); + return _next == _end; + } + bool have_current_line() const { + assert(is_sane(), ""); + // _beg < _content_end because there is an \0 (was \n) at _end, + // or else it is a non-empty partial line and the \0 is at + // _content_end. In either case, if _end == _next we are + // still searching for more input. + return (_beg < _content_end && _end < _next); + } + bool definitely_done() const { + assert(is_sane(), ""); + // If _beg < _content_end we still have a line of some sort. 
+ // Otherwise, if _next > _content_end, we have seen EOF or error. + return (_beg == _content_end && _next > _content_end); + } + + // Reset indexes within the buffer to point to no content. + void clear_buffer(); + + // Reset indexes within the buffer to point to the given content. + // This is where we scan for newlines as well. + void set_buffer_content(size_t content_start, size_t content_end); + + // Try to make the buffer bigger. This may be necessary in order to + // buffer a very long line. Returns false if there was an + // allocation failure. + // + // On allocation failure, just make do with whatever buffer there + // was to start with; the caller must check for this condition and + // avoid buffering more data in the non-expanded buffer. However, + // the buffer will always be non-null, so at least one line can be + // buffered, if it is of normal size. + bool expand_buffer(size_t new_length); + + // Make sure there is at least one line in the buffer, and set + // _beg/_end to indicate where it is. Any content before _beg can + // be overwritten to make more room in the buffer. If there is no + // more input, set the state up to indicate we are done. + bool fill_buffer(); + + // Find some room in the buffer so we can call read on it. + // This might call expand_buffer but will try not to. + // The assumption is that read already buffers slow I/O calls. + // The purpose for the small buffer managed here is to store whole lines, + // and perhaps edit them in-place. + void prepare_to_fill_buffer(size_t& fill_offset, size_t& fill_length); + + // Quick check for an initially incomplete buffer... + void preload() const { + if (need_to_read()) { + const_cast(this)->fill_buffer(); + } + } + + // How much content is buffered (if any) after the current line? + size_t buffered_content_length(bool include_current) const { + return (include_current ? _content_end - _beg : + _content_end >= _next ? 
_content_end - _next : 0); + } + + // Returns a pointer and count to characters buffered after the + // current line, but not yet read from my input source. Only useful + // if you are trying to stack input streams on top of each other + // somehow. You can also ask the input source if it thinks it has + // more bytes. + const char* next_content(size_t& next_content_length) const; + + public: + // Create an empty input stream. + // Call push_back_input or set_input to configure. + inputStream() : + _input(nullptr), + _input_state(IState::NTR_STATE), + _line_ending(0), + _buffer(&_small_buffer[0]), + _buffer_size(sizeof(_small_buffer)), + _content_end(0), + _beg(0), + _end(0), + _next(0), + _line_count(0) {} + + // Take input from the given source. Buffer only a modest amount. + inputStream(Input* input) + : inputStream() + { + set_input(input); + } + + virtual ~inputStream(); + + // Discards any previous input and sets the given input source. + void set_input(Input* input); + + // Returns a pointer to a null terminated mutable copy of the current line. + // Note that embedded nulls may make the line appear shorter than it really is. + // This may trigger input activity if there is not enough data buffered. + // If there are no more lines, return an empty line, statically allocated. + char* current_line() const { + preload(); + if (definitely_done()) + return (char*)""; + return &_buffer[_beg]; + } + + // Return the size of the current line, exclusive of any line terminator. + // If no lines have been read yet, or there are none remaining, return zero. + size_t current_line_length() const { + preload(); + return _end - _beg; + } + + // Reports my current input source, if any, else a null pointer. + Input* input() const { return _input; } + + // Discards the current line, gets ready to report the next line. + // Returns true if there is one, which is always the opposite of done(). + // Fetches input if necessary. 
+ bool next(); + + // Reports if there are no more lines. Fetches input if necessary. + bool done() const { + preload(); + return definitely_done(); + } + + // Discard pending input and do not read any more. + // Takes no action if already done, whether in an error state or not. + void set_done(); + + // Reports if this stream has had an error was reported on it. + bool error() const { + return _input_state == IState::ERR_STATE; + } + + // Set this stream done with an error, if the argument is true. + // If it is false but there is an error condition, clear the error. + // Otherwise do nothing. + void set_error(bool error_condition = true); + + // lineno is the 1-based ordinal of the current line; it starts at one + size_t lineno() const { preload(); return _line_count; } + + // Copy the current line to the given output stream. + void print_on(outputStream* out); + + // Copy the current line to the given output stream, and also call cr(). + void print_cr_on(outputStream* out) { + print_on(out); out->cr(); + } + +#ifdef ASSERT + void dump(const char* what = nullptr); + static int coverage_mode(int mode, int& cases, int& total, int& zeroes); +#else + void dump(const char* what = nullptr) { } +#endif + + + // Block-oriented input, which treats all bytes equally. + class Input : public CHeapObjBase { + public: + // Read some characters from an external source into the line buffer. + // If there are no more, return zero, otherwise return non-zero. + // It must be OK to call read even after it returns zero. 
+ virtual size_t read(char* buf, size_t size) = 0; + // Example: read(b,s) { return fread(b, 1, s, _my_fp); } + // Example: read(b,s) { return 0; } // never more than the initial buffer + }; +}; + +// for reading lines from files +class FileInput : public inputStream::Input { + NONCOPYABLE(FileInput); + + protected: + fileStream& _fs; + fileStream _private_fs; + + // it does not seem likely there are such file streams around + FileInput(fileStream& fs) + : _fs(fs) + { } + + public: + // just forward all the constructor arguments to the wrapped line-input class + template + FileInput(Arg... arg) + : _fs(_private_fs), _private_fs(arg...) + { } + + FileInput(const char* file_name) + : FileInput(file_name, "rt") + { } + + bool is_open() const { return _fs.is_open(); } + + protected: + size_t read(char* buf, size_t size) override { + return _fs.read(buf, size); + } +}; + +class MemoryInput : public inputStream::Input { + const void* _base; + const size_t _limit; + size_t _offset; + + public: + MemoryInput(const void* base, size_t size, + size_t offset = 0) + : _base(base), _limit(size), _offset(offset) {} + + MemoryInput(const char* start) + : MemoryInput(start, 0, strlen(start)) + { } + + protected: + size_t read(char* buf, size_t size) override { + size_t nr = size; + if (nr > _limit - _offset) { + nr = _limit - _offset; + } + if (nr > 0) { + ::memcpy(buf, (char*)_base + _offset, nr); + _offset += nr; + } + return nr; + } +}; + +#endif // SHARE_UTILITIES_ISTREAM_HPP diff --git a/src/hotspot/share/utilities/linkedlist.hpp b/src/hotspot/share/utilities/linkedlist.hpp index 5b8e258d539..eec7ea1e48d 100644 --- a/src/hotspot/share/utilities/linkedlist.hpp +++ b/src/hotspot/share/utilities/linkedlist.hpp @@ -82,7 +82,7 @@ template class LinkedListNode : public AnyObj { template class LinkedList : public AnyObj { protected: LinkedListNode* _head; - NONCOPYABLE(LinkedList); + NONCOPYABLE(LinkedList); public: LinkedList() : _head(nullptr) { } diff --git 
a/src/hotspot/share/utilities/ostream.cpp b/src/hotspot/share/utilities/ostream.cpp index a1575b3d2c6..89335f9cf4c 100644 --- a/src/hotspot/share/utilities/ostream.cpp +++ b/src/hotspot/share/utilities/ostream.cpp @@ -397,7 +397,7 @@ char* stringStream::as_string(bool c_heap) const { char* copy = c_heap ? NEW_C_HEAP_ARRAY(char, _written + 1, mtInternal) : NEW_RESOURCE_ARRAY(char, _written + 1); ::memcpy(copy, _buffer, _written); - copy[_written] = 0; // terminating null + copy[_written] = '\0'; // terminating null if (c_heap) { // Need to ensure our content is written to memory before we return // the pointer to it. @@ -590,23 +590,10 @@ long fileStream::fileSize() { return size; } -char* fileStream::readln(char *data, int count ) { - char * ret = nullptr; - if (_file != nullptr) { - ret = ::fgets(data, count, _file); - // Get rid of annoying \n char only if it is present. - size_t len = ::strlen(data); - if (len > 0 && data[len - 1] == '\n') { - data[len - 1] = '\0'; - } - } - return ret; -} - fileStream::~fileStream() { if (_file != nullptr) { - if (_need_close) fclose(_file); - _file = nullptr; + close(); + _file = nullptr; } } diff --git a/src/hotspot/share/utilities/ostream.hpp b/src/hotspot/share/utilities/ostream.hpp index 72cf804ed64..d39fca29ba4 100644 --- a/src/hotspot/share/utilities/ostream.hpp +++ b/src/hotspot/share/utilities/ostream.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -242,11 +242,20 @@ class fileStream : public outputStream { ~fileStream(); bool is_open() const { return _file != nullptr; } virtual void write(const char* c, size_t len); - size_t read(void *data, size_t size, size_t count) { return _file != nullptr ? 
::fread(data, size, count, _file) : 0; } - char* readln(char *data, int count); - int eof() { return _file != nullptr ? feof(_file) : -1; } + // unlike other classes in this file, fileStream can perform input as well as output + size_t read(void* data, size_t size) { + if (_file == nullptr) return 0; + return ::fread(data, 1, size, _file); + } + size_t read(void *data, size_t size, size_t count) { + return read(data, size * count); + } + void close() { + if (_file == nullptr || !_need_close) return; + fclose(_file); + _need_close = false; + } long fileSize(); - void rewind() { if (_file != nullptr) ::rewind(_file); } void flush(); }; diff --git a/src/hotspot/share/utilities/vmError.hpp b/src/hotspot/share/utilities/vmError.hpp index a2a098e2cda..dee8335afd5 100644 --- a/src/hotspot/share/utilities/vmError.hpp +++ b/src/hotspot/share/utilities/vmError.hpp @@ -164,32 +164,32 @@ class VMError : public AllStatic { static void print_vm_info(outputStream* st); // main error reporting function - ATTRIBUTE_NORETURN + [[noreturn]] ATTRIBUTE_PRINTF(6, 7) static void report_and_die(Thread* thread, unsigned int sig, address pc, void* siginfo, void* context, const char* detail_fmt, ...); - ATTRIBUTE_NORETURN + [[noreturn]] ATTRIBUTE_PRINTF(6, 7) static void report_and_die(Thread* thread, void* context, const char* filename, int lineno, const char* message, const char* detail_fmt, ...); - ATTRIBUTE_NORETURN + [[noreturn]] ATTRIBUTE_PRINTF(3, 0) static void report_and_die(int id, const char* message, const char* detail_fmt, va_list detail_args, Thread* thread, address pc, void* siginfo, void* context, const char* filename, int lineno, size_t size); - ATTRIBUTE_NORETURN + [[noreturn]] static void report_and_die(Thread* thread, unsigned int sig, address pc, void* siginfo, void* context); - ATTRIBUTE_NORETURN + [[noreturn]] ATTRIBUTE_PRINTF(6, 0) static void report_and_die(Thread* thread, void* context, const char* filename, int lineno, const char* message, const char* detail_fmt, 
va_list detail_args); - ATTRIBUTE_NORETURN + [[noreturn]] ATTRIBUTE_PRINTF(6, 0) static void report_and_die(Thread* thread, const char* filename, int lineno, size_t size, VMErrorType vm_err_type, const char* detail_fmt, diff --git a/src/java.base/share/classes/java/lang/System.java b/src/java.base/share/classes/java/lang/System.java index ae1c65b9f93..5e28b9ce95b 100644 --- a/src/java.base/share/classes/java/lang/System.java +++ b/src/java.base/share/classes/java/lang/System.java @@ -191,8 +191,9 @@ private System() { */ public static final PrintStream err = null; - // Holder for the initial value of `in`, set within `initPhase1()`. - private static InputStream initialIn; + // Initial values of System.in and System.err, set in initPhase1(). + private static @Stable InputStream initialIn; + private static @Stable PrintStream initialErr; // indicates if a security manager is possible private static final int NEVER = 1; @@ -355,9 +356,6 @@ private static class CallersHolder { = Collections.synchronizedMap(new WeakHashMap<>()); } - // Remember initial System.err. 
setSecurityManager() warning goes here - private static volatile @Stable PrintStream initialErrStream; - private static URL codeSource(Class clazz) { PrivilegedAction pa = clazz::getProtectionDomain; @SuppressWarnings("removal") @@ -417,7 +415,7 @@ public static void setSecurityManager(@SuppressWarnings("removal") SecurityManag } else { source = callerClass.getName() + " (" + url + ")"; } - initialErrStream.printf(""" + initialErr.printf(""" WARNING: A terminally deprecated method in java.lang.System has been called WARNING: System::setSecurityManager has been called by %s WARNING: Please consider reporting this to the maintainers of %s @@ -2200,7 +2198,8 @@ private static void initPhase1() { // thus they are equivalent to Console.charset(), otherwise the encodings // of those properties default to native.encoding setOut0(newPrintStream(fdOut, props.getProperty("stdout.encoding"))); - setErr0(newPrintStream(fdErr, props.getProperty("stderr.encoding"))); + initialErr = newPrintStream(fdErr, props.getProperty("stderr.encoding")); + setErr0(initialErr); // Setup Java signal handlers for HUP, TERM, and INT (where available). Terminator.setup(); @@ -2406,8 +2405,6 @@ private static void initPhase3() { notSupportedJnuEncoding); } - initialErrStream = System.err; - // initializing the system class loader VM.initLevel(3); @@ -2598,6 +2595,10 @@ public InputStream initialSystemIn() { return initialIn; } + public PrintStream initialSystemErr() { + return initialErr; + } + public void setCause(Throwable t, Throwable cause) { t.setCause(cause); } diff --git a/src/java.base/share/classes/java/lang/classfile/TypeKind.java b/src/java.base/share/classes/java/lang/classfile/TypeKind.java index bf2435f43ef..5ba566b3d06 100644 --- a/src/java.base/share/classes/java/lang/classfile/TypeKind.java +++ b/src/java.base/share/classes/java/lang/classfile/TypeKind.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ public enum TypeKind { private final String name; private final String descriptor; - private final int newarraycode; + private final int newarrayCode; /** {@return the human-readable name corresponding to this type} */ public String typeName() { return name; } @@ -66,9 +66,12 @@ public enum TypeKind { /** {@return the field descriptor character corresponding to this type} */ public String descriptor() { return descriptor; } - /** {@return the code used by the {@code newarray} opcode corresponding to this type} */ - public int newarraycode() { - return newarraycode; + /** + * {@return the code used by the {@code newarray} opcode corresponding to this type} + * @since 23 + */ + public int newarrayCode() { + return newarrayCode; } /** @@ -94,19 +97,21 @@ public TypeKind asLoadable() { }; } - TypeKind(String name, String descriptor, int newarraycode) { + TypeKind(String name, String descriptor, int newarrayCode) { this.name = name; this.descriptor = descriptor; - this.newarraycode = newarraycode; + this.newarrayCode = newarrayCode; } /** * {@return the type kind associated with the array type described by the * array code used as an operand to {@code newarray}} - * @param newarraycode the operand of the {@code newarray} instruction + * @param newarrayCode the operand of the {@code newarray} instruction + * @throws IllegalArgumentException if the code is invalid + * @since 23 */ - public static TypeKind fromNewArrayCode(int newarraycode) { - return switch (newarraycode) { + public static TypeKind fromNewarrayCode(int newarrayCode) { + return switch (newarrayCode) { case 4 -> TypeKind.BooleanType; case 5 -> TypeKind.CharType; case 6 -> TypeKind.FloatType; @@ -115,15 +120,19 @@ public static TypeKind fromNewArrayCode(int newarraycode) { 
case 9 -> TypeKind.ShortType; case 10 -> TypeKind.IntType; case 11 -> TypeKind.LongType; - default -> throw new IllegalArgumentException("Bad new array code: " + newarraycode); + default -> throw new IllegalArgumentException("Bad newarray code: " + newarrayCode); }; } /** * {@return the type kind associated with the specified field descriptor} * @param s the field descriptor + * @throws IllegalArgumentException only if the descriptor is not valid */ public static TypeKind fromDescriptor(CharSequence s) { + if (s.isEmpty()) { // implicit null check + throw new IllegalArgumentException("Empty descriptor"); + } return switch (s.charAt(0)) { case '[', 'L' -> TypeKind.ReferenceType; case 'B' -> TypeKind.ByteType; @@ -144,6 +153,8 @@ public static TypeKind fromDescriptor(CharSequence s) { * @param descriptor the field descriptor */ public static TypeKind from(TypeDescriptor.OfField descriptor) { - return fromDescriptor(descriptor.descriptorString()); + return descriptor.isPrimitive() // implicit null check + ? fromDescriptor(descriptor.descriptorString()) + : TypeKind.ReferenceType; } } diff --git a/src/java.base/share/classes/java/lang/classfile/instruction/NewPrimitiveArrayInstruction.java b/src/java.base/share/classes/java/lang/classfile/instruction/NewPrimitiveArrayInstruction.java index a67a0f21c08..ae3f465402d 100644 --- a/src/java.base/share/classes/java/lang/classfile/instruction/NewPrimitiveArrayInstruction.java +++ b/src/java.base/share/classes/java/lang/classfile/instruction/NewPrimitiveArrayInstruction.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -56,7 +56,7 @@ public sealed interface NewPrimitiveArrayInstruction extends Instruction */ static NewPrimitiveArrayInstruction of(TypeKind typeKind) { // Implicit null-check: - if (typeKind.newarraycode() < 0) { + if (typeKind.newarrayCode() < 0) { throw new IllegalArgumentException("Illegal component type: " + typeKind.typeName()); } return new AbstractInstruction.UnboundNewPrimitiveArrayInstruction(typeKind); diff --git a/src/java.base/share/classes/java/lang/invoke/GenerateJLIClassesHelper.java b/src/java.base/share/classes/java/lang/invoke/GenerateJLIClassesHelper.java index 19a193a924a..46eeb67de54 100644 --- a/src/java.base/share/classes/java/lang/invoke/GenerateJLIClassesHelper.java +++ b/src/java.base/share/classes/java/lang/invoke/GenerateJLIClassesHelper.java @@ -28,7 +28,6 @@ import jdk.internal.org.objectweb.asm.ClassWriter; import jdk.internal.org.objectweb.asm.Opcodes; import sun.invoke.util.Wrapper; -import sun.util.logging.PlatformLogger; import java.util.ArrayList; import java.util.HashSet; @@ -73,6 +72,7 @@ static class HolderClassBuilder { private final TreeSet speciesTypes = new TreeSet<>(); private final TreeSet invokerTypes = new TreeSet<>(); + private final TreeSet linkerTypes = new TreeSet<>(); private final TreeSet callSiteTypes = new TreeSet<>(); private final Map> dmhMethods = new TreeMap<>(); @@ -87,6 +87,12 @@ HolderClassBuilder addInvokerType(String methodType) { return this; } + HolderClassBuilder addLinkerType(String methodType) { + validateMethodType(methodType); + linkerTypes.add(methodType); + return this; + } + HolderClassBuilder addCallSiteType(String csType) { validateMethodType(csType); callSiteTypes.add(csType); @@ -130,19 +136,33 @@ Map build() { } } - // The invoker type to ask for is retrieved by removing the first + // The linker type to ask for is retrieved by removing the first // and the last argument, which needs to be of Object.class + 
MethodType[] linkerMethodTypes = new MethodType[linkerTypes.size()]; + index = 0; + for (String linkerType : linkerTypes) { + MethodType mt = asMethodType(linkerType); + final int lastParam = mt.parameterCount() - 1; + if (!checkLinkerTypeParams(mt)) { + throw new RuntimeException( + "Linker type parameter must start and end with Object: " + linkerType); + } + mt = mt.dropParameterTypes(lastParam, lastParam + 1); + linkerMethodTypes[index] = mt.dropParameterTypes(0, 1); + index++; + } + + // The invoker type to ask for is retrieved by removing the first + // argument, which needs to be of Object.class MethodType[] invokerMethodTypes = new MethodType[invokerTypes.size()]; index = 0; for (String invokerType : invokerTypes) { MethodType mt = asMethodType(invokerType); - final int lastParam = mt.parameterCount() - 1; if (!checkInvokerTypeParams(mt)) { throw new RuntimeException( - "Invoker type parameter must start and end with Object: " + invokerType); + "Invoker type parameter must start with 2 Objects: " + invokerType); } - mt = mt.dropParameterTypes(lastParam, lastParam + 1); - invokerMethodTypes[index] = mt.dropParameterTypes(0, 1); + invokerMethodTypes[index] = mt.dropParameterTypes(0, 2); index++; } @@ -171,7 +191,7 @@ Map build() { DELEGATING_HOLDER, directMethodTypes)); result.put(INVOKERS_HOLDER, generateInvokersHolderClassBytes(INVOKERS_HOLDER, - invokerMethodTypes, callSiteMethodTypes)); + linkerMethodTypes, invokerMethodTypes, callSiteMethodTypes)); result.put(BASIC_FORMS_HOLDER, generateBasicFormsClassBytes(BASIC_FORMS_HOLDER)); @@ -207,6 +227,12 @@ public static MethodType asMethodType(String basicSignatureString) { } public static boolean checkInvokerTypeParams(MethodType mt) { + return (mt.parameterCount() >= 2 && + mt.parameterType(0) == Object.class && + mt.parameterType(1) == Object.class); + } + + public static boolean checkLinkerTypeParams(MethodType mt) { final int lastParam = mt.parameterCount() - 1; return (mt.parameterCount() >= 2 && 
mt.parameterType(0) == Object.class && @@ -320,15 +346,11 @@ static Map generateHolderClasses(Stream traces) { if ("linkToTargetMethod".equals(parts[2]) || "linkToCallSite".equals(parts[2])) { builder.addCallSiteType(methodType); + } else if (parts[2].endsWith("nvoker")) { + // MH.exactInvoker exactInvoker MH.invoker invoker + builder.addInvokerType(methodType); } else { - MethodType mt = HolderClassBuilder.asMethodType(methodType); - // Work around JDK-8327499 - if (HolderClassBuilder.checkInvokerTypeParams(mt)) { - builder.addInvokerType(methodType); - } else { - PlatformLogger.getLogger("java.lang.invoke") - .warning("Invalid LF_RESOLVE " + parts[1] + " " + parts[2] + " " + parts[3]); - } + builder.addLinkerType(methodType); } } else if (parts[1].contains("DirectMethodHandle")) { String dmh = parts[2]; @@ -465,27 +487,27 @@ static byte[] generateDelegatingMethodHandleHolderClassBytes(String className, /** * Returns a {@code byte[]} representation of a class implementing - * the invoker forms for the set of supplied {@code invokerMethodTypes} - * and {@code callSiteMethodTypes}. + * the invoker forms for the set of supplied {@code linkerMethodTypes} + * {@code invokerMethodTypes}, and {@code callSiteMethodTypes}. 
*/ static byte[] generateInvokersHolderClassBytes(String className, - MethodType[] invokerMethodTypes, MethodType[] callSiteMethodTypes) { + MethodType[] linkerMethodTypes, MethodType[] invokerMethodTypes, + MethodType[] callSiteMethodTypes) { HashSet dedupSet = new HashSet<>(); ArrayList forms = new ArrayList<>(); ArrayList names = new ArrayList<>(); - int[] types = { - MethodTypeForm.LF_EX_LINKER, + + int[] invokerTypes = { MethodTypeForm.LF_EX_INVOKER, - MethodTypeForm.LF_GEN_LINKER, - MethodTypeForm.LF_GEN_INVOKER + MethodTypeForm.LF_GEN_INVOKER, }; - for (int i = 0; i < invokerMethodTypes.length; i++) { + for (MethodType methodType : invokerMethodTypes) { // generate methods representing invokers of the specified type - if (dedupSet.add(invokerMethodTypes[i])) { - for (int type : types) { - LambdaForm invokerForm = Invokers.invokeHandleForm(invokerMethodTypes[i], + if (dedupSet.add(methodType)) { + for (int type : invokerTypes) { + LambdaForm invokerForm = Invokers.invokeHandleForm(methodType, /*customized*/false, type); forms.add(invokerForm); names.add(invokerForm.kind.defaultLambdaName); @@ -493,6 +515,24 @@ static byte[] generateInvokersHolderClassBytes(String className, } } + int[] linkerTypes = { + MethodTypeForm.LF_EX_LINKER, + MethodTypeForm.LF_GEN_LINKER, + }; + + dedupSet = new HashSet<>(); + for (MethodType methodType : linkerMethodTypes) { + // generate methods representing linkers of the specified type + if (dedupSet.add(methodType)) { + for (int type : linkerTypes) { + LambdaForm linkerForm = Invokers.invokeHandleForm(methodType, + /*customized*/false, type); + forms.add(linkerForm); + names.add(linkerForm.kind.defaultLambdaName); + } + } + } + dedupSet = new HashSet<>(); for (int i = 0; i < callSiteMethodTypes.length; i++) { // generate methods representing invokers of the specified type diff --git a/src/java.base/share/classes/java/math/BigDecimal.java b/src/java.base/share/classes/java/math/BigDecimal.java index 207ff9bdf54..d915568a502 100644 
--- a/src/java.base/share/classes/java/math/BigDecimal.java +++ b/src/java.base/share/classes/java/math/BigDecimal.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -5680,18 +5680,8 @@ private static BigDecimal divideAndRound128(final long dividendHi, final long di tmp = (dividendHi << shift) | (dividendLo >>> 64 - shift); long u2 = tmp & LONG_MASK; - long q1, r_tmp; - if (v1 == 1) { - q1 = tmp; - r_tmp = 0; - } else if (tmp >= 0) { - q1 = tmp / v1; - r_tmp = tmp - q1 * v1; - } else { - long[] rq = divRemNegativeLong(tmp, v1); - q1 = rq[1]; - r_tmp = rq[0]; - } + long q1 = Long.divideUnsigned(tmp, v1); + long r_tmp = Long.remainderUnsigned(tmp, v1); while(q1 >= DIV_NUM_BASE || unsignedLongCompare(q1*v0, make64(r_tmp, u1))) { q1--; @@ -5702,18 +5692,8 @@ private static BigDecimal divideAndRound128(final long dividendHi, final long di tmp = mulsub(u2,u1,v1,v0,q1); u1 = tmp & LONG_MASK; - long q0; - if (v1 == 1) { - q0 = tmp; - r_tmp = 0; - } else if (tmp >= 0) { - q0 = tmp / v1; - r_tmp = tmp - q0 * v1; - } else { - long[] rq = divRemNegativeLong(tmp, v1); - q0 = rq[1]; - r_tmp = rq[0]; - } + long q0 = Long.divideUnsigned(tmp, v1); + r_tmp = Long.remainderUnsigned(tmp, v1); while(q0 >= DIV_NUM_BASE || unsignedLongCompare(q0*v0,make64(r_tmp,u0))) { q0--; @@ -5793,37 +5773,6 @@ static BigDecimal scaledTenPow(int n, int sign, int scale) { } } - /** - * Calculate the quotient and remainder of dividing a negative long by - * another long. 
- * - * @param n the numerator; must be negative - * @param d the denominator; must not be unity - * @return a two-element {@code long} array with the remainder and quotient in - * the initial and final elements, respectively - */ - private static long[] divRemNegativeLong(long n, long d) { - assert n < 0 : "Non-negative numerator " + n; - assert d != 1 : "Unity denominator"; - - // Approximate the quotient and remainder - long q = (n >>> 1) / (d >>> 1); - long r = n - q * d; - - // Correct the approximation - while (r < 0) { - r += d; - q--; - } - while (r >= d) { - r -= d; - q++; - } - - // n - q*d == r && 0 <= r < d, hence we're done. - return new long[] {r, q}; - } - private static long make64(long hi, long lo) { return hi<<32 | lo; } diff --git a/src/java.base/share/classes/java/math/MutableBigInteger.java b/src/java.base/share/classes/java/math/MutableBigInteger.java index eca42ee25b1..30ea8e130fc 100644 --- a/src/java.base/share/classes/java/math/MutableBigInteger.java +++ b/src/java.base/share/classes/java/math/MutableBigInteger.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1092,9 +1092,9 @@ int divideOneWord(int divisor, MutableBigInteger quotient) { // Special case of one word dividend if (intLen == 1) { - long dividendValue = value[offset] & LONG_MASK; - int q = (int) (dividendValue / divisorLong); - int r = (int) (dividendValue - q * divisorLong); + int dividendValue = value[offset]; + int q = Integer.divideUnsigned(dividendValue, divisor); + int r = Integer.remainderUnsigned(dividendValue, divisor); quotient.value[0] = q; quotient.intLen = (q == 0) ? 
0 : 1; quotient.offset = 0; @@ -1106,41 +1106,17 @@ int divideOneWord(int divisor, MutableBigInteger quotient) { quotient.offset = 0; quotient.intLen = intLen; - // Normalize the divisor - int shift = Integer.numberOfLeadingZeros(divisor); - - int rem = value[offset]; - long remLong = rem & LONG_MASK; - if (remLong < divisorLong) { - quotient.value[0] = 0; - } else { - quotient.value[0] = (int)(remLong / divisorLong); - rem = (int) (remLong - (quotient.value[0] * divisorLong)); - remLong = rem & LONG_MASK; - } - int xlen = intLen; - while (--xlen > 0) { - long dividendEstimate = (remLong << 32) | + long rem = 0; + for (int xlen = intLen; xlen > 0; xlen--) { + long dividendEstimate = (rem << 32) | (value[offset + intLen - xlen] & LONG_MASK); - int q; - if (dividendEstimate >= 0) { - q = (int) (dividendEstimate / divisorLong); - rem = (int) (dividendEstimate - q * divisorLong); - } else { - long tmp = divWord(dividendEstimate, divisor); - q = (int) (tmp & LONG_MASK); - rem = (int) (tmp >>> 32); - } + int q = (int) Long.divideUnsigned(dividendEstimate, divisorLong); + rem = Long.remainderUnsigned(dividendEstimate, divisorLong); quotient.value[intLen - xlen] = q; - remLong = rem & LONG_MASK; } quotient.normalize(); - // Unnormalize - if (shift > 0) - return rem % divisor; - else - return rem; + return (int)rem; } /** @@ -1557,14 +1533,8 @@ private MutableBigInteger divideMagnitude(MutableBigInteger div, skipCorrection = qrem + 0x80000000 < nh2; } else { long nChunk = (((long)nh) << 32) | (nm & LONG_MASK); - if (nChunk >= 0) { - qhat = (int) (nChunk / dhLong); - qrem = (int) (nChunk - (qhat * dhLong)); - } else { - long tmp = divWord(nChunk, dh); - qhat = (int) (tmp & LONG_MASK); - qrem = (int) (tmp >>> 32); - } + qhat = (int) Long.divideUnsigned(nChunk, dhLong); + qrem = (int) Long.remainderUnsigned(nChunk, dhLong); } if (qhat == 0) @@ -1616,14 +1586,8 @@ private MutableBigInteger divideMagnitude(MutableBigInteger div, skipCorrection = qrem + 0x80000000 < nh2; } else { 
long nChunk = (((long) nh) << 32) | (nm & LONG_MASK); - if (nChunk >= 0) { - qhat = (int) (nChunk / dhLong); - qrem = (int) (nChunk - (qhat * dhLong)); - } else { - long tmp = divWord(nChunk, dh); - qhat = (int) (tmp & LONG_MASK); - qrem = (int) (tmp >>> 32); - } + qhat = (int) Long.divideUnsigned(nChunk, dhLong); + qrem = (int) Long.remainderUnsigned(nChunk, dhLong); } if (qhat != 0) { if (!skipCorrection) { // Correct qhat @@ -1732,14 +1696,8 @@ private MutableBigInteger divideLongMagnitude(long ldivisor, MutableBigInteger q skipCorrection = qrem + 0x80000000 < nh2; } else { long nChunk = (((long) nh) << 32) | (nm & LONG_MASK); - if (nChunk >= 0) { - qhat = (int) (nChunk / dhLong); - qrem = (int) (nChunk - (qhat * dhLong)); - } else { - long tmp = divWord(nChunk, dh); - qhat =(int)(tmp & LONG_MASK); - qrem = (int)(tmp>>>32); - } + qhat = (int) Long.divideUnsigned(nChunk, dhLong); + qrem = (int) Long.remainderUnsigned(nChunk, dhLong); } if (qhat == 0) @@ -1834,40 +1792,6 @@ private boolean unsignedLongCompare(long one, long two) { return (one+Long.MIN_VALUE) > (two+Long.MIN_VALUE); } - /** - * This method divides a long quantity by an int to estimate - * qhat for two multi precision numbers. It is used when - * the signed value of n is less than zero. - * Returns long value where high 32 bits contain remainder value and - * low 32 bits contain quotient value. 
- */ - static long divWord(long n, int d) { - long dLong = d & LONG_MASK; - long r; - long q; - if (dLong == 1) { - q = (int)n; - r = 0; - return (r << 32) | (q & LONG_MASK); - } - - // Approximate the quotient and remainder - q = (n >>> 1) / (dLong >>> 1); - r = n - q*dLong; - - // Correct the approximation - while (r < 0) { - r += dLong; - q--; - } - while (r >= dLong) { - r -= dLong; - q++; - } - // n - q*dlong == r && 0 <= r LOCALE_CACHE = ReferencedKeyMap.create(true, ConcurrentHashMap::new); - private static Locale createLocale(Object key) { - if (key instanceof BaseLocale base) { - return new Locale(base, null); + private static final ReferencedKeyMap LOCALE_CACHE + = ReferencedKeyMap.create(true, ReferencedKeyMap.concurrentHashMapSupplier()); + + private static final Function LOCALE_CREATOR = new Function<>() { + @Override + public Locale apply(Object key) { + if (key instanceof BaseLocale base) { + return new Locale(base, null); + } + LocaleKey lk = (LocaleKey)key; + return new Locale(lk.base, lk.exts); } - LocaleKey lk = (LocaleKey)key; - return new Locale(lk.base, lk.exts); - } + }; private static final class LocaleKey { + private final BaseLocale base; private final LocaleExtensions exts; private final int hash; diff --git a/src/java.base/share/classes/java/util/concurrent/locks/Lock.java b/src/java.base/share/classes/java/util/concurrent/locks/Lock.java index 84100e64a13..bd6ce2eb6f9 100644 --- a/src/java.base/share/classes/java/util/concurrent/locks/Lock.java +++ b/src/java.base/share/classes/java/util/concurrent/locks/Lock.java @@ -80,11 +80,11 @@ * *
 {@code
  * Lock l = ...;
- * l.lock();
+ * l.lock(); // lock() as the last statement before the try block
  * try {
  *   // access the resource protected by this lock
  * } finally {
- *   l.unlock();
+ *   l.unlock(); // unlock() as the first statement in the finally block
  * }}
* * When locking and unlocking occur in different scopes, care must be diff --git a/src/java.base/share/classes/java/util/concurrent/locks/ReentrantLock.java b/src/java.base/share/classes/java/util/concurrent/locks/ReentrantLock.java index c58925b086e..3378ce3983a 100644 --- a/src/java.base/share/classes/java/util/concurrent/locks/ReentrantLock.java +++ b/src/java.base/share/classes/java/util/concurrent/locks/ReentrantLock.java @@ -71,8 +71,9 @@ * is available even if other threads are waiting. * *

 <p>It is recommended practice to always immediately - * follow a call to {@code lock} with a {@code try} block, most - * typically in a before/after construction such as: + * follow a call to {@code lock} with a {@code try} block, and + * to always immediately call {@code unlock} as the + * first statement in the finally block, as follows: * *

 {@code
  * class X {
@@ -80,11 +81,11 @@
  *   // ...
  *
  *   public void m() {
- *     lock.lock();  // block until condition holds
+ *     lock.lock();  // lock() as the last statement before the try block
  *     try {
  *       // ... method body
  *     } finally {
- *       lock.unlock();
+ *       lock.unlock(); // unlock() as the first statement in the finally block
  *     }
  *   }
  * }}
diff --git a/src/java.base/share/classes/java/util/concurrent/locks/ReentrantReadWriteLock.java b/src/java.base/share/classes/java/util/concurrent/locks/ReentrantReadWriteLock.java index c73bd41cea8..517708e70f3 100644 --- a/src/java.base/share/classes/java/util/concurrent/locks/ReentrantReadWriteLock.java +++ b/src/java.base/share/classes/java/util/concurrent/locks/ReentrantReadWriteLock.java @@ -141,6 +141,7 @@ * * void processCachedData() { * rwl.readLock().lock(); + * // Code between the lock() above, and the unlock() below must not throw * if (!cacheValid) { * // Must release read lock before acquiring write lock * rwl.readLock().unlock(); @@ -158,7 +159,7 @@ * rwl.writeLock().unlock(); // Unlock write, still hold read * } * } - * + * // Make sure that code that could throw is executed inside the try block * try { * use(data); * } finally { diff --git a/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java b/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java index 2bf0c711cfe..6ec8b2a149a 100644 --- a/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java +++ b/src/java.base/share/classes/java/util/spi/LocaleServiceProvider.java @@ -152,6 +152,11 @@ * supported by the Java runtime environment. The following table lists the * version of CLDR used in each JDK release. Unless otherwise specified, all * update releases in a given JDK release family use the same CLDR version. + * Note that the CLDR locale data are subject to change. Users should not assume + * that the locale data remain the same across CLDR versions. Otherwise, unexpected + * incompatible behaviors may occur, such as an exception on parsing a date. + * Refer to CLDR Releases + * for the deltas between their releases. 
* * * @@ -264,7 +269,7 @@ public boolean isSupportedLocale(Locale locale) { for (Locale available : getAvailableLocales()) { if (locale.equals(available.stripExtensions())) { return true; -} + } } return false; } diff --git a/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java b/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java index af7ff2e4aa2..c31e745cd89 100644 --- a/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java +++ b/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java @@ -26,6 +26,7 @@ package jdk.internal.access; import java.io.InputStream; +import java.io.PrintStream; import java.lang.annotation.Annotation; import java.lang.foreign.MemorySegment; import java.lang.invoke.MethodHandle; @@ -401,6 +402,11 @@ public interface JavaLangAccess { */ InputStream initialSystemIn(); + /** + * Returns the initial value of System.err. + */ + PrintStream initialSystemErr(); + /** * Encodes ASCII codepoints as possible from the source array into * the destination byte array, assuming that the encoding is ASCII diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractInstruction.java b/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractInstruction.java index 4e9a55c77ac..e1598db1545 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractInstruction.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractInstruction.java @@ -557,7 +557,7 @@ public BoundNewPrimitiveArrayInstruction(Opcode op, CodeImpl code, int pos) { @Override public TypeKind typeKind() { - return TypeKind.fromNewArrayCode(code.classReader.readU1(pos + 1)); + return TypeKind.fromNewarrayCode(code.classReader.readU1(pos + 1)); } @Override @@ -1149,7 +1149,7 @@ public TypeKind typeKind() { @Override public void writeTo(DirectCodeBuilder writer) { - writer.writeNewPrimitiveArray(typeKind.newarraycode()); + writer.writeNewPrimitiveArray(typeKind.newarrayCode()); } 
@Override diff --git a/src/java.base/share/classes/jdk/internal/foreign/LayoutPath.java b/src/java.base/share/classes/jdk/internal/foreign/LayoutPath.java index 5eee17573ee..390ad1b2f97 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/LayoutPath.java +++ b/src/java.base/share/classes/jdk/internal/foreign/LayoutPath.java @@ -26,6 +26,7 @@ package jdk.internal.foreign; import jdk.internal.vm.annotation.ForceInline; + import java.lang.foreign.AddressLayout; import java.lang.foreign.GroupLayout; import java.lang.foreign.MemoryLayout; @@ -204,10 +205,7 @@ public VarHandle dereferenceHandle(boolean adapt) { String.format("Path does not select a value layout: %s", breadcrumbs())); } - // If we have an enclosing layout, drop the alignment check for the accessed element, - // we check the root layout instead - ValueLayout accessedLayout = enclosing != null ? valueLayout.withByteAlignment(1) : valueLayout; - VarHandle handle = accessedLayout.varHandle(); + VarHandle handle = valueLayout.varHandle(); handle = MethodHandles.collectCoordinates(handle, 1, offsetHandle()); // we only have to check the alignment of the root layout for the first dereference we do, diff --git a/src/java.base/share/classes/jdk/internal/misc/VM.java b/src/java.base/share/classes/jdk/internal/misc/VM.java index 9b51cfad458..de6f011fe8f 100644 --- a/src/java.base/share/classes/jdk/internal/misc/VM.java +++ b/src/java.base/share/classes/jdk/internal/misc/VM.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ import static java.lang.Thread.State.*; +import java.io.PrintStream; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -496,4 +497,11 @@ private static class BufferPoolsHolder { public static List getBufferPools() { return BufferPoolsHolder.BUFFER_POOLS; } + + /** + * Return the initial value of System.err that was set during VM initialization. + */ + public static PrintStream initialErr() { + return SharedSecrets.getJavaLangAccess().initialSystemErr(); + } } diff --git a/src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java b/src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java index be392c3ae2d..9c364cd7813 100644 --- a/src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java +++ b/src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java @@ -99,6 +99,21 @@ public final class ReferencedKeyMap implements Map { */ private final ReferenceQueue stale; + /** + * @return a supplier to create a {@code ConcurrentHashMap} appropriate for use in the + * create methods. + * @param the type of keys maintained by the new map + * @param the type of mapped values + */ + public static Supplier, V>> concurrentHashMapSupplier() { + return new Supplier<>() { + @Override + public Map, V> get() { + return new ConcurrentHashMap<>(); + } + }; + } + /** * Private constructor. 
* diff --git a/src/java.base/share/classes/jdk/internal/util/ReferencedKeySet.java b/src/java.base/share/classes/jdk/internal/util/ReferencedKeySet.java index 21b940439e0..73ad6f32640 100644 --- a/src/java.base/share/classes/jdk/internal/util/ReferencedKeySet.java +++ b/src/java.base/share/classes/jdk/internal/util/ReferencedKeySet.java @@ -75,6 +75,15 @@ public final class ReferencedKeySet extends AbstractSet { */ final ReferencedKeyMap> map; + /** + * @return a supplier to create a {@code ConcurrentHashMap} appropriate for use in the + * create methods. + * @param the type of elements maintained by this set + */ + public static Supplier, ReferenceKey>> concurrentHashMapSupplier() { + return ReferencedKeyMap.concurrentHashMapSupplier(); + } + /** * Private constructor. * diff --git a/src/java.base/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java b/src/java.base/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java index 9f65e32fa27..0ce080c2054 100644 --- a/src/java.base/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java +++ b/src/java.base/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -259,6 +259,24 @@ private static Locale getParentLocale(Locale locale) { break; } } + + if (parent == null) { + // check nonlikelyScript locales + if (CLDRBaseLocaleDataMetaInfo.nonlikelyScript && locale.getCountry().isEmpty()) { + var lang = " " + locale.getLanguage() + " "; + var script= locale.getScript(); + if (!script.isEmpty()) { + parent = baseMetaInfo.likelyScriptMap().entrySet().stream() + .filter(e -> e.getValue().contains(lang)) + .findAny() + .map(Map.Entry::getKey) + .map(likely -> likely.equals(script) ? 
null : Locale.ROOT) + .orElse(null); + } + } + } + + // no parent found if (parent == null) { parent = locale; // non existent marker } diff --git a/src/java.base/share/classes/sun/util/locale/BaseLocale.java b/src/java.base/share/classes/sun/util/locale/BaseLocale.java index 7e0fc9a2d34..ec2a6a49183 100644 --- a/src/java.base/share/classes/sun/util/locale/BaseLocale.java +++ b/src/java.base/share/classes/sun/util/locale/BaseLocale.java @@ -38,7 +38,7 @@ import jdk.internal.vm.annotation.Stable; import java.util.StringJoiner; -import java.util.concurrent.ConcurrentHashMap; +import java.util.function.UnaryOperator; public final class BaseLocale { @@ -93,7 +93,7 @@ public final class BaseLocale { // Interned BaseLocale cache private static final ReferencedKeySet CACHE = - ReferencedKeySet.create(true, ConcurrentHashMap::new); + ReferencedKeySet.create(true, ReferencedKeySet.concurrentHashMapSupplier()); public static final String SEP = "_"; @@ -164,13 +164,21 @@ public static BaseLocale getInstance(String language, String script, // "interned" instance can subsequently be used by the Locale // instance which guarantees the locale components are properly cased/interned. return CACHE.intern(new BaseLocale(language, script, region, variant), - (b) -> new BaseLocale( - LocaleUtils.toLowerString(b.language).intern(), - LocaleUtils.toTitleString(b.script).intern(), - LocaleUtils.toUpperString(b.region).intern(), - b.variant.intern())); + // Avoid lambdas since this may be on the bootstrap path in many locales + INTERNER); } + public static final UnaryOperator INTERNER = new UnaryOperator<>() { + @Override + public BaseLocale apply(BaseLocale b) { + return new BaseLocale( + LocaleUtils.toLowerString(b.language).intern(), + LocaleUtils.toTitleString(b.script).intern(), + LocaleUtils.toUpperString(b.region).intern(), + b.variant.intern()); + } + }; + public static String convertOldISOCodes(String language) { return switch (language) { case "he", "iw" -> OLD_ISO_CODES ? 
"iw" : "he"; diff --git a/src/java.desktop/share/classes/com/sun/java/swing/plaf/gtk/GTKLookAndFeel.java b/src/java.desktop/share/classes/com/sun/java/swing/plaf/gtk/GTKLookAndFeel.java index 72b8c759e30..65f29ea8384 100644 --- a/src/java.desktop/share/classes/com/sun/java/swing/plaf/gtk/GTKLookAndFeel.java +++ b/src/java.desktop/share/classes/com/sun/java/swing/plaf/gtk/GTKLookAndFeel.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -348,6 +348,8 @@ protected void initComponentDefaults(UIDefaults table) { Double defaultCaretAspectRatio = Double.valueOf(0.025); Color caretColor = table.getColor("caretColor"); Color controlText = table.getColor("controlText"); + Color tabbedPaneBg = new ColorUIResource(238, 238, 238); + Color unselectedTabColor = new ColorUIResource(255, 255, 255); Object fieldInputMap = new UIDefaults.LazyInputMap(new Object[] { "ctrl C", DefaultEditorKit.copyAction, @@ -1020,6 +1022,11 @@ public Object createValue(UIDefaults table) { "TabbedPane.selectedLabelShift", 3, "TabbedPane.font", new FontLazyValue(Region.TABBED_PANE), "TabbedPane.selectedTabPadInsets", new InsetsUIResource(2, 2, 0, 1), + "TabbedPane.selected", tabbedPaneBg, + "TabbedPane.contentOpaque", Boolean.TRUE, + "TabbedPane.tabsOpaque", Boolean.TRUE, + "TabbedPane.contentAreaColor", tabbedPaneBg, + "TabbedPane.unselectedBackground", unselectedTabColor, "Table.scrollPaneBorder", zeroBorder, "Table.background", tableBg, diff --git a/src/java.desktop/share/classes/com/sun/java/swing/plaf/gtk/GTKPainter.java b/src/java.desktop/share/classes/com/sun/java/swing/plaf/gtk/GTKPainter.java index 1635ff66f89..e643d3d28ff 100644 --- a/src/java.desktop/share/classes/com/sun/java/swing/plaf/gtk/GTKPainter.java +++ 
b/src/java.desktop/share/classes/com/sun/java/swing/plaf/gtk/GTKPainter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -972,6 +972,9 @@ public void paintTabbedPaneTabBackground(SynthContext context, JTabbedPane pane = (JTabbedPane)context.getComponent(); int placement = pane.getTabPlacement(); + // Fill the tab rect area + g.fillRect(x, y, w, h); + synchronized (UNIXToolkit.GTK_LOCK) { if (! ENGINE.paintCachedImage(g, x, y, w, h, id, gtkState, placement, tabIndex)) { diff --git a/src/java.desktop/share/classes/javax/swing/plaf/nimbus/skin.laf b/src/java.desktop/share/classes/javax/swing/plaf/nimbus/skin.laf index 38e8530983a..1d486f9fe06 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/nimbus/skin.laf +++ b/src/java.desktop/share/classes/javax/swing/plaf/nimbus/skin.laf @@ -1,7 +1,7 @@
JDK releases and supported CLDR versions