forked from pool/java-1_8_0-openjdk
--- jdk8/hotspot/make/hotspot_version 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/make/hotspot_version 2015-01-08 21:23:31.130149345 +0100
@@ -35,7 +35,7 @@

HS_MAJOR_VER=25
HS_MINOR_VER=40
-HS_BUILD_NUMBER=16
+HS_BUILD_NUMBER=24

JDK_MAJOR_VER=1
JDK_MINOR_VER=8
--- jdk8/hotspot/make/linux/makefiles/adjust-mflags.sh 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/make/linux/makefiles/adjust-mflags.sh 2015-01-08 21:23:31.144149009 +0100
@@ -64,7 +64,7 @@
echo "$MFLAGS" \
| sed '
s/^-/ -/
- s/ -\([^ ][^ ]*\)j/ -\1 -j/
+ s/ -\([^ I][^ I]*\)j/ -\1 -j/
s/ -j[0-9][0-9]*/ -j/
s/ -j\([^ ]\)/ -j -\1/
s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
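Note on the adjust-mflags.sh hunk: the script re-splits bundled make flag groups so it can inject a -j job count, and the old pattern split any group containing a 'j'. That mangles -I<dir> flags whose path happens to contain that letter; the new character class [^ I] leaves include flags alone. A standalone C++ sketch of the two patterns, using std::regex purely for illustration (the real script uses sed, and sed's s/// without the g flag replaces only the first match per line, so this is an approximation of the behavior, not a port):

#include <iostream>
#include <regex>
#include <string>

int main() {
  // "-kj4" is a bundled group that should become "-k -j4";
  // "-I/some/project" merely contains a 'j' and must stay intact.
  std::string mflags = " -I/some/project -kj4";
  std::regex old_rule(" -([^ ][^ ]*)j");   // pre-patch: also splits the -I flag
  std::regex new_rule(" -([^ I][^ I]*)j"); // post-patch: skips flags containing 'I'
  std::cout << std::regex_replace(mflags, old_rule, " -$1 -j") << "\n";
  // prints " -I/some/pro -ject -k -j4"  (include path mangled)
  std::cout << std::regex_replace(mflags, new_rule, " -$1 -j") << "\n";
  // prints " -I/some/project -k -j4"    (only the real bundle is split)
  return 0;
}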
--- jdk8/hotspot/make/linux/makefiles/mapfile-vers-debug 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/make/linux/makefiles/mapfile-vers-debug 2015-01-08 21:23:31.144149009 +0100
@@ -217,6 +217,9 @@
JVM_RegisterSignal;
JVM_ReleaseUTF;
JVM_ResolveClass;
+ JVM_KnownToNotExist;
+ JVM_GetResourceLookupCacheURLs;
+ JVM_GetResourceLookupCache;
JVM_ResumeThread;
JVM_Send;
JVM_SendTo;
--- jdk8/hotspot/make/linux/makefiles/mapfile-vers-product 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/make/linux/makefiles/mapfile-vers-product 2015-01-08 21:23:31.144149009 +0100
@@ -217,6 +217,9 @@
JVM_RegisterSignal;
JVM_ReleaseUTF;
JVM_ResolveClass;
+ JVM_KnownToNotExist;
+ JVM_GetResourceLookupCacheURLs;
+ JVM_GetResourceLookupCache;
JVM_ResumeThread;
JVM_Send;
JVM_SendTo;
--- jdk8/hotspot/src/share/vm/c1/c1_globals.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/c1/c1_globals.hpp 2015-01-08 21:23:31.144149009 +0100
@@ -290,9 +290,6 @@
develop(bool, InstallMethods, true, \
"Install methods at the end of successful compilations") \
\
- product(intx, CompilationRepeat, 0, \
- "Number of times to recompile method before returning result") \
- \
develop(intx, NMethodSizeLimit, (64*K)*wordSize, \
"Maximum size of a compiled method.") \
\
--- jdk8/hotspot/src/share/vm/ci/ciEnv.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/ci/ciEnv.cpp 2015-01-08 21:23:31.145148985 +0100
@@ -559,8 +559,13 @@
oop obj = cpool->resolved_references()->obj_at(cache_index);
if (obj != NULL) {
ciObject* ciobj = get_object(obj);
+ if (ciobj->is_array()) {
+ return ciConstant(T_ARRAY, ciobj);
+ } else {
+ assert(ciobj->is_instance(), "should be an instance");
return ciConstant(T_OBJECT, ciobj);
}
+ }
index = cpool->object_to_cp_index(cache_index);
}
constantTag tag = cpool->tag_at(index);
@@ -586,8 +591,12 @@
}
}
ciObject* constant = get_object(string);
+ if (constant->is_array()) {
+ return ciConstant(T_ARRAY, constant);
+ } else {
assert (constant->is_instance(), "must be an instance, or not? ");
return ciConstant(T_OBJECT, constant);
+ }
} else if (tag.is_klass() || tag.is_unresolved_klass()) {
// 4881222: allow ldc to take a class type
ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor);
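Note: the two ciEnv.cpp hunks teach the compiler interface that a resolved constant-pool entry may be an array, not only an instance. A minimal sketch of the resulting dispatch, with stand-in types so it compiles outside HotSpot (make_object_constant is a made-up helper; the real logic sits inline in ciEnv::get_constant_by_index_impl above):

// Stand-ins for the HotSpot types used in the hunk:
enum BasicType { T_OBJECT, T_ARRAY };
struct ciObject {
  bool is_array_flag;
  bool is_array() const { return is_array_flag; }
  bool is_instance() const { return !is_array_flag; }
};
struct ciConstant {
  BasicType type; ciObject* value;
  ciConstant(BasicType t, ciObject* v) : type(t), value(v) {}
};

// Shape of the new dispatch: classify the cached constant by runtime shape.
ciConstant make_object_constant(ciObject* obj) {
  if (obj->is_array()) return ciConstant(T_ARRAY, obj);  // arrays get their own tag
  return ciConstant(T_OBJECT, obj);  // the VM additionally asserts is_instance()
}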
--- jdk8/hotspot/src/share/vm/ci/ciMethod.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/ci/ciMethod.cpp 2015-01-08 21:23:31.148148913 +0100
@@ -68,7 +68,10 @@
// ciMethod::ciMethod
//
// Loaded method.
-ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
+ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) :
+ ciMetadata(h_m()),
+ _holder(holder)
+{
assert(h_m() != NULL, "no null method");

// These fields are always filled in in loaded methods.
@@ -124,7 +127,6 @@
// generating _signature may allow GC and therefore move m.
// These fields are always filled in.
_name = env->get_symbol(h_m()->name());
- _holder = env->get_instance_klass(h_m()->method_holder());
ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
constantPoolHandle cpool = h_m()->constants();
_signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
--- jdk8/hotspot/src/share/vm/ci/ciMethod.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/ci/ciMethod.hpp 2015-01-08 21:23:31.148148913 +0100
@@ -90,7 +90,7 @@
BCEscapeAnalyzer* _bcea;
#endif

- ciMethod(methodHandle h_m);
+ ciMethod(methodHandle h_m, ciInstanceKlass* holder);
ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor);

Method* get_Method() const {
--- jdk8/hotspot/src/share/vm/ci/ciObjectFactory.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/ci/ciObjectFactory.cpp 2015-01-08 21:23:31.148148913 +0100
@@ -239,7 +239,7 @@
ciObject* ciObjectFactory::get(oop key) {
ASSERT_IN_VM;

- assert(key == NULL || Universe::heap()->is_in_reserved(key), "must be");
+ assert(Universe::heap()->is_in_reserved(key), "must be");

NonPermObject* &bucket = find_non_perm(key);
if (bucket != NULL) {
@@ -290,9 +290,9 @@
}
#endif
if (!is_found_at(index, key, _ci_metadata)) {
- // The ciObject does not yet exist. Create it and insert it
+ // The ciMetadata does not yet exist. Create it and insert it
// into the cache.
- ciMetadata* new_object = create_new_object(key);
+ ciMetadata* new_object = create_new_metadata(key);
init_ident_of(new_object);
assert(new_object->is_metadata(), "must be");

@@ -344,15 +344,28 @@
}

// ------------------------------------------------------------------
-// ciObjectFactory::create_new_object
+// ciObjectFactory::create_new_metadata
//
-// Create a new ciObject from a Metadata*.
+// Create a new ciMetadata from a Metadata*.
//
-// Implementation note: this functionality could be virtual behavior
-// of the oop itself. For now, we explicitly marshal the object.
-ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
+// Implementation note: in order to keep Metadata live, an auxiliary ciObject
+// is used, which points to it's holder.
+ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) {
EXCEPTION_CONTEXT;

+ // Hold metadata from unloading by keeping it's holder alive.
+ if (_initialized && o->is_klass()) {
+ Klass* holder = ((Klass*)o);
+ if (holder->oop_is_instance() && InstanceKlass::cast(holder)->is_anonymous()) {
+ // Though ciInstanceKlass records class loader oop, it's not enough to keep
+ // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
+ // It is enough to record a ciObject, since cached elements are never removed
+ // during ciObjectFactory lifetime. ciObjectFactory itself is created for
+ // every compilation and lives for the whole duration of the compilation.
+ ciObject* h = get(holder->klass_holder());
+ }
+ }
+
if (o->is_klass()) {
KlassHandle h_k(THREAD, (Klass*)o);
Klass* k = (Klass*)o;
@@ -365,14 +378,16 @@
}
} else if (o->is_method()) {
methodHandle h_m(THREAD, (Method*)o);
- return new (arena()) ciMethod(h_m);
+ ciEnv *env = CURRENT_THREAD_ENV;
+ ciInstanceKlass* holder = env->get_instance_klass(h_m()->method_holder());
+ return new (arena()) ciMethod(h_m, holder);
} else if (o->is_methodData()) {
// Hold methodHandle alive - might not be necessary ???
methodHandle h_m(THREAD, ((MethodData*)o)->method());
return new (arena()) ciMethodData((MethodData*)o);
}

- // The oop is of some type not supported by the compiler interface.
+ // The Metadata* is of some type not supported by the compiler interface.
ShouldNotReachHere();
return NULL;
}
@@ -701,7 +716,7 @@
// If there is no entry in the cache corresponding to this oop, return
// the null tail of the bucket into which the oop should be inserted.
ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
- assert(Universe::heap()->is_in_reserved_or_null(key), "must be");
+ assert(Universe::heap()->is_in_reserved(key), "must be");
ciMetadata* klass = get_metadata(key->klass());
NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS];
for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) {
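Note: the create_new_metadata hunk pins VM-anonymous classes by fetching a ciObject for the klass holder; because the factory's cache is never pruned during a compilation, holding that handle keeps the holder oop, and hence the class's metadata, reachable until the compilation ends. A loose plain-C++ analogy of that liveness argument (shared_ptr standing in for a GC-visible handle; this illustrates the idea only, not how HotSpot's GC actually works):

#include <memory>
#include <vector>

struct Holder {};  // stands in for the klass_holder() oop

struct CompilationCache {
  // Entries are never removed while a compilation runs, so anything
  // pinned here stays alive at least as long as the compilation itself.
  std::vector<std::shared_ptr<Holder>> pinned;
  void pin(std::shared_ptr<Holder> h) { pinned.push_back(std::move(h)); }
};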
--- jdk8/hotspot/src/share/vm/ci/ciObjectFactory.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/ci/ciObjectFactory.hpp 2015-01-08 21:23:31.148148913 +0100
@@ -73,7 +73,7 @@
void insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects);

ciObject* create_new_object(oop o);
- ciMetadata* create_new_object(Metadata* o);
+ ciMetadata* create_new_metadata(Metadata* o);

void ensure_metadata_alive(ciMetadata* m);

--- jdk8/hotspot/src/share/vm/ci/ciTypeFlow.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/ci/ciTypeFlow.cpp 2015-01-08 21:23:31.149148889 +0100
@@ -35,6 +35,7 @@
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/allocation.inline.hpp"
+#include "opto/compile.hpp"
#include "runtime/deoptimization.hpp"
#include "utilities/growableArray.hpp"

@@ -730,7 +731,7 @@
if (obj->is_null_object()) {
push_null();
} else {
- assert(obj->is_instance(), "must be java_mirror of klass");
+ assert(obj->is_instance() || obj->is_array(), "must be java_mirror of klass");
push_object(obj->klass());
}
} else {
@@ -2646,7 +2647,7 @@
assert (!blk->has_pre_order(), "");
blk->set_next_pre_order();

- if (_next_pre_order >= MaxNodeLimit / 2) {
+ if (_next_pre_order >= (int)Compile::current()->max_node_limit() / 2) {
// Too many basic blocks. Bail out.
// This can happen when try/finally constructs are nested to depth N,
// and there is O(2**N) cloning of jsr bodies. See bug 4697245!
--- jdk8/hotspot/src/share/vm/classfile/classFileParser.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/classFileParser.cpp 2015-01-08 21:23:31.150148864 +0100
@@ -2529,7 +2529,7 @@
Array<Method*>* ClassFileParser::parse_methods(bool is_interface,
AccessFlags* promoted_flags,
bool* has_final_method,
- bool* has_default_methods,
+ bool* declares_default_methods,
TRAPS) {
ClassFileStream* cfs = stream();
cfs->guarantee_more(2, CHECK_NULL); // length
@@ -2548,11 +2548,11 @@
if (method->is_final()) {
*has_final_method = true;
}
- if (is_interface && !(*has_default_methods)
- && !method->is_abstract() && !method->is_static()
- && !method->is_private()) {
- // default method
- *has_default_methods = true;
+ // declares_default_methods: declares concrete instance methods, any access flags
+ // used for interface initialization, and default method inheritance analysis
+ if (is_interface && !(*declares_default_methods)
+ && !method->is_abstract() && !method->is_static()) {
+ *declares_default_methods = true;
}
_methods->at_put(index, method());
}
@@ -3058,21 +3058,39 @@
}
}

-// Transfer ownership of metadata allocated to the InstanceKlass.
-void ClassFileParser::apply_parsed_class_metadata(
- instanceKlassHandle this_klass,
- int java_fields_count, TRAPS) {
- // Assign annotations if needed
- if (_annotations != NULL || _type_annotations != NULL ||
- _fields_annotations != NULL || _fields_type_annotations != NULL) {
+// Create the Annotations object that will
+// hold the annotations array for the Klass.
+void ClassFileParser::create_combined_annotations(TRAPS) {
+ if (_annotations == NULL &&
+ _type_annotations == NULL &&
+ _fields_annotations == NULL &&
+ _fields_type_annotations == NULL) {
+ // Don't create the Annotations object unnecessarily.
+ return;
+ }
+
Annotations* annotations = Annotations::allocate(_loader_data, CHECK);
annotations->set_class_annotations(_annotations);
annotations->set_class_type_annotations(_type_annotations);
annotations->set_fields_annotations(_fields_annotations);
annotations->set_fields_type_annotations(_fields_type_annotations);
- this_klass->set_annotations(annotations);
+
+ // This is the Annotations object that will be
+ // assigned to InstanceKlass being constructed.
+ _combined_annotations = annotations;
+
+ // The annotations arrays below has been transfered the
+ // _combined_annotations so these fields can now be cleared.
+ _annotations = NULL;
+ _type_annotations = NULL;
+ _fields_annotations = NULL;
+ _fields_type_annotations = NULL;
}

+// Transfer ownership of metadata allocated to the InstanceKlass.
+void ClassFileParser::apply_parsed_class_metadata(
+ instanceKlassHandle this_klass,
+ int java_fields_count, TRAPS) {
_cp->set_pool_holder(this_klass());
this_klass->set_constants(_cp);
this_klass->set_fields(_fields, java_fields_count);
@@ -3080,6 +3098,7 @@
this_klass->set_inner_classes(_inner_classes);
this_klass->set_local_interfaces(_local_interfaces);
this_klass->set_transitive_interfaces(_transitive_interfaces);
+ this_klass->set_annotations(_combined_annotations);

// Clear out these fields so they don't get deallocated by the destructor
clear_class_metadata();
@@ -3691,6 +3710,7 @@
JvmtiCachedClassFileData *cached_class_file = NULL;
Handle class_loader(THREAD, loader_data->class_loader());
bool has_default_methods = false;
+ bool declares_default_methods = false;
ResourceMark rm(THREAD);

ClassFileStream* cfs = stream();
@@ -3928,13 +3948,20 @@
Array<Method*>* methods = parse_methods(access_flags.is_interface(),
&promoted_flags,
&has_final_method,
- &has_default_methods,
+ &declares_default_methods,
CHECK_(nullHandle));
+ if (declares_default_methods) {
+ has_default_methods = true;
+ }

// Additional attributes
ClassAnnotationCollector parsed_annotations;
parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));

+ // Finalize the Annotations metadata object,
+ // now that all annotation arrays have been created.
+ create_combined_annotations(CHECK_(nullHandle));
+
// Make sure this is the end of class file stream
guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));

@@ -4072,6 +4099,7 @@
this_klass->set_minor_version(minor_version);
this_klass->set_major_version(major_version);
this_klass->set_has_default_methods(has_default_methods);
+ this_klass->set_declares_default_methods(declares_default_methods);

if (!host_klass.is_null()) {
assert (this_klass->is_anonymous(), "should be the same");
@@ -4234,10 +4262,27 @@
InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(),
_local_interfaces, _transitive_interfaces);

+ if (_combined_annotations != NULL) {
+ // After all annotations arrays have been created, they are installed into the
+ // Annotations object that will be assigned to the InstanceKlass being created.
+
+ // Deallocate the Annotations object and the installed annotations arrays.
+ _combined_annotations->deallocate_contents(_loader_data);
+
+ // If the _combined_annotations pointer is non-NULL,
+ // then the other annotations fields should have been cleared.
+ assert(_annotations == NULL, "Should have been cleared");
+ assert(_type_annotations == NULL, "Should have been cleared");
+ assert(_fields_annotations == NULL, "Should have been cleared");
+ assert(_fields_type_annotations == NULL, "Should have been cleared");
+ } else {
+ // If the annotations arrays were not installed into the Annotations object,
+ // then they have to be deallocated explicitly.
MetadataFactory::free_array<u1>(_loader_data, _annotations);
MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
Annotations::free_contents(_loader_data, _fields_annotations);
Annotations::free_contents(_loader_data, _fields_type_annotations);
+ }

clear_class_metadata();

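Note: the classFileParser.cpp restructuring separates building the combined Annotations object (create_combined_annotations) from installing it (apply_parsed_class_metadata), so that on an error path exactly one owner frees the annotation arrays: either the combined object or the individual fields, never both. A compilable sketch of that ownership discipline with illustrative types (plain new/delete instead of the VM's metaspace allocator):

struct ParsedAnnotations {
  static const int kParts = 4;   // class/type/field/field-type annotation arrays
  int* parts[kParts] = {};       // individually owned until combined
  int** combined = nullptr;      // owns all parts once built

  void combine() {               // mirrors create_combined_annotations
    bool any = false;
    for (int i = 0; i < kParts; i++) any = any || (parts[i] != nullptr);
    if (!any) return;            // don't create the object unnecessarily
    combined = new int*[kParts];
    for (int i = 0; i < kParts; i++) { combined[i] = parts[i]; parts[i] = nullptr; }
  }

  ~ParsedAnnotations() {         // mirrors the destructor hunk above
    if (combined != nullptr) {   // the combined object owns the parts now
      for (int i = 0; i < kParts; i++) delete combined[i];
      delete[] combined;
    } else {                     // parts are still individually owned
      for (int i = 0; i < kParts; i++) delete parts[i];
    }
  }
};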
--- jdk8/hotspot/src/share/vm/classfile/classFileParser.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/classFileParser.hpp 2015-01-08 21:23:31.150148864 +0100
@@ -75,6 +75,7 @@
Array<u2>* _inner_classes;
Array<Klass*>* _local_interfaces;
Array<Klass*>* _transitive_interfaces;
+ Annotations* _combined_annotations;
AnnotationArray* _annotations;
AnnotationArray* _type_annotations;
Array<AnnotationArray*>* _fields_annotations;
@@ -86,6 +87,8 @@
void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; }

+ void create_combined_annotations(TRAPS);
+
void init_parsed_class_attributes(ClassLoaderData* loader_data) {
_loader_data = loader_data;
_synthetic_flag = false;
@@ -110,6 +113,7 @@
_inner_classes = NULL;
_local_interfaces = NULL;
_transitive_interfaces = NULL;
+ _combined_annotations = NULL;
_annotations = _type_annotations = NULL;
_fields_annotations = _fields_type_annotations = NULL;
}
@@ -247,7 +251,7 @@
Array<Method*>* parse_methods(bool is_interface,
AccessFlags* promoted_flags,
bool* has_final_method,
- bool* has_default_method,
+ bool* declares_default_methods,
TRAPS);
intArray* sort_methods(Array<Method*>* methods);

--- jdk8/hotspot/src/share/vm/classfile/classLoader.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/classLoader.cpp 2015-01-08 21:23:31.151148840 +0100
@@ -610,7 +610,7 @@
}
#endif

-void ClassLoader::setup_search_path(const char *class_path) {
+void ClassLoader::setup_search_path(const char *class_path, bool canonicalize) {
int offset = 0;
int len = (int)strlen(class_path);
int end = 0;
@@ -625,7 +625,13 @@
char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
strncpy(path, &class_path[start], end - start);
path[end - start] = '\0';
- update_class_path_entry_list(path, false);
+ if (canonicalize) {
+ char* canonical_path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN + 1);
+ if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
+ path = canonical_path;
+ }
+ }
+ update_class_path_entry_list(path, /*check_for_duplicates=*/canonicalize);
#if INCLUDE_CDS
if (DumpSharedSpaces) {
check_shared_classpath(path);
@@ -1131,7 +1137,7 @@
h = context.record_result(classpath_index, e, result, THREAD);
} else {
if (DumpSharedSpaces) {
- tty->print_cr("Preload Error: Cannot find %s", class_name);
+ tty->print_cr("Preload Warning: Cannot find %s", class_name);
}
}

--- jdk8/hotspot/src/share/vm/classfile/classLoaderData.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/classLoaderData.cpp 2015-01-08 21:23:31.151148840 +0100
@@ -747,7 +746,7 @@

// Move class loader data from main list to the unloaded list for unloading
// and deallocation later.
-bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
+bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, bool clean_alive) {
ClassLoaderData* data = _head;
ClassLoaderData* prev = NULL;
bool seen_dead_loader = false;
@@ -756,16 +755,8 @@
// purging and we don't want to rewalk the previously unloaded class loader data.
_saved_unloading = _unloading;

- // mark metadata seen on the stack and code cache so we can delete
- // unneeded entries.
- bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
- MetadataOnStackMark md_on_stack;
while (data != NULL) {
if (data->is_alive(is_alive_closure)) {
- if (has_redefined_a_class) {
- data->classes_do(InstanceKlass::purge_previous_versions);
- }
- data->free_deallocate_list();
prev = data;
data = data->next();
continue;
@@ -787,6 +778,11 @@
_unloading = dead;
}

+ if (clean_alive) {
+ // Clean previous versions and the deallocate list.
+ ClassLoaderDataGraph::clean_metaspaces();
+ }
+
if (seen_dead_loader) {
post_class_unload_events();
}
@@ -794,6 +790,26 @@
return seen_dead_loader;
}

+void ClassLoaderDataGraph::clean_metaspaces() {
+ // mark metadata seen on the stack and code cache so we can delete unneeded entries.
+ bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
+ MetadataOnStackMark md_on_stack(has_redefined_a_class);
+
+ if (has_redefined_a_class) {
+ // purge_previous_versions also cleans weak method links. Because
+ // one method's MDO can reference another method from another
+ // class loader, we need to first clean weak method links for all
+ // class loaders here. Below, we can then free redefined methods
+ // for all class loaders.
+ for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
+ data->classes_do(InstanceKlass::purge_previous_versions);
+ }
+ }
+
+ // Need to purge the previous version before deallocating.
+ free_deallocate_lists();
+}
+
void ClassLoaderDataGraph::purge() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
ClassLoaderData* list = _unloading;
@@ -821,6 +837,14 @@
#endif
}

+void ClassLoaderDataGraph::free_deallocate_lists() {
+ for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ // We need to keep this data until InstanceKlass::purge_previous_version has been
+ // called on all alive classes. See the comment in ClassLoaderDataGraph::clean_metaspaces.
+ cld->free_deallocate_list();
+ }
+}
+
// CDS support

// Global metaspaces for writing information to the shared archive. When
@@ -959,4 +983,4 @@
event.commit();
}

-#endif /* INCLUDE_TRACE */
+#endif // INCLUDE_TRACE
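Note: clean_metaspaces is deliberately two-phase, and the phases must not be interleaved per loader (which is exactly what the removed code in do_unloading used to do). A minimal sketch of the ordering constraint, under illustrative types:

struct LoaderData {
  LoaderData* next;
  void purge_previous_versions() { /* drop weak links to redefined methods */ }
  void free_deallocate_list()    { /* now safe to actually free the metadata */ }
};

// Phase 1 must finish for *every* loader before phase 2 frees anything,
// because one loader's MethodData can hold weak links to methods owned by
// a different loader (see the comment in clean_metaspaces above).
void clean_all(LoaderData* head) {
  for (LoaderData* d = head; d != nullptr; d = d->next) d->purge_previous_versions();
  for (LoaderData* d = head; d != nullptr; d = d->next) d->free_deallocate_list();
}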
--- jdk8/hotspot/src/share/vm/classfile/classLoaderData.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/classLoaderData.hpp 2015-01-08 21:23:31.151148840 +0100
@@ -31,6 +31,7 @@
#include "memory/metaspaceCounters.hpp"
#include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
#if INCLUDE_TRACE
# include "utilities/ticks.hpp"
#endif
@@ -71,6 +72,7 @@

static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(void);
+ static void clean_metaspaces();
public:
static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
static void purge();
@@ -89,7 +91,7 @@
static void classes_do(void f(Klass* const));
static void loaded_classes_do(KlassClosure* klass_closure);
static void classes_unloading_do(void f(Klass* const));
- static bool do_unloading(BoolObjectClosure* is_alive);
+ static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive);

// CMS support.
static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
@@ -105,6 +107,8 @@
}
}

+ static void free_deallocate_lists();
+
static void dump_on(outputStream * const out) PRODUCT_RETURN;
static void dump() { dump_on(tty); }
static void verify();
--- jdk8/hotspot/src/share/vm/classfile/classLoaderExt.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/classLoaderExt.hpp 2015-01-08 21:23:31.151148840 +0100
@@ -63,7 +63,19 @@
ClassPathEntry* new_entry) {
ClassLoader::add_to_list(new_entry);
}
+ static void append_boot_classpath(ClassPathEntry* new_entry) {
+ ClassLoader::add_to_list(new_entry);
+ }
static void setup_search_paths() {}
+
+ static void init_lookup_cache(TRAPS) {}
+ static void copy_lookup_cache_to_archive(char** top, char* end) {}
+ static char* restore_lookup_cache_from_archive(char* buffer) {return buffer;}
+ static inline bool is_lookup_cache_enabled() {return false;}
+
+ static bool known_to_not_exist(JNIEnv *env, jobject loader, const char *classname, TRAPS) {return false;}
+ static jobjectArray get_lookup_cache_urls(JNIEnv *env, jobject loader, TRAPS) {return NULL;}
+ static jintArray get_lookup_cache(JNIEnv *env, jobject loader, const char *pkgname, TRAPS) {return NULL;}
};

#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
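Note: these ClassLoaderExt stubs back the JVM_KnownToNotExist / JVM_GetResourceLookupCacheURLs / JVM_GetResourceLookupCache symbols that the two mapfiles above now export; in this open-source build they simply report that no lookup cache exists. The jvm.cpp side is not part of this excerpt, so the wiring below is a hedged sketch of what such an entry point presumably looks like, not a quote of the actual hunk:

JVM_ENTRY(jboolean, JVM_KnownToNotExist(JNIEnv *env, jobject loader, const char *classname))
  JVMWrapper("JVM_KnownToNotExist");
  // Delegates to the ClassLoaderExt stub above, which always returns false here.
  return ClassLoaderExt::known_to_not_exist(env, loader, classname, THREAD);
JVM_END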
--- jdk8/hotspot/src/share/vm/classfile/classLoader.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/classLoader.hpp 2015-01-08 21:23:31.151148840 +0100
@@ -129,8 +129,8 @@
bool _has_error;
bool _throw_exception;
volatile ClassPathEntry* _resolved_entry;
- ClassPathEntry* resolve_entry(TRAPS);
public:
+ ClassPathEntry* resolve_entry(TRAPS);
bool is_jar_file();
const char* name() { return _path; }
LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception);
@@ -218,7 +218,7 @@
static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
int start_index);
static void setup_bootstrap_search_path();
- static void setup_search_path(const char *class_path);
+ static void setup_search_path(const char *class_path, bool canonicalize=false);

static void load_zip_library();
static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
@@ -329,6 +329,10 @@
return e;
}

+ static int num_classpath_entries() {
+ return _num_entries;
+ }
+
#if INCLUDE_CDS
// Sharing dump and restore
static void copy_package_info_buckets(char** top, char* end);
--- jdk8/hotspot/src/share/vm/classfile/dictionary.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/dictionary.cpp 2015-01-08 21:23:31.152148816 +0100
@@ -223,7 +223,7 @@
}
free_entry(probe);
ResourceMark rm;
- tty->print_cr("Removed error class: %s", ik->external_name());
+ tty->print_cr("Preload Warning: Removed error class: %s", ik->external_name());
continue;
}

--- jdk8/hotspot/src/share/vm/classfile/javaClasses.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/javaClasses.cpp 2015-01-08 21:23:31.152148816 +0100
@@ -41,6 +41,7 @@
#include "oops/method.hpp"
#include "oops/symbol.hpp"
#include "oops/typeArrayOop.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
@@ -2775,12 +2776,35 @@
return (Metadata*)mname->address_field(_vmtarget_offset);
}

+bool java_lang_invoke_MemberName::is_method(oop mname) {
+ assert(is_instance(mname), "must be MemberName");
+ return (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0;
+}
+
#if INCLUDE_JVMTI
// Can be executed on VM thread only
-void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Metadata* ref) {
- assert((is_instance(mname) && (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0), "wrong type");
+void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Method* old_method,
+ Method* new_method, bool* trace_name_printed) {
+ assert(is_method(mname), "wrong type");
assert(Thread::current()->is_VM_thread(), "not VM thread");
- mname->address_field_put(_vmtarget_offset, (address)ref);
+
+ Method* target = (Method*)mname->address_field(_vmtarget_offset);
+ if (target == old_method) {
+ mname->address_field_put(_vmtarget_offset, (address)new_method);
+
+ if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+ if (!(*trace_name_printed)) {
+ // RC_TRACE_MESG macro has an embedded ResourceMark
+ RC_TRACE_MESG(("adjust: name=%s",
+ old_method->method_holder()->external_name()));
+ *trace_name_printed = true;
+ }
+ // RC_TRACE macro has an embedded ResourceMark
+ RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
+ new_method->name()->as_C_string(),
+ new_method->signature()->as_C_string()));
+ }
+ }
}
#endif // INCLUDE_JVMTI

--- jdk8/hotspot/src/share/vm/classfile/javaClasses.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/javaClasses.hpp 2015-01-08 21:23:31.153148792 +0100
@@ -1096,7 +1096,8 @@
static Metadata* vmtarget(oop mname);
static void set_vmtarget(oop mname, Metadata* target);
#if INCLUDE_JVMTI
- static void adjust_vmtarget(oop mname, Metadata* target);
+ static void adjust_vmtarget(oop mname, Method* old_method, Method* new_method,
+ bool* trace_name_printed);
#endif // INCLUDE_JVMTI

static intptr_t vmindex(oop mname);
@@ -1110,6 +1111,8 @@
return obj != NULL && is_subclass(obj->klass());
}

+ static bool is_method(oop obj);
+
// Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants):
enum {
MN_IS_METHOD = 0x00010000, // method (not constructor)
--- jdk8/hotspot/src/share/vm/classfile/metadataOnStackMark.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/metadataOnStackMark.cpp 2015-01-08 21:23:31.153148792 +0100
@@ -31,25 +31,23 @@
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "services/threadService.hpp"
-#include "utilities/growableArray.hpp"
+#include "utilities/chunkedList.hpp"

+volatile MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
+volatile MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL;

-// Keep track of marked on-stack metadata so it can be cleared.
-GrowableArray<Metadata*>* _marked_objects = NULL;
NOT_PRODUCT(bool MetadataOnStackMark::_is_active = false;)

// Walk metadata on the stack and mark it so that redefinition doesn't delete
// it. Class unloading also walks the previous versions and might try to
// delete it, so this class is used by class unloading also.
-MetadataOnStackMark::MetadataOnStackMark() {
+MetadataOnStackMark::MetadataOnStackMark(bool visit_code_cache) {
assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+ assert(_used_buffers == NULL, "sanity check");
NOT_PRODUCT(_is_active = true;)
- if (_marked_objects == NULL) {
- _marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
- }

Threads::metadata_do(Metadata::mark_on_stack);
- if (JvmtiExport::has_redefined_a_class()) {
+ if (visit_code_cache) {
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
}
CompileBroker::mark_on_stack();
@@ -62,15 +60,93 @@
// Unmark everything that was marked. Can't do the same walk because
// redefine classes messes up the code cache so the set of methods
// might not be the same.
- for (int i = 0; i< _marked_objects->length(); i++) {
- _marked_objects->at(i)->set_on_stack(false);
+
+ retire_buffer_for_thread(Thread::current());
+
+ MetadataOnStackBuffer* buffer = const_cast<MetadataOnStackBuffer* >(_used_buffers);
+ while (buffer != NULL) {
+ // Clear on stack state for all metadata.
+ size_t size = buffer->size();
+ for (size_t i = 0; i < size; i++) {
+ Metadata* md = buffer->at(i);
+ md->set_on_stack(false);
+ }
+
+ MetadataOnStackBuffer* next = buffer->next_used();
+
+ // Move the buffer to the free list.
+ buffer->clear();
+ buffer->set_next_used(NULL);
+ buffer->set_next_free(const_cast<MetadataOnStackBuffer*>(_free_buffers));
+ _free_buffers = buffer;
+
+ // Step to next used buffer.
+ buffer = next;
}
- _marked_objects->clear(); // reuse growable array for next time.
+
+ _used_buffers = NULL;
+
NOT_PRODUCT(_is_active = false;)
}

+void MetadataOnStackMark::retire_buffer(MetadataOnStackBuffer* buffer) {
+ if (buffer == NULL) {
+ return;
+ }
+
+ MetadataOnStackBuffer* old_head;
+
+ do {
+ old_head = const_cast<MetadataOnStackBuffer*>(_used_buffers);
+ buffer->set_next_used(old_head);
+ } while (Atomic::cmpxchg_ptr(buffer, &_used_buffers, old_head) != old_head);
+}
+
+void MetadataOnStackMark::retire_buffer_for_thread(Thread* thread) {
+ retire_buffer(thread->metadata_on_stack_buffer());
+ thread->set_metadata_on_stack_buffer(NULL);
+}
+
+bool MetadataOnStackMark::has_buffer_for_thread(Thread* thread) {
+ return thread->metadata_on_stack_buffer() != NULL;
+}
+
+MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() {
+ MetadataOnStackBuffer* allocated;
+ MetadataOnStackBuffer* new_head;
+
+ do {
+ allocated = const_cast<MetadataOnStackBuffer*>(_free_buffers);
+ if (allocated == NULL) {
+ break;
+ }
+ new_head = allocated->next_free();
+ } while (Atomic::cmpxchg_ptr(new_head, &_free_buffers, allocated) != allocated);
+
+ if (allocated == NULL) {
+ allocated = new MetadataOnStackBuffer();
+ }
+
+ assert(!allocated->is_full(), err_msg("Should not be full: " PTR_FORMAT, p2i(allocated)));
+
+ return allocated;
+}
+
// Record which objects are marked so we can unmark the same objects.
-void MetadataOnStackMark::record(Metadata* m) {
+void MetadataOnStackMark::record(Metadata* m, Thread* thread) {
assert(_is_active, "metadata on stack marking is active");
- _marked_objects->push(m);
+
+ MetadataOnStackBuffer* buffer = thread->metadata_on_stack_buffer();
+
+ if (buffer != NULL && buffer->is_full()) {
+ retire_buffer(buffer);
+ buffer = NULL;
+ }
+
+ if (buffer == NULL) {
+ buffer = allocate_buffer();
+ thread->set_metadata_on_stack_buffer(buffer);
+ }
+
+ buffer->push(m);
}
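Note: retire_buffer above publishes a thread-local buffer onto a global list with the classic lock-free push: link the candidate to the observed head, then compare-and-swap the head, retrying on contention. The same shape in standalone C++11, with std::atomic standing in for HotSpot's Atomic::cmpxchg_ptr:

#include <atomic>

struct Buffer { Buffer* next_used = nullptr; };

std::atomic<Buffer*> used_buffers{nullptr};

void retire(Buffer* buffer) {
  Buffer* old_head = used_buffers.load();
  do {
    buffer->next_used = old_head;  // must be re-linked on every retry
  } while (!used_buffers.compare_exchange_weak(old_head, buffer));
}

The list is only pushed to concurrently; it is drained single-threaded in ~MetadataOnStackMark() at a safepoint, so a push-only Treiber stack is sufficient and ABA is not a concern here.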
--- jdk8/hotspot/src/share/vm/classfile/metadataOnStackMark.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/metadataOnStackMark.hpp 2015-01-08 21:23:31.153148792 +0100
@@ -26,9 +26,12 @@
#define SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP

#include "memory/allocation.hpp"
+#include "utilities/chunkedList.hpp"

class Metadata;

+typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
+
// Helper class to mark and unmark metadata used on the stack as either handles
// or executing methods, so that it can't be deleted during class redefinition
// and class unloading.
@@ -36,10 +39,20 @@
// metadata during parsing, relocated methods, and methods in backtraces.
class MetadataOnStackMark : public StackObj {
NOT_PRODUCT(static bool _is_active;)
+
+ static volatile MetadataOnStackBuffer* _used_buffers;
+ static volatile MetadataOnStackBuffer* _free_buffers;
+
+ static MetadataOnStackBuffer* allocate_buffer();
+ static void retire_buffer(MetadataOnStackBuffer* buffer);
+
public:
- MetadataOnStackMark();
+ MetadataOnStackMark(bool visit_code_cache);
~MetadataOnStackMark();
- static void record(Metadata* m);
+
+ static void record(Metadata* m, Thread* thread);
+ static void retire_buffer_for_thread(Thread* thread);
+ static bool has_buffer_for_thread(Thread* thread);
};

#endif // SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP
--- jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp 2015-01-08 21:23:31.153148792 +0100
@@ -1691,9 +1690,9 @@

// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
-bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
+bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive, bool clean_alive) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
- bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive);
+ bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive, clean_alive);
if (unloading_occurred) {
dictionary()->do_unloading();
constraints()->purge_loader_constraints();
@@ -2664,7 +2663,7 @@
class_loader->klass() : (Klass*)NULL);
event.commit();
}
-#endif /* INCLUDE_TRACE */
+#endif // INCLUDE_TRACE
}

#ifndef PRODUCT
--- jdk8/hotspot/src/share/vm/classfile/systemDictionary.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/systemDictionary.hpp 2015-01-08 21:23:31.154148768 +0100
@@ -175,6 +175,8 @@
do_klass(URL_klass, java_net_URL, Pre ) \
do_klass(Jar_Manifest_klass, java_util_jar_Manifest, Pre ) \
do_klass(sun_misc_Launcher_klass, sun_misc_Launcher, Pre ) \
+ do_klass(sun_misc_Launcher_AppClassLoader_klass, sun_misc_Launcher_AppClassLoader, Pre ) \
+ do_klass(sun_misc_Launcher_ExtClassLoader_klass, sun_misc_Launcher_ExtClassLoader, Pre ) \
do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \
\
/* It's NULL in non-1.4 JDKs. */ \
@@ -339,7 +341,7 @@

// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
- static bool do_unloading(BoolObjectClosure* is_alive);
+ static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive = true);

// Used by DumpSharedSpaces only to remove classes that failed verification
static void remove_classes_in_error_state();
--- jdk8/hotspot/src/share/vm/classfile/vmSymbols.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/classfile/vmSymbols.hpp 2015-01-08 21:23:31.154148768 +0100
@@ -116,6 +116,7 @@
template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \
template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
template(sun_misc_PostVMInitHook, "sun/misc/PostVMInitHook") \
+ template(sun_misc_Launcher_AppClassLoader, "sun/misc/Launcher$AppClassLoader") \
template(sun_misc_Launcher_ExtClassLoader, "sun/misc/Launcher$ExtClassLoader") \
\
/* Java runtime version access */ \
--- jdk8/hotspot/src/share/vm/code/dependencies.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/code/dependencies.cpp 2015-01-08 21:23:31.154148768 +0100
@@ -879,6 +879,8 @@
bool is_witness(Klass* k) {
if (doing_subtype_search()) {
return Dependencies::is_concrete_klass(k);
+ } else if (!k->oop_is_instance()) {
+ return false; // no methods to find in an array type
} else {
Method* m = InstanceKlass::cast(k)->find_method(_name, _signature);
if (m == NULL || !Dependencies::is_concrete_method(m)) return false;
@@ -1085,7 +1087,7 @@
Klass* chain; // scratch variable
#define ADD_SUBCLASS_CHAIN(k) { \
assert(chaini < CHAINMAX, "oob"); \
- chain = InstanceKlass::cast(k)->subklass(); \
+ chain = k->subklass(); \
if (chain != NULL) chains[chaini++] = chain; }

// Look for non-abstract subclasses.
@@ -1096,6 +1098,7 @@
// (Their subclasses are additional indirect implementors.
// See InstanceKlass::add_implementor.)
// (Note: nof_implementors is always zero for non-interfaces.)
+ if (top_level_call) {
int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
if (nof_impls > 1) {
// Avoid this case: *I.m > { A.m, C }; B.m > C
@@ -1127,6 +1130,7 @@
ADD_SUBCLASS_CHAIN(impl);
}
}
+ }

// Recursively process each non-trivial sibling chain.
while (chaini > 0) {
--- jdk8/hotspot/src/share/vm/code/nmethod.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/code/nmethod.cpp 2015-01-08 21:23:31.155148744 +0100
@@ -1741,11 +1741,17 @@
set_unload_reported();
}

-void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
+void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
+
+ if (mark_on_stack) {
+ Metadata::mark_on_stack(cichk_oop->holder_method());
+ Metadata::mark_on_stack(cichk_oop->holder_klass());
+ }
+
if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
return;
@@ -1753,6 +1759,10 @@
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
+ if (mark_on_stack) {
+ Metadata::mark_on_stack(ic_oop);
+ }
+
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
return;
@@ -1813,7 +1823,7 @@
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(&iter);
- clean_ic_if_metadata_is_dead(ic, is_alive);
+ clean_ic_if_metadata_is_dead(ic, is_alive, false);
}
}
}
@@ -1881,6 +1891,53 @@
return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

+bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
+ assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
+
+ oop_Relocation* r = iter_at_oop->oop_reloc();
+ // Traverse those oops directly embedded in the code.
+ // Other oops (oop_index>0) are seen as part of scopes_oops.
+ assert(1 == (r->oop_is_immediate()) +
+ (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+ "oop must be found in exactly one place");
+ if (r->oop_is_immediate() && r->oop_value() != NULL) {
+ // Unload this nmethod if the oop is dead.
+ if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+ return true;;
+ }
+ }
+
+ return false;
+}
+
+void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
+ assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
+
+ metadata_Relocation* r = iter_at_metadata->metadata_reloc();
+ // In this metadata, we must only follow those metadatas directly embedded in
+ // the code. Other metadatas (oop_index>0) are seen as part of
+ // the metadata section below.
+ assert(1 == (r->metadata_is_immediate()) +
+ (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
+ "metadata must be found in exactly one place");
+ if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
+ Metadata* md = r->metadata_value();
+ if (md != _method) Metadata::mark_on_stack(md);
+ }
+}
+
+void nmethod::mark_metadata_on_stack_non_relocs() {
+ // Visit the metadata section
+ for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
+ if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
+ Metadata* md = *p;
+ Metadata::mark_on_stack(md);
+ }
+
+ // Visit metadata not embedded in the other places.
+ if (_method != NULL) Metadata::mark_on_stack(_method);
+}
+
bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;

@@ -1910,6 +1967,11 @@
unloading_occurred = true;
}

+ // When class redefinition is used all metadata in the CodeCache has to be recorded,
+ // so that unused "previous versions" can be purged. Since walking the CodeCache can
+ // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
+ bool mark_metadata_on_stack = a_class_was_redefined;
+
// Exception cache
clean_exception_cache(is_alive);

@@ -1925,7 +1987,7 @@
if (unloading_occurred) {
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
- clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
+ clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
}

postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
@@ -1941,23 +2003,20 @@

case relocInfo::oop_type:
if (!is_unloaded) {
- // Unload check
- oop_Relocation* r = iter.oop_reloc();
- // Traverse those oops directly embedded in the code.
- // Other oops (oop_index>0) are seen as part of scopes_oops.
- assert(1 == (r->oop_is_immediate()) +
- (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
- "oop must be found in exactly one place");
- if (r->oop_is_immediate() && r->oop_value() != NULL) {
- if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
- is_unloaded = true;
- }
- }
+ is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
}
break;

+ case relocInfo::metadata_type:
+ if (mark_metadata_on_stack) {
+ mark_metadata_on_stack_at(&iter);
}
}
+ }
+
+ if (mark_metadata_on_stack) {
+ mark_metadata_on_stack_non_relocs();
+ }

if (is_unloaded) {
return postponed;
@@ -2106,7 +2165,7 @@
while (iter.next()) {
if (iter.type() == relocInfo::metadata_type ) {
metadata_Relocation* r = iter.metadata_reloc();
- // In this lmetadata, we must only follow those metadatas directly embedded in
+ // In this metadata, we must only follow those metadatas directly embedded in
// the code. Other metadatas (oop_index>0) are seen as part of
// the metadata section below.
assert(1 == (r->metadata_is_immediate()) +
@@ -2140,7 +2199,7 @@
f(md);
}

- // Call function Method*, not embedded in these other places.
+ // Visit metadata not embedded in the other places.
if (_method != NULL) f(_method);
}

--- jdk8/hotspot/src/share/vm/code/nmethod.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/code/nmethod.hpp 2015-01-08 21:23:31.155148744 +0100
@@ -614,9 +614,16 @@
// The parallel versions are used by G1.
bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
+
+ private:
// Unload a nmethod if the *root object is dead.
bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
+ bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
+
+ void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
+ void mark_metadata_on_stack_non_relocs();

+ public:
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
void oops_do(OopClosure* f) { oops_do(f, false); }
--- jdk8/hotspot/src/share/vm/compiler/compileBroker.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/compiler/compileBroker.cpp 2015-01-08 21:23:31.156148720 +0100
|
|
@@ -183,9 +183,8 @@
|
|
|
|
long CompileBroker::_peak_compilation_time = 0;
|
|
|
|
-CompileQueue* CompileBroker::_c2_method_queue = NULL;
|
|
-CompileQueue* CompileBroker::_c1_method_queue = NULL;
|
|
-CompileTask* CompileBroker::_task_free_list = NULL;
|
|
+CompileQueue* CompileBroker::_c2_compile_queue = NULL;
|
|
+CompileQueue* CompileBroker::_c1_compile_queue = NULL;
|
|
|
|
GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
|
|
|
|
@@ -253,13 +252,56 @@
|
|
|
|
// By convention, the compiling thread is responsible for
|
|
// recycling a non-blocking CompileTask.
|
|
- CompileBroker::free_task(task);
|
|
+ CompileTask::free(task);
|
|
}
|
|
}
|
|
|
|
|
|
-// ------------------------------------------------------------------
|
|
-// CompileTask::initialize
|
|
+CompileTask* CompileTask::_task_free_list = NULL;
|
|
+#ifdef ASSERT
|
|
+int CompileTask::_num_allocated_tasks = 0;
|
|
+#endif
|
|
+/**
|
|
+ * Allocate a CompileTask, from the free list if possible.
|
|
+ */
|
|
+CompileTask* CompileTask::allocate() {
|
|
+ MutexLocker locker(CompileTaskAlloc_lock);
|
|
+ CompileTask* task = NULL;
|
|
+
|
|
+ if (_task_free_list != NULL) {
|
|
+ task = _task_free_list;
|
|
+ _task_free_list = task->next();
|
|
+ task->set_next(NULL);
|
|
+ } else {
|
|
+ task = new CompileTask();
|
|
+ DEBUG_ONLY(_num_allocated_tasks++;)
|
|
+ assert (_num_allocated_tasks < 10000, "Leaking compilation tasks?");
|
|
+ task->set_next(NULL);
|
|
+ task->set_is_free(true);
|
|
+ }
|
|
+ assert(task->is_free(), "Task must be free.");
|
|
+ task->set_is_free(false);
|
|
+ return task;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Add a task to the free list.
|
|
+ */
|
|
+void CompileTask::free(CompileTask* task) {
|
|
+ MutexLocker locker(CompileTaskAlloc_lock);
|
|
+ if (!task->is_free()) {
|
|
+ task->set_code(NULL);
|
|
+ assert(!task->lock()->is_locked(), "Should not be locked when freed");
|
|
+ JNIHandles::destroy_global(task->_method_holder);
|
|
+ JNIHandles::destroy_global(task->_hot_method_holder);
|
|
+
|
|
+ task->set_is_free(true);
|
|
+ task->set_next(_task_free_list);
|
|
+ _task_free_list = task;
|
|
+ }
|
|
+}
|
|
+
|
|
void CompileTask::initialize(int compile_id,
|
|
methodHandle method,
|
|
int osr_bci,
|
|
@@ -318,15 +360,6 @@
|
|
if (nm == NULL) _code_handle = NULL; // drop the handle also
|
|
}
|
|
|
|
-// ------------------------------------------------------------------
|
|
-// CompileTask::free
|
|
-void CompileTask::free() {
|
|
- set_code(NULL);
|
|
- assert(!_lock->is_locked(), "Should not be locked when freed");
|
|
- JNIHandles::destroy_global(_method_holder);
|
|
- JNIHandles::destroy_global(_hot_method_holder);
|
|
-}
|
|
-
|
|
|
|
void CompileTask::mark_on_stack() {
|
|
// Mark these methods as something redefine classes cannot remove.
|
|
@@ -594,9 +627,12 @@
|
|
|
|
|
|
|
|
-// Add a CompileTask to a CompileQueue
|
|
+/**
|
|
+ * Add a CompileTask to a CompileQueue
|
|
+ */
|
|
void CompileQueue::add(CompileTask* task) {
|
|
assert(lock()->owned_by_self(), "must own lock");
|
|
+ assert(!CompileBroker::is_compilation_disabled_forever(), "Do not add task if compilation is turned off forever");
|
|
|
|
task->set_next(NULL);
|
|
task->set_prev(NULL);
|
|
@@ -618,9 +654,7 @@
|
|
// Mark the method as being in the compile queue.
|
|
task->method()->set_queued_for_compilation();
|
|
|
|
- if (CIPrintCompileQueue) {
|
|
- print();
|
|
- }
|
|
+ NOT_PRODUCT(print();)
|
|
|
|
if (LogCompilation && xtty != NULL) {
|
|
task->log_task_queued();
|
|
@@ -630,14 +664,32 @@
|
|
lock()->notify_all();
|
|
}
|
|
|
|
-void CompileQueue::delete_all() {
|
|
- assert(lock()->owned_by_self(), "must own lock");
|
|
- if (_first != NULL) {
|
|
- for (CompileTask* task = _first; task != NULL; task = task->next()) {
|
|
- delete task;
|
|
+/**
|
|
+ * Empties compilation queue by putting all compilation tasks onto
|
|
+ * a freelist. Furthermore, the method wakes up all threads that are
|
|
+ * waiting on a compilation task to finish. This can happen if background
|
|
+ * compilation is disabled.
|
|
+ */
|
|
+void CompileQueue::free_all() {
|
|
+ MutexLocker mu(lock());
|
|
+ CompileTask* next = _first;
|
|
+
|
|
+ // Iterate over all tasks in the compile queue
|
|
+ while (next != NULL) {
|
|
+ CompileTask* current = next;
|
|
+ next = current->next();
|
|
+ {
|
|
+ // Wake up thread that blocks on the compile task.
|
|
+ MutexLocker ct_lock(current->lock());
|
|
+ current->lock()->notify();
|
|
}
|
|
- _first = NULL;
|
|
+ // Put the task back on the freelist.
|
|
+ CompileTask::free(current);
|
|
}
|
|
+ _first = NULL;
|
|
+
|
|
+ // Wake up all threads that block on the queue.
|
|
+ lock()->notify_all();
|
|
}
|
|
|
|
// ------------------------------------------------------------------
|
|
@@ -767,9 +819,13 @@
|
|
}
|
|
}
|
|
|
|
-// ------------------------------------------------------------------
|
|
-// CompileQueue::print
|
|
+#ifndef PRODUCT
+/**
+ * Print entire compilation queue.
+ */
 void CompileQueue::print() {
+  if (CIPrintCompileQueue) {
+    ttyLocker ttyl;
     tty->print_cr("Contents of %s", name());
     tty->print_cr("----------------------");
     CompileTask* task = _first;
@@ -779,6 +835,8 @@
     }
     tty->print_cr("----------------------");
   }
+}
+#endif // PRODUCT

 CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS) {

@@ -851,9 +909,6 @@
   _compilers[1] = new SharkCompiler();
 #endif // SHARK

-  // Initialize the CompileTask free list
-  _task_free_list = NULL;
-
   // Start the CompilerThreads
   init_compiler_threads(c1_count, c2_count);
   // totalTime performance counter is always created as it is required
@@ -1046,11 +1101,11 @@
 #endif // !ZERO && !SHARK
   // Initialize the compilation queue
   if (c2_compiler_count > 0) {
-    _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
+    _c2_compile_queue = new CompileQueue("C2 CompileQueue", MethodCompileQueue_lock);
     _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
-    _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
+    _c1_compile_queue = new CompileQueue("C1 CompileQueue", MethodCompileQueue_lock);
     _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }

@@ -1065,7 +1120,7 @@
       sprintf(name_buffer, "C2 CompilerThread%d", i);
       CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
       // Shark and C2
-      CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
+      CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
       _compiler_threads->append(new_thread);
     }

@@ -1074,7 +1129,7 @@
       sprintf(name_buffer, "C1 CompilerThread%d", i);
       CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
       // C1
-      CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
+      CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
       _compiler_threads->append(new_thread);
     }

@@ -1084,14 +1139,19 @@
 }


-// Set the methods on the stack as on_stack so that redefine classes doesn't
-// reclaim them
+/**
+ * Set the methods on the stack as on_stack so that redefine classes doesn't
+ * reclaim them. This method is executed at a safepoint.
+ */
 void CompileBroker::mark_on_stack() {
-  if (_c2_method_queue != NULL) {
-    _c2_method_queue->mark_on_stack();
+  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+  // Since we are at a safepoint, we do not need a lock to access
+  // the compile queues.
+  if (_c2_compile_queue != NULL) {
+    _c2_compile_queue->mark_on_stack();
   }
-  if (_c1_method_queue != NULL) {
-    _c1_method_queue->mark_on_stack();
+  if (_c1_compile_queue != NULL) {
+    _c1_compile_queue->mark_on_stack();
   }
 }

@@ -1154,7 +1214,7 @@

   // If this method is already in the compile queue, then
   // we do not block the current thread.
-  if (compilation_is_in_queue(method, osr_bci)) {
+  if (compilation_is_in_queue(method)) {
     // We may want to decay our counter a bit here to prevent
     // multiple denied requests for compilation. This is an
     // open compilation policy issue. Note: The other possibility,
@@ -1193,7 +1253,7 @@
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
     // Here we need to be more careful, see 14012000 below.
-    if (compilation_is_in_queue(method, osr_bci)) {
+    if (compilation_is_in_queue(method)) {
       return;
     }

@@ -1214,7 +1274,7 @@
     }

     // Should this thread wait for completion of the compile?
-    blocking = is_compile_blocking(method, osr_bci);
+    blocking = is_compile_blocking();

     // We will enter the compilation in the queue.
     // 14012000: Note that this sets the queued_for_compile bits in
@@ -1406,19 +1466,17 @@
 }


-// ------------------------------------------------------------------
-// CompileBroker::compilation_is_in_queue
-//
-// See if this compilation is already requested.
-//
-// Implementation note: there is only a single "is in queue" bit
-// for each method.  This means that the check below is overly
-// conservative in the sense that an osr compilation in the queue
-// will block a normal compilation from entering the queue (and vice
-// versa).  This can be remedied by a full queue search to disambiguate
-// cases.  If it is deemed profitible, this may be done.
-bool CompileBroker::compilation_is_in_queue(methodHandle method,
-                                            int          osr_bci) {
+/**
+ * See if this compilation is already requested.
+ *
+ * Implementation note: there is only a single "is in queue" bit
+ * for each method.  This means that the check below is overly
+ * conservative in the sense that an osr compilation in the queue
+ * will block a normal compilation from entering the queue (and vice
+ * versa).  This can be remedied by a full queue search to disambiguate
+ * cases.  If it is deemed profitable, this may be done.
+ */
+bool CompileBroker::compilation_is_in_queue(methodHandle method) {
   return method->queued_for_compilation();
 }

@@ -1498,13 +1556,11 @@
 #endif
 }

-
-// ------------------------------------------------------------------
-// CompileBroker::is_compile_blocking
-//
-// Should the current thread be blocked until this compilation request
-// has been fulfilled?
-bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
+/**
+ * Should the current thread block until this compilation request
+ * has been fulfilled?
+ */
+bool CompileBroker::is_compile_blocking() {
   assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
   return !BackgroundCompilation;
 }
@@ -1532,7 +1588,7 @@
                                               int           hot_count,
                                               const char*   comment,
                                               bool          blocking) {
-  CompileTask* new_task = allocate_task();
+  CompileTask* new_task = CompileTask::allocate();
   new_task->initialize(compile_id, method, osr_bci, comp_level,
                        hot_method, hot_count, comment,
                        blocking);
@@ -1541,43 +1597,12 @@
 }


-// ------------------------------------------------------------------
-// CompileBroker::allocate_task
-//
-// Allocate a CompileTask, from the free list if possible.
-CompileTask* CompileBroker::allocate_task() {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  CompileTask* task = NULL;
-  if (_task_free_list != NULL) {
-    task = _task_free_list;
-    _task_free_list = task->next();
-    task->set_next(NULL);
-  } else {
-    task = new CompileTask();
-    task->set_next(NULL);
-  }
-  return task;
-}
-
-
-// ------------------------------------------------------------------
-// CompileBroker::free_task
-//
-// Add a task to the free list.
-void CompileBroker::free_task(CompileTask* task) {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  task->free();
-  task->set_next(_task_free_list);
-  _task_free_list = task;
-}
-
-
-// ------------------------------------------------------------------
-// CompileBroker::wait_for_completion
-//
-// Wait for the given method CompileTask to complete.
+/**
+ * Wait for the compilation task to complete.
+ */
 void CompileBroker::wait_for_completion(CompileTask* task) {
   if (CIPrintCompileQueue) {
+    ttyLocker ttyl;
     tty->print_cr("BLOCKING FOR COMPILE");
   }

@@ -1590,26 +1615,34 @@
   {
     MutexLocker waiter(task->lock(), thread);

-    while (!task->is_complete())
+    while (!task->is_complete() && !is_compilation_disabled_forever()) {
       task->lock()->wait();
     }
+  }
+
+  thread->set_blocked_on_compilation(false);
+  if (is_compilation_disabled_forever()) {
+    CompileTask::free(task);
+    return;
+  }
+
   // It is harmless to check this status without the lock, because
   // completion is a stable property (until the task object is recycled).
   assert(task->is_complete(), "Compilation should have completed");
   assert(task->code_handle() == NULL, "must be reset");

-  thread->set_blocked_on_compilation(false);
-
   // By convention, the waiter is responsible for recycling a
   // blocking CompileTask. Since there is only one waiter ever
   // waiting on a CompileTask, we know that no one else will
   // be using this CompileTask; we can free it.
-  free_task(task);
+  CompileTask::free(task);
 }

-// Initialize compiler thread(s) + compiler object(s). The postcondition
-// of this function is that the compiler runtimes are initialized and that
-//compiler threads can start compiling.
+/**
+ * Initialize compiler thread(s) + compiler object(s). The postcondition
+ * of this function is that the compiler runtimes are initialized and that
+ * compiler threads can start compiling.
+ */
 bool CompileBroker::init_compiler_runtime() {
   CompilerThread* thread = CompilerThread::current();
   AbstractCompiler* comp = thread->compiler();
@@ -1660,9 +1692,11 @@
   return true;
 }

-// If C1 and/or C2 initialization failed, we shut down all compilation.
-// We do this to keep things simple. This can be changed if it ever turns out to be
-// a problem.
+/**
+ * If C1 and/or C2 initialization failed, we shut down all compilation.
+ * We do this to keep things simple. This can be changed if it ever turns
+ * out to be a problem.
+ */
 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
   // Free buffer blob, if allocated
   if (thread->get_buffer_blob() != NULL) {
@@ -1674,28 +1708,25 @@
   // There are two reasons for shutting down the compiler
   // 1) compiler runtime initialization failed
   // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
-  warning("Shutting down compiler %s (no space to run compilers)", comp->name());
+  warning("%s initialization failed. Shutting down all compilers", comp->name());

   // Only one thread per compiler runtime object enters here
   // Set state to shut down
   comp->set_shut_down();

-  MutexLocker mu(MethodCompileQueue_lock, thread);
-  CompileQueue* queue;
-  if (_c1_method_queue != NULL) {
-    _c1_method_queue->delete_all();
-    queue = _c1_method_queue;
-    _c1_method_queue = NULL;
-    delete _c1_method_queue;
+  // Delete all queued compilation tasks to make compiler threads exit faster.
+  if (_c1_compile_queue != NULL) {
+    _c1_compile_queue->free_all();
   }

-  if (_c2_method_queue != NULL) {
-    _c2_method_queue->delete_all();
-    queue = _c2_method_queue;
-    _c2_method_queue = NULL;
-    delete _c2_method_queue;
+  if (_c2_compile_queue != NULL) {
+    _c2_compile_queue->free_all();
   }

+  // Set flags so that we continue execution with using interpreter only.
+  UseCompiler    = false;
+  UseInterpreter = true;
+
   // We could delete compiler runtimes also. However, there are references to
   // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then
   // fail. This can be done later if necessary.
@@ -1781,22 +1812,6 @@
     if (method()->number_of_breakpoints() == 0) {
       // Compile the method.
       if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
-#ifdef COMPILER1
-        // Allow repeating compilations for the purpose of benchmarking
-        // compile speed. This is not useful for customers.
-        if (CompilationRepeat != 0) {
-          int compile_count = CompilationRepeat;
-          while (compile_count > 0) {
-            invoke_compiler_on_method(task);
-            nmethod* nm = method->code();
-            if (nm != NULL) {
-              nm->make_zombie();
-              method->clear_code();
-            }
-            compile_count--;
-          }
-        }
-#endif /* COMPILER1 */
         invoke_compiler_on_method(task);
       } else {
         // After compilation is disabled, remove remaining methods from queue
@@ -1830,7 +1845,7 @@
                os::file_separator(), thread_id, os::current_process_id());
   }

-  fp = fopen(file_name, "at");
+  fp = fopen(file_name, "wt");
   if (fp != NULL) {
     if (LogCompilation && Verbose) {
       tty->print_cr("Opening compilation log %s", file_name);
--- jdk8/hotspot/src/share/vm/compiler/compileBroker.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/compiler/compileBroker.hpp	2015-01-08 21:23:31.156148720 +0100
@@ -40,6 +40,11 @@
   friend class VMStructs;

 private:
+  static CompileTask* _task_free_list;
+#ifdef ASSERT
+  static int _num_allocated_tasks;
+#endif
+
   Monitor*     _lock;
   uint         _compile_id;
   Method*      _method;
@@ -52,7 +57,7 @@
   int          _num_inlined_bytecodes;
   nmethodLocker* _code_handle;  // holder of eventual result
   CompileTask* _next, *_prev;
-
+  bool         _is_free;
   // Fields used for logging why the compilation was initiated:
   jlong        _time_queued;  // in units of os::elapsed_counter()
   Method*      _hot_method;   // which method actually triggered this task
@@ -70,7 +75,8 @@
                   methodHandle hot_method, int hot_count, const char* comment,
                   bool is_blocking);

-  void free();
+  static CompileTask* allocate();
+  static void free(CompileTask* task);

   int          compile_id() const                { return _compile_id; }
   Method*      method() const                    { return _method; }
@@ -99,6 +105,8 @@
   void         set_next(CompileTask* next)       { _next = next; }
   CompileTask* prev() const                      { return _prev; }
   void         set_prev(CompileTask* prev)       { _prev = prev; }
+  bool         is_free() const                   { return _is_free; }
+  void         set_is_free(bool val)             { _is_free = val; }

 private:
   static void  print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
@@ -225,8 +233,8 @@

   // Redefine Classes support
   void mark_on_stack();
-  void delete_all();
-  void print();
+  void free_all();
+  NOT_PRODUCT (void print();)

   ~CompileQueue() {
     assert (is_empty(), " Compile Queue must be empty");
@@ -279,9 +287,8 @@
   static int  _last_compile_level;
   static char _last_method_compiled[name_buffer_length];

-  static CompileQueue* _c2_method_queue;
-  static CompileQueue* _c1_method_queue;
-  static CompileTask* _task_free_list;
+  static CompileQueue* _c2_compile_queue;
+  static CompileQueue* _c1_compile_queue;

   static GrowableArray<CompilerThread*>* _compiler_threads;

@@ -334,7 +341,7 @@
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
-  static bool is_compile_blocking      (methodHandle method, int osr_bci);
+  static bool is_compile_blocking      ();
   static void preload_classes          (methodHandle method, TRAPS);

   static CompileTask* create_compile_task(CompileQueue* queue,
@@ -346,8 +353,6 @@
                                           int hot_count,
                                           const char* comment,
                                           bool blocking);
-  static CompileTask* allocate_task();
-  static void free_task(CompileTask* task);
   static void wait_for_completion(CompileTask* task);

   static void invoke_compiler_on_method(CompileTask* task);
@@ -365,8 +370,8 @@
                                  const char* comment,
                                  Thread* thread);
   static CompileQueue* compile_queue(int comp_level) {
-    if (is_c2_compile(comp_level)) return _c2_method_queue;
-    if (is_c1_compile(comp_level)) return _c1_method_queue;
+    if (is_c2_compile(comp_level)) return _c2_compile_queue;
+    if (is_c1_compile(comp_level)) return _c1_compile_queue;
     return NULL;
   }
   static bool init_compiler_runtime();
@@ -384,7 +389,7 @@
     return NULL;
   }

-  static bool compilation_is_in_queue(methodHandle method, int osr_bci);
+  static bool compilation_is_in_queue(methodHandle method);
   static int queue_size(int comp_level) {
     CompileQueue *q = compile_queue(comp_level);
     return q != NULL ? q->size() : 0;
--- jdk8/hotspot/src/share/vm/compiler/compileLog.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/compiler/compileLog.cpp	2015-01-08 21:23:31.156148720 +0100
@@ -55,8 +55,10 @@
 }

 CompileLog::~CompileLog() {
-  delete _out;
+  delete _out; // Close fd in fileStream::~fileStream()
   _out = NULL;
+  // Remove partial file after merging in CompileLog::finish_log_on_error
+  unlink(_file);
   FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
   FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
 }
@@ -268,10 +270,9 @@
       }
       file->print_raw_cr("</compilation_log>");
       close(partial_fd);
-      unlink(partial_file);
     }
     CompileLog* next_log = log->_next;
-    delete log;
+    delete log; // Removes partial file
     log = next_log;
   }
   _first = NULL;
--- jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	2015-01-08 21:23:31.157148696 +0100
@@ -2641,7 +2641,7 @@
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
-  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
   // In some cases, when the application has a phase change,
   // there may be a sudden and sharp shift in the object survival
   // profile, and updating the counts at the end of a scavenge
--- jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	2015-01-08 21:23:31.157148696 +0100
@@ -50,8 +50,12 @@
 void VM_CMS_Operation::acquire_pending_list_lock() {
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
-  ConcurrentMarkSweepThread::slt()->
-    manipulatePLL(SurrogateLockerThread::acquirePLL);
+  SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
+  if (slt != NULL) {
+    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
+  } else {
+    SurrogateLockerThread::report_missing_slt();
+  }
 }

 void VM_CMS_Operation::release_and_notify_pending_list_lock() {
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	2015-01-08 21:23:31.158148672 +0100
@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "classfile/metadataOnStackMark.hpp"
 #include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
@@ -2174,6 +2175,7 @@
   // We reclaimed old regions so we should calculate the sizes to make
   // sure we update the old gen/space data.
   g1h->g1mm()->update_sizes();
+  g1h->allocation_context_stats().update_after_mark();

   g1h->trace_heap_after_concurrent_cycle();
 }
@@ -2602,17 +2604,27 @@
   G1RemarkGCTraceTime trace("Unloading", G1Log::finer());

   if (ClassUnloadingWithConcurrentMark) {
+    // Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack
+    // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
+    // Defer the cleaning until we have complete on_stack data.
+    MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
+
     bool purged_classes;

     {
       G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
-      purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
+      purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
     }

     {
       G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
       weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
     }
+
+    {
+      G1RemarkGCTraceTime trace("Deallocate Metadata", G1Log::finest());
+      ClassLoaderDataGraph::free_deallocate_lists();
+    }
   }

   if (G1StringDedup::is_enabled()) {
@@ -3335,7 +3347,6 @@
   } else {
     g1_par_agg_task.work(0);
   }
-  _g1h->allocation_context_stats().update_at_remark();
 }

 // Clear the per-worker arrays used to store the per-region counting data
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	2015-01-08 21:23:31.158148672 +0100
@@ -45,7 +45,7 @@
 public:
   inline void clear() { }
   inline void update(bool full_gc) { }
-  inline void update_at_remark() { }
+  inline void update_after_mark() { }
   inline bool available() { return false; }
 };

--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2015-01-08 21:23:31.159148648 +0100
@@ -27,6 +27,7 @@
 #endif

 #include "precompiled.hpp"
+#include "classfile/metadataOnStackMark.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc_implementation/g1/bufferingOopClosure.hpp"
@@ -2478,6 +2479,7 @@

   unsigned int gc_count_before;
   unsigned int old_marking_count_before;
+  unsigned int full_gc_count_before;
   bool retry_gc;

   do {
@@ -2488,6 +2490,7 @@

       // Read the GC count while holding the Heap_lock
       gc_count_before = total_collections();
+      full_gc_count_before = total_full_collections();
       old_marking_count_before = _old_marking_cycles_started;
     }

@@ -2532,7 +2535,7 @@
       VMThread::execute(&op);
     } else {
       // Schedule a Full GC.
-      VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
+      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
       VMThread::execute(&op);
     }
   }
@@ -5131,6 +5134,10 @@
         clean_nmethod(claimed_nmethods[i]);
       }
     }
+
+    // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
+    // Need to retire the buffers now that this thread has stopped cleaning nmethods.
+    MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
   }

   void work_second_pass(uint worker_id) {
@@ -5183,6 +5190,9 @@
     // G1 specific cleanup work that has
     // been moved here to be done in parallel.
     ik->clean_dependent_nmethods();
+    if (JvmtiExport::has_redefined_a_class()) {
+      InstanceKlass::purge_previous_versions(ik);
+    }
   }

   void work() {
@@ -5217,8 +5227,20 @@
       _klass_cleaning_task(is_alive) {
   }

+  void pre_work_verification() {
+    // The VM Thread will have registered Metadata during the single-threaded phase of MetadataStackOnMark.
+    assert(Thread::current()->is_VM_thread()
+           || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+  }
+
+  void post_work_verification() {
+    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+  }
+
   // The parallel work done by all worker threads.
   void work(uint worker_id) {
+    pre_work_verification();
+
     // Do first pass of code cache cleaning.
     _code_cache_task.work_first_pass(worker_id);

@@ -5237,6 +5259,8 @@

     // Clean all klasses that were not unloaded.
     _klass_cleaning_task.work();
+
+    post_work_verification();
   }
 };

--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp	2015-01-08 21:23:31.160148624 +0100
@@ -25,8 +25,9 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"

-void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
+bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
                                                     jlong* totals,
                                                     jbyte* accuracy,
                                                     jint len) {
+  return false;
 }
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	2015-01-08 21:23:31.160148624 +0100
@@ -1249,7 +1249,7 @@
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);

-  virtual void copy_allocation_context_stats(const jint* contexts,
+  virtual bool copy_allocation_context_stats(const jint* contexts,
                                              jlong* totals,
                                              jbyte* accuracy,
                                              jint len);
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	2015-01-08 21:23:31.160148624 +0100
@@ -1425,6 +1425,18 @@
 #endif // PRODUCT
 }

+bool G1CollectorPolicy::is_young_list_full() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_target_length = _young_list_target_length;
+  return young_list_length >= young_list_target_length;
+}
+
+bool G1CollectorPolicy::can_expand_young_list() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_max_length = _young_list_max_length;
+  return young_list_length < young_list_max_length;
+}
+
 uint G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy_ext.hpp	1970-01-01 01:00:00.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy_ext.hpp	2015-01-08 21:23:31.161148600 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
+
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+
+class G1CollectorPolicyExt : public G1CollectorPolicy { };
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	2015-01-08 21:23:31.161148600 +0100
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

 #include "gc_implementation/g1/collectionSetChooser.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/g1MMUTracker.hpp"
 #include "memory/collectorPolicy.hpp"

@@ -803,7 +804,7 @@

   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  size_t expansion_amount();
+  virtual size_t expansion_amount();

   // Print tracing information.
   void print_tracing_info() const;
@@ -822,17 +823,9 @@

   size_t young_list_target_length() const { return _young_list_target_length; }

-  bool is_young_list_full() {
-    uint young_list_length = _g1->young_list()->length();
-    uint young_list_target_length = _young_list_target_length;
-    return young_list_length >= young_list_target_length;
-  }
+  bool is_young_list_full();

-  bool can_expand_young_list() {
-    uint young_list_length = _g1->young_list()->length();
-    uint young_list_max_length = _young_list_max_length;
-    return young_list_length < young_list_max_length;
-  }
+  bool can_expand_young_list();

   uint young_list_max_length() {
     return _young_list_max_length;
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	2015-01-08 21:23:31.161148600 +0100
@@ -1015,11 +1015,14 @@
 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
-  if (_gc_time_stamp < g1h->get_gc_time_stamp())
-    return top();
-  else
+  HeapWord* local_top = top();
+  OrderAccess::loadload();
+  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
+    return local_top;
+  } else {
     return Space::saved_mark_word();
   }
+}

 void G1OffsetTableContigSpace::record_top_and_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	2015-01-08 21:23:31.161148600 +0100
@@ -213,8 +213,12 @@
   assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
-  ConcurrentMarkThread::slt()->
-    manipulatePLL(SurrogateLockerThread::acquirePLL);
+  SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
+  if (slt != NULL) {
+    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
+  } else {
+    SurrogateLockerThread::report_missing_slt();
+  }
 }

 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	2015-01-08 21:23:31.161148600 +0100
@@ -60,7 +60,7 @@
   VM_G1CollectFull(unsigned int gc_count_before,
                    unsigned int full_gc_count_before,
                    GCCause::Cause cause)
-    : VM_GC_Operation(gc_count_before, cause, full_gc_count_before) { }
+    : VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { }
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
   virtual void doit();
   virtual const char* name() const {
--- jdk8/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	2015-01-08 21:23:31.161148600 +0100
@@ -137,6 +137,13 @@
   return res;
 }

+void SurrogateLockerThread::report_missing_slt() {
+  vm_exit_during_initialization(
+    "GC before GC support fully initialized: "
+    "SLT is needed but has not yet been created.");
+  ShouldNotReachHere();
+}
+
 void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) {
   MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
   assert(_buffer == empty, "Should be empty");
--- jdk8/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	2015-01-08 21:23:31.162148576 +0100
@@ -93,6 +93,9 @@
  public:
   static SurrogateLockerThread* make(TRAPS);

+  // Terminate VM with error message that SLT needed but not yet created.
+  static void report_missing_slt();
+
   SurrogateLockerThread();

   bool is_hidden_from_external_view() const { return true; }
--- jdk8/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	2015-01-08 21:23:31.162148576 +0100
@@ -29,8 +29,8 @@
 #include "memory/heapInspection.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
-
 #if INCLUDE_SERVICES

 void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) {
--- jdk8/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	2015-01-08 21:23:31.162148576 +0100
@@ -641,10 +641,13 @@
   // For each context in contexts, set the corresponding entries in the totals
   // and accuracy arrays to the current values held by the statistics. Each
   // array should be of length len.
-  virtual void copy_allocation_context_stats(const jint* contexts,
+  // Returns true if there are more stats available.
+  virtual bool copy_allocation_context_stats(const jint* contexts,
                                              jlong* totals,
                                              jbyte* accuracy,
-                                             jint len) { }
+                                             jint len) {
+    return false;
+  }

   /////////////// Unit tests ///////////////

--- jdk8/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	2015-01-08 21:23:31.163148552 +0100
@@ -2813,11 +2813,11 @@
       if (TraceExceptions) {
         ttyLocker ttyl;
         ResourceMark rm;
-        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
+        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
         tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
         tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
-                      istate->bcp() - (intptr_t)METHOD->code_base(),
-                      continuation_bci, THREAD);
+                      (int)(istate->bcp() - METHOD->code_base()),
+                      (int)continuation_bci, p2i(THREAD));
       }
       // for AbortVMOnException flag
       NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
@@ -2829,11 +2829,11 @@
       if (TraceExceptions) {
         ttyLocker ttyl;
         ResourceMark rm;
-        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
+        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
         tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
         tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
-                      istate->bcp() - (intptr_t)METHOD->code_base(),
-                      THREAD);
+                      (int)(istate->bcp() - METHOD->code_base()),
+                      p2i(THREAD));
       }
       // for AbortVMOnException flag
       NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
@@ -3432,7 +3432,7 @@
   tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
   tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
   tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
-  tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp);
+  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
   tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
   tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
   tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
--- jdk8/hotspot/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp	2015-01-08 21:23:31.163148552 +0100
@@ -86,11 +86,11 @@
                 " \t-> " PTR_FORMAT "(%d)",                             \
                 (int) THREAD->osthread()->thread_id(),                  \
                 BCI(),                                                  \
-                MDX(),                                                  \
+                p2i(MDX()),                                             \
                 (MDX() == NULL                                          \
                  ? 0                                                    \
                  : istate->method()->method_data()->dp_to_di((address)MDX())), \
-                mdx,                                                    \
+                p2i(mdx),                                               \
                 istate->method()->method_data()->dp_to_di((address)mdx) \
                 );                                                      \
   };                                                                    \
@@ -107,7 +107,7 @@
       MethodData *md = istate->method()->method_data();                 \
       tty->cr();                                                        \
       tty->print("method data at mdx " PTR_FORMAT "(0) for",            \
-                 md->data_layout_at(md->bci_to_di(0)));                 \
+                 p2i(md->data_layout_at(md->bci_to_di(0))));            \
       istate->method()->print_short_name(tty);                          \
       tty->cr();                                                        \
       if (md != NULL) {                                                 \
@@ -115,7 +115,7 @@
         address mdx = (address) MDX();                                  \
         if (mdx != NULL) {                                              \
           tty->print_cr("current mdx " PTR_FORMAT "(%d)",               \
-                        mdx,                                            \
+                        p2i(mdx),                                       \
                         istate->method()->method_data()->dp_to_di(mdx)); \
         }                                                               \
       } else {                                                          \
--- jdk8/hotspot/src/share/vm/interpreter/bytecodes.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/interpreter/bytecodes.hpp	2015-01-08 21:23:31.163148552 +0100
@@ -423,8 +423,10 @@
   static bool        is_astore    (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
                                                                            || code == _astore_2 || code == _astore_3); }

+  static bool        is_const     (Code code)    { return (_aconst_null <= code && code <= _ldc2_w); }
   static bool        is_zero_const(Code code)    { return (code == _aconst_null || code == _iconst_0
                                                            || code == _fconst_0 || code == _dconst_0); }
+  static bool        is_return    (Code code)    { return (_ireturn <= code && code <= _return); }
   static bool        is_invoke    (Code code)    { return (_invokevirtual <= code && code <= _invokedynamic); }
   static bool        has_receiver (Code code)    { assert(is_invoke(code), ""); return code == _invokevirtual ||
                                                                                        code == _invokespecial ||
--- jdk8/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Oct 23 15:32:14 2014 -0700
+++ jdk8/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Dec 17 10:43:38 2014 -0800
@@ -398,6 +398,18 @@
   int handler_bci;
   int current_bci = bci(thread);

+  if (thread->frames_to_pop_failed_realloc() > 0) {
+    // Allocation of scalar replaced object used in this frame
+    // failed. Unconditionally pop the frame.
+    thread->dec_frames_to_pop_failed_realloc();
+    thread->set_vm_result(h_exception());
+    // If the method is synchronized we already unlocked the monitor
+    // during deoptimization so the interpreter needs to skip it when
+    // the frame is popped.
+    thread->set_do_not_unlock_if_synchronized(true);
+    return Interpreter::remove_activation_entry();
+  }
+
   // Need to do this check first since when _do_not_unlock_if_synchronized
   // is set, we don't want to trigger any classloading which may make calls
   // into java, or surprisingly find a matching exception handler for bci 0
--- jdk8/hotspot/src/share/vm/memory/collectorPolicy.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/memory/collectorPolicy.cpp	2015-01-08 21:23:31.164148528 +0100
@@ -183,13 +183,9 @@
   // Requirements of any new remembered set implementations must be added here.
   size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);

-  // Parallel GC does its own alignment of the generations to avoid requiring a
-  // large page (256M on some platforms) for the permanent generation. The
-  // other collectors should also be updated to do their own alignment and then
-  // this use of lcm() should be removed.
-  if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
-      // alignment is large page aware
+  if (UseLargePages) {
+    // In presence of large pages we have to make sure that our
+    // alignment is large page aware.
     alignment = lcm(os::large_page_size(), alignment);
   }

--- jdk8/hotspot/src/share/vm/memory/filemap.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/memory/filemap.cpp	2015-01-08 21:23:31.164148528 +0100
@@ -97,12 +97,12 @@
         tty->print_cr("UseSharedSpaces: %s", msg);
       }
     }
-  }
-  va_end(ap);
     UseSharedSpaces = false;
     assert(current_info() != NULL, "singleton must be registered");
     current_info()->close();
   }
+  va_end(ap);
+}

 // Fill in the fileMapInfo structure with data about this VM instance.

--- jdk8/hotspot/src/share/vm/memory/metadataFactory.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/memory/metadataFactory.hpp	2015-01-08 21:23:31.164148528 +0100
@@ -64,6 +64,12 @@

   template <typename T>
   static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
+    if (DumpSharedSpaces) {
+      // FIXME: the freeing code is buggy, especially when PrintSharedSpaces is enabled.
+      // Disable for now -- this means if you specify bad classes in your classlist you
+      // may have wasted space inside the archive.
+      return;
+    }
     if (data != NULL) {
       assert(loader_data != NULL, "shouldn't pass null");
       assert(!data->is_shared(), "cannot deallocate array in shared spaces");
--- jdk8/hotspot/src/share/vm/memory/metaspace.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/memory/metaspace.cpp	2015-01-08 21:23:31.165148504 +0100
@@ -3181,7 +3181,7 @@
     MetaspaceGC::initialize();

     // Initialize the alignment for shared spaces.
-    int max_alignment = os::vm_page_size();
+    int max_alignment = os::vm_allocation_granularity();
     size_t cds_total = 0;

     MetaspaceShared::set_max_alignment(max_alignment);
@@ -3195,6 +3195,16 @@
     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);

+    // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods()
+    uintx min_misc_code_size = align_size_up(
+      (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
+        (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
+      max_alignment);
+
+    if (SharedMiscCodeSize < min_misc_code_size) {
+      report_out_of_shared_space(SharedMiscCode);
+    }
+
     // Initialize with the sum of the shared space sizes. The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
--- jdk8/hotspot/src/share/vm/memory/metaspaceShared.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/memory/metaspaceShared.cpp	2015-01-08 21:23:31.165148504 +0100
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "classfile/dictionary.hpp"
+#include "classfile/classLoaderExt.hpp"
 #include "classfile/loaderConstraints.hpp"
 #include "classfile/placeholders.hpp"
 #include "classfile/sharedClassUtil.hpp"
@@ -39,6 +40,7 @@
 #include "runtime/signature.hpp"
 #include "runtime/vm_operations.hpp"
 #include "runtime/vmThread.hpp"
+#include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"

 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -533,6 +535,8 @@
   ClassLoader::copy_package_info_table(&md_top, md_end);
   ClassLoader::verify();

+  ClassLoaderExt::copy_lookup_cache_to_archive(&md_top, md_end);
+
   // Write the other data to the output array.
   WriteClosure wc(md_top, md_end);
   MetaspaceShared::serialize(&wc);
@@ -745,6 +749,8 @@
   }
   tty->print_cr("Loading classes to share: done.");

+  ClassLoaderExt::init_lookup_cache(THREAD);
+
   if (PrintSharedSpaces) {
     tty->print_cr("Shared spaces: preloaded %d classes", class_count);
   }
@@ -845,7 +851,7 @@
       ik->link_class(THREAD);
       if (HAS_PENDING_EXCEPTION) {
         ResourceMark rm;
-        tty->print_cr("Preload Error: Verification failed for %s",
+        tty->print_cr("Preload Warning: Verification failed for %s",
                       ik->external_name());
         CLEAR_PENDING_EXCEPTION;
         ik->set_in_error_state();
@@ -961,7 +967,7 @@
 #endif
     // If -Xshare:on is specified, print out the error message and exit VM,
    // otherwise, set UseSharedSpaces to false and continue.
-    if (RequireSharedSpaces) {
+    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
    } else {
      FLAG_SET_DEFAULT(UseSharedSpaces, false);
@@ -1060,6 +1066,8 @@
   buffer += sizeof(intptr_t);
   buffer += len;

+  buffer = ClassLoaderExt::restore_lookup_cache_from_archive(buffer);
+
   intptr_t* array = (intptr_t*)buffer;
   ReadClosure rc(&array);
   serialize(&rc);
--- jdk8/hotspot/src/share/vm/memory/metaspaceShared.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/memory/metaspaceShared.hpp	2015-01-08 21:23:31.165148504 +0100
@@ -32,9 +32,9 @@

 #define LargeSharedArchiveSize    (300*M)
 #define HugeSharedArchiveSize     (800*M)
-#define ReadOnlyRegionPercentage  0.4
-#define ReadWriteRegionPercentage 0.55
-#define MiscDataRegionPercentage  0.03
+#define ReadOnlyRegionPercentage  0.39
+#define ReadWriteRegionPercentage 0.50
+#define MiscDataRegionPercentage  0.09
 #define MiscCodeRegionPercentage  0.02
 #define LargeThresholdClassCount  5000
 #define HugeThresholdClassCount   40000
@@ -58,10 +58,15 @@
  public:
   enum {
     vtbl_list_size = 17,   // number of entries in the shared space vtable list.
-    num_virtuals = 200     // maximum number of virtual functions
+    num_virtuals = 200,    // maximum number of virtual functions
                            // If virtual functions are added to Metadata,
                            // this number needs to be increased. Also,
                            // SharedMiscCodeSize will need to be increased.
+                           // The following 2 sizes were based on
+                           // MetaspaceShared::generate_vtable_methods()
+    vtbl_method_size = 16, // conservative size of the mov1 and jmp instructions
+                           // for the x64 platform
+    vtbl_common_code_size = (1*K) // conservative size of the "common_code" for the x64 platform
   };

   enum {
--- jdk8/hotspot/src/share/vm/memory/universe.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/memory/universe.cpp	2015-01-08 21:23:31.166148480 +0100
@@ -78,7 +78,7 @@
 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy_ext.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #endif // INCLUDE_ALL_GCS

@@ -119,6 +119,7 @@
 oop Universe::_out_of_memory_error_class_metaspace    = NULL;
 oop Universe::_out_of_memory_error_array_size         = NULL;
 oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
+oop Universe::_out_of_memory_error_realloc_objects    = NULL;
 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
 bool Universe::_verify_in_progress                    = false;
@@ -190,6 +191,7 @@
   f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
   f->do_oop((oop*)&_out_of_memory_error_array_size);
   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
+  f->do_oop((oop*)&_out_of_memory_error_realloc_objects);
   f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
   f->do_oop((oop*)&_null_ptr_exception_instance);
   f->do_oop((oop*)&_arithmetic_exception_instance);
@@ -574,7 +576,8 @@
           (throwable() != Universe::_out_of_memory_error_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_array_size) &&
-          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
+          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
+          (throwable() != Universe::_out_of_memory_error_realloc_objects));
 }


@@ -798,7 +801,7 @@

   } else if (UseG1GC) {
 #if INCLUDE_ALL_GCS
-    G1CollectorPolicy* g1p = new G1CollectorPolicy();
+    G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
     g1p->initialize_all();
     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
     Universe::_collectedHeap = g1h;
@@ -1044,6 +1047,7 @@
     Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
     Universe::_out_of_memory_error_gc_overhead_limit =
       k_h->allocate_instance(CHECK_false);
+    Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false);

     // Setup preallocated NullPointerException
     // (this is currently used for a cheap & dirty solution in compiler exception handling)
@@ -1083,6 +1087,9 @@
     msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());

+    msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false);
+    java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg());
+
     msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
     java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());

--- jdk8/hotspot/src/share/vm/memory/universe.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/memory/universe.hpp	2015-01-08 21:23:31.166148480 +0100
@@ -157,6 +157,7 @@
   static oop          _out_of_memory_error_class_metaspace;
   static oop          _out_of_memory_error_array_size;
   static oop          _out_of_memory_error_gc_overhead_limit;
+  static oop          _out_of_memory_error_realloc_objects;

   static Array<int>*       _the_empty_int_array;    // Canonicalized int array
   static Array<u2>*        _the_empty_short_array;  // Canonicalized short array
@@ -328,6 +329,7 @@
   static oop out_of_memory_error_class_metaspace()  { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace);   }
   static oop out_of_memory_error_array_size()       { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
   static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit);  }
+  static oop out_of_memory_error_realloc_objects()  { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects);  }

   // Accessors needed for fast allocation
   static Klass** boolArrayKlassObj_addr()           { return &_boolArrayKlassObj;   }
--- jdk8/hotspot/src/share/vm/oops/constantPool.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/oops/constantPool.cpp 2015-01-08 21:23:31.166148480 +0100
|
|
@@ -1817,11 +1817,22 @@
|
|
|
|
void ConstantPool::set_on_stack(const bool value) {
|
|
if (value) {
|
|
- _flags |= _on_stack;
|
|
+ int old_flags = *const_cast<volatile int *>(&_flags);
|
|
+ while ((old_flags & _on_stack) == 0) {
|
|
+ int new_flags = old_flags | _on_stack;
|
|
+ int result = Atomic::cmpxchg(new_flags, &_flags, old_flags);
|
|
+
|
|
+ if (result == old_flags) {
|
|
+ // Succeeded.
|
|
+ MetadataOnStackMark::record(this, Thread::current());
|
|
+ return;
|
|
+ }
|
|
+ old_flags = result;
|
|
+ }
|
|
} else {
|
|
+ // Clearing is done single-threadedly.
|
|
_flags &= ~_on_stack;
|
|
}
|
|
- if (value) MetadataOnStackMark::record(this);
|
|
}
|
|
|
|
// JSR 292 support for patching constant pool oops after the class is linked and
|
|
--- jdk8/hotspot/src/share/vm/oops/instanceKlass.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/oops/instanceKlass.cpp 2015-01-08 21:23:31.167148456 +0100
|
|
@@ -780,6 +780,41 @@
|
|
}
|
|
}
|
|
|
|
+// Eagerly initialize superinterfaces that declare default methods (concrete instance: any access)
|
|
+void InstanceKlass::initialize_super_interfaces(instanceKlassHandle this_oop, TRAPS) {
|
|
+ if (this_oop->has_default_methods()) {
|
|
+ for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) {
|
|
+ Klass* iface = this_oop->local_interfaces()->at(i);
|
|
+ InstanceKlass* ik = InstanceKlass::cast(iface);
|
|
+ if (ik->should_be_initialized()) {
|
|
+ if (ik->has_default_methods()) {
|
|
+ ik->initialize_super_interfaces(ik, THREAD);
|
|
+ }
|
|
+ // Only initialize() interfaces that "declare" concrete methods.
|
|
+ // has_default_methods drives searching superinterfaces since it
|
|
+ // means has_default_methods in its superinterface hierarchy
|
|
+ if (!HAS_PENDING_EXCEPTION && ik->declares_default_methods()) {
|
|
+ ik->initialize(THREAD);
|
|
+ }
|
|
+ if (HAS_PENDING_EXCEPTION) {
|
|
+ Handle e(THREAD, PENDING_EXCEPTION);
|
|
+ CLEAR_PENDING_EXCEPTION;
|
|
+ {
|
|
+ EXCEPTION_MARK;
|
|
+ // Locks object, set state, and notify all waiting threads
|
|
+ this_oop->set_initialization_state_and_notify(
|
|
+ initialization_error, THREAD);
|
|
+
|
|
+ // ignore any exception thrown, superclass initialization error is
|
|
+ // thrown below
|
|
+ CLEAR_PENDING_EXCEPTION;
|
|
+ }
|
|
+ THROW_OOP(e());
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
|
|
void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
|
|
// Make sure klass is linked (verified) before initialization
|
|
@@ -859,33 +894,11 @@
|
|
}
|
|
}
|
|
|
|
+ // Recursively initialize any superinterfaces that declare default methods
|
|
+ // Only need to recurse if has_default_methods which includes declaring and
|
|
+ // inheriting default methods
|
|
if (this_oop->has_default_methods()) {
|
|
- // Step 7.5: initialize any interfaces which have default methods
|
|
- for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) {
|
|
- Klass* iface = this_oop->local_interfaces()->at(i);
|
|
- InstanceKlass* ik = InstanceKlass::cast(iface);
|
|
- if (ik->has_default_methods() && ik->should_be_initialized()) {
|
|
- ik->initialize(THREAD);
|
|
-
|
|
- if (HAS_PENDING_EXCEPTION) {
|
|
- Handle e(THREAD, PENDING_EXCEPTION);
|
|
- CLEAR_PENDING_EXCEPTION;
|
|
- {
|
|
- EXCEPTION_MARK;
|
|
- // Locks object, set state, and notify all waiting threads
|
|
- this_oop->set_initialization_state_and_notify(
|
|
- initialization_error, THREAD);
|
|
-
|
|
- // ignore any exception thrown, superclass initialization error is
|
|
- // thrown below
|
|
- CLEAR_PENDING_EXCEPTION;
|
|
- }
|
|
- DTRACE_CLASSINIT_PROBE_WAIT(
|
|
- super__failed, InstanceKlass::cast(this_oop()), -1, wait);
|
|
- THROW_OOP(e());
|
|
- }
|
|
- }
|
|
- }
|
|
+ this_oop->initialize_super_interfaces(this_oop, CHECK);
|
|
}
|
|
|
|
// Step 8
|
|
@@ -2877,6 +2890,22 @@
|
|
OsrList_lock->unlock();
|
|
}
|
|
|
|
+int InstanceKlass::mark_osr_nmethods(const Method* m) {
|
|
+ // This is a short non-blocking critical region, so the no safepoint check is ok.
|
|
+ MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
|
|
+ nmethod* osr = osr_nmethods_head();
|
|
+ int found = 0;
|
|
+ while (osr != NULL) {
|
|
+ assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
|
|
+ if (osr->method() == m) {
|
|
+ osr->mark_for_deoptimization();
|
|
+ found++;
|
|
+ }
|
|
+ osr = osr->osr_link();
|
|
+ }
|
|
+ return found;
|
|
+}
|
|
+
|
|
nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
|
|
// This is a short non-blocking critical region, so the no safepoint check is ok.
|
|
OsrList_lock->lock_without_safepoint_check();
|
|
@@ -2918,28 +2947,27 @@
|
|
return NULL;
|
|
}
|
|
|
|
-void InstanceKlass::add_member_name(int index, Handle mem_name) {
|
|
+bool InstanceKlass::add_member_name(Handle mem_name) {
|
|
jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
|
|
MutexLocker ml(MemberNameTable_lock);
|
|
- assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
|
|
DEBUG_ONLY(No_Safepoint_Verifier nsv);
|
|
|
|
- if (_member_names == NULL) {
|
|
- _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
|
|
- }
|
|
- _member_names->add_member_name(index, mem_name_wref);
|
|
+ // Check if method has been redefined while taking out MemberNameTable_lock, if so
|
|
+ // return false. We cannot cache obsolete methods. They will crash when the function
|
|
+ // is called!
|
|
+ Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name());
|
|
+ if (method->is_obsolete()) {
|
|
+ return false;
|
|
+ } else if (method->is_old()) {
|
|
+ // Replace method with redefined version
|
|
+ java_lang_invoke_MemberName::set_vmtarget(mem_name(), method_with_idnum(method->method_idnum()));
|
|
}
|
|
|
|
-oop InstanceKlass::get_member_name(int index) {
|
|
- MutexLocker ml(MemberNameTable_lock);
|
|
- assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
|
|
- DEBUG_ONLY(No_Safepoint_Verifier nsv);
|
|
-
|
|
if (_member_names == NULL) {
|
|
- return NULL;
|
|
+ _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
|
|
}
|
|
- oop mem_name =_member_names->get_member_name(index);
|
|
- return mem_name;
|
|
+ _member_names->add_member_name(mem_name_wref);
|
|
+ return true;
|
|
}
|
|
|
|
// -----------------------------------------------------------------------------------------------------
|
|
--- jdk8/hotspot/src/share/vm/oops/instanceKlass.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/oops/instanceKlass.hpp 2015-01-08 21:23:31.167148456 +0100
@@ -234,7 +234,8 @@
_misc_should_verify_class = 1 << 2, // allow caching of preverification
_misc_is_anonymous = 1 << 3, // has embedded _host_klass field
_misc_is_contended = 1 << 4, // marked with contended annotation
- _misc_has_default_methods = 1 << 5 // class/superclass/implemented interfaces has default methods
+ _misc_has_default_methods = 1 << 5, // class/superclass/implemented interfaces has default methods
+ _misc_declares_default_methods = 1 << 6 // directly declares default methods (any access)
};
u2 _misc_flags;
u2 _minor_version; // minor version number of class file
@@ -680,6 +681,17 @@
}
}

+ bool declares_default_methods() const {
+ return (_misc_flags & _misc_declares_default_methods) != 0;
+ }
+ void set_declares_default_methods(bool b) {
+ if (b) {
+ _misc_flags |= _misc_declares_default_methods;
+ } else {
+ _misc_flags &= ~_misc_declares_default_methods;
+ }
+ }
+
// for adding methods, ConstMethod::UNSET_IDNUM means no more ids available
inline u2 next_method_idnum();
void set_initial_method_idnum(u2 value) { _idnum_allocated_count = value; }
@@ -771,6 +783,7 @@
void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
void add_osr_nmethod(nmethod* n);
void remove_osr_nmethod(nmethod* n);
+ int mark_osr_nmethods(const Method* m);
nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;

// Breakpoint support (see methods on Method* for details)
@@ -1046,6 +1059,7 @@
static bool link_class_impl (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS);
static bool verify_code (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS);
static void initialize_impl (instanceKlassHandle this_oop, TRAPS);
+ static void initialize_super_interfaces (instanceKlassHandle this_oop, TRAPS);
static void eager_initialize_impl (instanceKlassHandle this_oop);
static void set_initialization_state_and_notify_impl (instanceKlassHandle this_oop, ClassState state, TRAPS);
static void call_class_initializer_impl (instanceKlassHandle this_oop, TRAPS);
@@ -1077,8 +1091,7 @@
// JSR-292 support
MemberNameTable* member_names() { return _member_names; }
void set_member_names(MemberNameTable* member_names) { _member_names = member_names; }
- void add_member_name(int index, Handle member_name);
- oop get_member_name(int index);
+ bool add_member_name(Handle member_name);

public:
// JVMTI support
--- jdk8/hotspot/src/share/vm/oops/method.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/oops/method.cpp 2015-01-08 21:23:31.168148432 +0100
@@ -563,6 +563,15 @@
return true;
}

+bool Method::is_constant_getter() const {
+ int last_index = code_size() - 1;
+ // Check if the first 1-3 bytecodes are a constant push
+ // and the last bytecode is a return.
+ return (2 <= code_size() && code_size() <= 4 &&
+ Bytecodes::is_const(java_code_at(0)) &&
+ Bytecodes::length_for(java_code_at(0)) == last_index &&
+ Bytecodes::is_return(java_code_at(last_index)));
+}

bool Method::is_initializer() const {
return name() == vmSymbols::object_initializer_name() || is_static_initializer();
@@ -1880,9 +1889,12 @@
void Method::set_on_stack(const bool value) {
// Set both the method itself and its constant pool. The constant pool
// on stack means some method referring to it is also on the stack.
- _access_flags.set_on_stack(value);
constants()->set_on_stack(value);
- if (value) MetadataOnStackMark::record(this);
+
+ bool succeeded = _access_flags.set_on_stack(value);
+ if (value && succeeded) {
+ MetadataOnStackMark::record(this, Thread::current());
+ }
}

// Called when the class loader is unloaded to make all methods weak.
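Illustrative aside on the is_constant_getter() hunk above (a standalone C++ sketch, not part of the patch): the predicate accepts bodies that push one constant and immediately return it. The opcode values below are real JVM bytecodes, but the helper predicates are simplified assumptions for illustration, not HotSpot API:

#include <cstdio>

// Subset of constant-push opcodes: iconst_m1..iconst_5, bipush, sipush, ldc.
static bool is_const_push(unsigned char op) {
  return (op >= 0x02 && op <= 0x08) || op == 0x10 || op == 0x11 || op == 0x12;
}
// Instruction lengths: sipush carries 2 operand bytes, bipush/ldc carry 1.
static int const_push_len(unsigned char op) {
  return op == 0x11 ? 3 : (op >= 0x10 ? 2 : 1);
}
static bool is_return_op(unsigned char op) { // ireturn (0xac) .. return (0xb1)
  return op >= 0xac && op <= 0xb1;
}

static bool is_constant_getter(const unsigned char* code, int code_size) {
  int last_index = code_size - 1;
  return 2 <= code_size && code_size <= 4 &&
         is_const_push(code[0]) &&
         const_push_len(code[0]) == last_index &&
         is_return_op(code[last_index]);
}

int main() {
  const unsigned char ret_zero[] = { 0x03, 0xac };             // iconst_0; ireturn
  const unsigned char ret_1000[] = { 0x11, 0x03, 0xe8, 0xac }; // sipush 1000; ireturn
  printf("%d %d\n", is_constant_getter(ret_zero, 2), is_constant_getter(ret_1000, 4));
  return 0;
}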
--- jdk8/hotspot/src/share/vm/oops/methodData.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/oops/methodData.cpp 2015-01-08 21:23:31.168148432 +0100
@@ -1153,7 +1153,7 @@
_backedge_counter_start = 0;
_num_loops = 0;
_num_blocks = 0;
- _would_profile = true;
+ _would_profile = unknown;

#if INCLUDE_RTM_OPT
_rtm_state = NoRTM; // No RTM lock eliding by default
--- jdk8/hotspot/src/share/vm/oops/methodData.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/oops/methodData.hpp 2015-01-08 21:23:31.169148408 +0100
@@ -2099,7 +2099,8 @@
short _num_loops;
short _num_blocks;
// Does this method contain anything worth profiling?
- bool _would_profile;
+ enum WouldProfile {unknown, no_profile, profile};
+ WouldProfile _would_profile;

// Size of _data array in bytes. (Excludes header and extra_data fields.)
int _data_size;
@@ -2268,8 +2269,8 @@
}
#endif

- void set_would_profile(bool p) { _would_profile = p; }
- bool would_profile() const { return _would_profile; }
+ void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; }
+ bool would_profile() const { return _would_profile != no_profile; }

int num_loops() const { return _num_loops; }
void set_num_loops(int n) { _num_loops = n; }
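Illustrative aside on the WouldProfile hunks above (a standalone sketch, not part of the patch): a tri-state lets a freshly reset MethodData say "not decided yet" instead of a premature true, while an explicit "no" stays representable. A minimal rendering of the same idea:

#include <cassert>

enum WouldProfile { unknown, no_profile, profile };

struct ProfileState {
  WouldProfile _would_profile;
  ProfileState() : _would_profile(unknown) {}
  void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; }
  // Treat "unknown" as potentially worth profiling, as the patch does.
  bool would_profile() const { return _would_profile != no_profile; }
};

int main() {
  ProfileState s;
  assert(s.would_profile());   // unknown => still assume it may profile
  s.set_would_profile(false);
  assert(!s.would_profile());  // an explicit "no" is now distinguishable
  return 0;
}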
--- jdk8/hotspot/src/share/vm/oops/method.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/oops/method.hpp 2015-01-08 21:23:31.168148432 +0100
@@ -624,6 +624,9 @@
// returns true if the method is an accessor function (setter/getter).
bool is_accessor() const;

+ // returns true if the method does nothing but return a constant of primitive type
+ bool is_constant_getter() const;
+
// returns true if the method is an initializer (<init> or <clinit>).
bool is_initializer() const;

@@ -798,6 +801,10 @@
return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
}

+ int mark_osr_nmethods() {
+ return method_holder()->mark_osr_nmethods(this);
+ }
+
nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
}
--- jdk8/hotspot/src/share/vm/oops/objArrayOop.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/oops/objArrayOop.hpp 2015-01-08 21:23:31.169148408 +0100
@@ -45,9 +45,10 @@
private:
// Give size of objArrayOop in HeapWords minus the header
static int array_size(int length) {
- const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+ const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
"Else the following (new) computation would be in error");
+ uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
#ifdef ASSERT
// The old code is left in for sanity-checking; it'll
// go away pretty soon. XXX
@@ -55,16 +56,15 @@
// oop->length() * HeapWordsPerOop;
// With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
// The oop elements are aligned up to wordSize
- const int HeapWordsPerOop = heapOopSize/HeapWordSize;
- int old_res;
+ const uint HeapWordsPerOop = heapOopSize/HeapWordSize;
+ uint old_res;
if (HeapWordsPerOop > 0) {
old_res = length * HeapWordsPerOop;
} else {
- old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+ old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
}
-#endif // ASSERT
- int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
assert(res == old_res, "Inconsistency between old and new.");
+#endif // ASSERT
return res;
}

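Illustrative aside on the array_size() hunk above (a standalone sketch, not part of the patch): the switch to uint matters because the rounding-up division can overflow signed int arithmetic for lengths near max_jint; the same computation on unsigned values is well defined. OopsPerHeapWord is assumed to be 2 here (4-byte narrow oops in 8-byte heap words):

#include <climits>
#include <cstdio>

int main() {
  const unsigned OopsPerHeapWord = 2;
  int length = INT_MAX;                // a maximal array length
  // With int operands, length + OopsPerHeapWord - 1 would overflow (UB).
  // The unsigned form rounds up correctly:
  unsigned res = ((unsigned)length + OopsPerHeapWord - 1) / OopsPerHeapWord;
  printf("%u\n", res);                 // 1073741824, not a garbage value
  return 0;
}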
--- jdk8/hotspot/src/share/vm/oops/typeArrayOop.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/oops/typeArrayOop.hpp 2015-01-08 21:23:31.169148408 +0100
@@ -150,7 +150,7 @@
DEBUG_ONLY(BasicType etype = Klass::layout_helper_element_type(lh));
assert(length <= arrayOopDesc::max_array_length(etype), "no overflow");

- julong size_in_bytes = length;
+ julong size_in_bytes = (juint)length;
size_in_bytes <<= element_shift;
size_in_bytes += instance_header_size;
julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize);
--- jdk8/hotspot/src/share/vm/opto/c2_globals.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/c2_globals.hpp 2015-01-08 21:23:31.169148408 +0100
@@ -476,6 +476,9 @@
product(bool, DoEscapeAnalysis, true, \
"Perform escape analysis") \
\
+ product(double, EscapeAnalysisTimeout, 20. DEBUG_ONLY(+40.), \
+ "Abort EA when it reaches time limit (in sec)") \
+ \
develop(bool, ExitEscapeAnalysisOnTimeout, true, \
"Exit or throw assert in EA when it reaches time limit") \
\
@@ -647,7 +650,7 @@
develop(bool, AlwaysIncrementalInline, false, \
"do all inlining incrementally") \
\
- product(intx, LiveNodeCountInliningCutoff, 20000, \
+ product(intx, LiveNodeCountInliningCutoff, 40000, \
"max number of live nodes in a method") \
\
diagnostic(bool, OptimizeExpensiveOps, true, \
--- jdk8/hotspot/src/share/vm/opto/callGenerator.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/callGenerator.cpp 2015-01-08 21:23:31.170148384 +0100
@@ -862,7 +862,7 @@
call_does_dispatch, vtable_index); // out-parameters
// We lack profiling at this call but type speculation may
// provide us with a type
- speculative_receiver_type = receiver_type->speculative_type();
+ speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
}

CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
--- jdk8/hotspot/src/share/vm/opto/coalesce.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/coalesce.cpp 2015-01-08 21:23:31.170148384 +0100
@@ -281,8 +281,10 @@
Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
Node *copy;
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
- // Rematerialize constants instead of copying them
- if( m->is_Mach() && m->as_Mach()->is_Con() &&
+ // Rematerialize constants instead of copying them.
+ // We do this only for immediate constants, we avoid constant table loads
+ // because that will unsafely extend the live range of the constant table base.
+ if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
m->as_Mach()->rematerialize() ) {
copy = m->clone();
// Insert the copy in the predecessor basic block
@@ -317,7 +319,7 @@
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
// At this point it is unsafe to extend live ranges (6550579).
// Rematerialize only constants as we do for Phi above.
- if(m->is_Mach() && m->as_Mach()->is_Con() &&
+ if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the basic block, just before us
--- jdk8/hotspot/src/share/vm/opto/compile.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/compile.cpp 2015-01-08 21:23:31.171148360 +0100
@@ -685,7 +685,8 @@
_inlining_incrementally(false),
_print_inlining_list(NULL),
_print_inlining_idx(0),
- _interpreter_frame_size(0) {
+ _interpreter_frame_size(0),
+ _max_node_limit(MaxNodeLimit) {
C = this;

CompileWrapper cw(this);
@@ -1027,7 +1028,8 @@
_print_inlining_list(NULL),
_print_inlining_idx(0),
_allowed_reasons(0),
- _interpreter_frame_size(0) {
+ _interpreter_frame_size(0),
+ _max_node_limit(MaxNodeLimit) {
C = this;

#ifndef PRODUCT
@@ -1137,6 +1139,7 @@
set_do_count_invocations(false);
set_do_method_data_update(false);
set_rtm_state(NoRTM); // No RTM lock eliding by default
+ method_has_option_value("MaxNodeLimit", _max_node_limit);
#if INCLUDE_RTM_OPT
if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
int rtm_state = method()->method_data()->rtm_state();
--- jdk8/hotspot/src/share/vm/opto/compile.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/compile.hpp 2015-01-08 21:23:31.171148360 +0100
@@ -290,6 +290,7 @@
int _freq_inline_size; // Max hot method inline size for this compilation
int _fixed_slots; // count of frame slots not allocated by the register
// allocator i.e. locks, original deopt pc, etc.
+ uintx _max_node_limit; // Max unique node count during a single compilation.
// For deopt
int _orig_pc_slot;
int _orig_pc_slot_offset_in_bytes;
@@ -594,6 +595,9 @@
void set_rtm_state(RTMState s) { _rtm_state = s; }
bool use_rtm() const { return (_rtm_state & NoRTM) == 0; }
bool profile_rtm() const { return _rtm_state == ProfileRTM; }
+ uint max_node_limit() const { return (uint)_max_node_limit; }
+ void set_max_node_limit(uint n) { _max_node_limit = n; }
+
// check the CompilerOracle for special behaviours for this compile
bool method_has_option(const char * option) {
return method() != NULL && method()->has_option(option);
@@ -723,7 +727,7 @@
record_method_not_compilable(reason, true);
}
bool check_node_count(uint margin, const char* reason) {
- if (live_nodes() + margin > (uint)MaxNodeLimit) {
+ if (live_nodes() + margin > max_node_limit()) {
record_method_not_compilable(reason);
return true;
} else {
--- jdk8/hotspot/src/share/vm/opto/connode.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/connode.cpp 2015-01-08 21:23:31.171148360 +0100
@@ -441,6 +441,102 @@
return this;
}

+uint CastIINode::size_of() const {
+ return sizeof(*this);
+}
+
+uint CastIINode::cmp(const Node &n) const {
+ return TypeNode::cmp(n) && ((CastIINode&)n)._carry_dependency == _carry_dependency;
+}
+
+Node *CastIINode::Identity(PhaseTransform *phase) {
+ if (_carry_dependency) {
+ return this;
+ }
+ return ConstraintCastNode::Identity(phase);
+}
+
+const Type *CastIINode::Value(PhaseTransform *phase) const {
+ const Type *res = ConstraintCastNode::Value(phase);
+
+ // Try to improve the type of the CastII if we recognize a CmpI/If
+ // pattern.
+ if (_carry_dependency) {
+ if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) {
+ assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj");
+ Node* proj = in(0);
+ if (proj->in(0)->in(1)->is_Bool()) {
+ Node* b = proj->in(0)->in(1);
+ if (b->in(1)->Opcode() == Op_CmpI) {
+ Node* cmp = b->in(1);
+ if (cmp->in(1) == in(1) && phase->type(cmp->in(2))->isa_int()) {
+ const TypeInt* in2_t = phase->type(cmp->in(2))->is_int();
+ const Type* t = TypeInt::INT;
+ BoolTest test = b->as_Bool()->_test;
+ if (proj->is_IfFalse()) {
+ test = test.negate();
+ }
+ BoolTest::mask m = test._test;
+ jlong lo_long = min_jint;
+ jlong hi_long = max_jint;
+ if (m == BoolTest::le || m == BoolTest::lt) {
+ hi_long = in2_t->_hi;
+ if (m == BoolTest::lt) {
+ hi_long -= 1;
+ }
+ } else if (m == BoolTest::ge || m == BoolTest::gt) {
+ lo_long = in2_t->_lo;
+ if (m == BoolTest::gt) {
+ lo_long += 1;
+ }
+ } else if (m == BoolTest::eq) {
+ lo_long = in2_t->_lo;
+ hi_long = in2_t->_hi;
+ } else if (m == BoolTest::ne) {
+ // can't do any better
+ } else {
+ stringStream ss;
+ test.dump_on(&ss);
+ fatal(err_msg_res("unexpected comparison %s", ss.as_string()));
+ }
+ int lo_int = (int)lo_long;
+ int hi_int = (int)hi_long;
+
+ if (lo_long != (jlong)lo_int) {
+ lo_int = min_jint;
+ }
+ if (hi_long != (jlong)hi_int) {
+ hi_int = max_jint;
+ }
+
+ t = TypeInt::make(lo_int, hi_int, Type::WidenMax);
+
+ res = res->filter_speculative(t);
+
+ return res;
+ }
+ }
+ }
+ }
+ }
+ return res;
+}
+
+Node *CastIINode::Ideal_DU_postCCP(PhaseCCP *ccp) {
+ if (_carry_dependency) {
+ return NULL;
+ }
+ return ConstraintCastNode::Ideal_DU_postCCP(ccp);
+}
+
+#ifndef PRODUCT
+void CastIINode::dump_spec(outputStream *st) const {
+ TypeNode::dump_spec(st);
+ if (_carry_dependency) {
+ st->print(" carry dependency");
+ }
+}
+#endif

//=============================================================================

--- jdk8/hotspot/src/share/vm/opto/connode.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/connode.hpp 2015-01-08 21:23:31.171148360 +0100
@@ -241,10 +241,25 @@
//------------------------------CastIINode-------------------------------------
// cast integer to integer (different range)
class CastIINode: public ConstraintCastNode {
+ private:
+ // Can this node be removed post CCP or does it carry a required dependency?
+ const bool _carry_dependency;
+
+ protected:
+ virtual uint cmp( const Node &n ) const;
+ virtual uint size_of() const;
+
public:
- CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {}
+ CastIINode(Node *n, const Type *t, bool carry_dependency = false)
+ : ConstraintCastNode(n,t), _carry_dependency(carry_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
+ virtual Node *Identity( PhaseTransform *phase );
+ virtual const Type *Value( PhaseTransform *phase ) const;
+ virtual Node *Ideal_DU_postCCP( PhaseCCP * );
+#ifndef PRODUCT
+ virtual void dump_spec(outputStream *st) const;
+#endif
};

//------------------------------CastPPNode-------------------------------------
--- jdk8/hotspot/src/share/vm/opto/doCall.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/doCall.cpp 2015-01-08 21:23:31.172148336 +0100
@@ -410,6 +410,11 @@
ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
assert(declared_signature != NULL, "cannot be null");

+ // Bump max node limit for JSR292 users
+ if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
+ C->set_max_node_limit(3*MaxNodeLimit);
+ }
+
// uncommon-trap when callee is unloaded, uninitialized or will not link
// bailout when too many arguments for register representation
if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
@@ -791,7 +796,7 @@
Node* ex_klass_node = NULL;
if (has_ex_handler() && !ex_type->klass_is_exact()) {
Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
- ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+ ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));

// Compute the exception klass a little more cleverly.
// Obvious solution is to simple do a LoadKlass from the 'ex_node'.
@@ -809,7 +814,7 @@
continue;
}
Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
- Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+ Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
ex_klass_node->init_req( i, k );
}
_gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);
--- jdk8/hotspot/src/share/vm/opto/escape.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/escape.cpp 2015-01-08 21:23:31.172148336 +0100
@@ -37,6 +37,8 @@

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
+ _in_worklist(C->comp_arena()),
+ _next_pidx(0),
_collecting(true),
_verify(false),
_compile(C),
@@ -124,13 +126,19 @@
if (C->root() != NULL) {
ideal_nodes.push(C->root());
}
+ // Processed ideal nodes are unique on ideal_nodes list
+ // but several ideal nodes are mapped to the phantom_obj.
+ // To avoid duplicated entries on the following worklists
+ // add the phantom_obj only once to them.
+ ptnodes_worklist.append(phantom_obj);
+ java_objects_worklist.append(phantom_obj);
for( uint next = 0; next < ideal_nodes.size(); ++next ) {
Node* n = ideal_nodes.at(next);
// Create PointsTo nodes and add them to Connection Graph. Called
// only once per ideal node since ideal_nodes is Unique_Node list.
add_node_to_connection_graph(n, &delayed_worklist);
PointsToNode* ptn = ptnode_adr(n->_idx);
- if (ptn != NULL) {
+ if (ptn != NULL && ptn != phantom_obj) {
ptnodes_worklist.append(ptn);
if (ptn->is_JavaObject()) {
java_objects_worklist.append(ptn->as_JavaObject());
@@ -414,7 +422,7 @@
}
case Op_CreateEx: {
// assume that all exception objects globally escape
- add_java_object(n, PointsToNode::GlobalEscape);
+ map_ideal_node(n, phantom_obj);
break;
}
case Op_LoadKlass:
@@ -1065,13 +1073,8 @@
// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
// Set limit to 20 to catch situation when something did go wrong and
// bailout Escape Analysis.
- // Also limit build time to 30 sec (60 in debug VM).
+ // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20
-#ifdef ASSERT
-#define CG_BUILD_TIME_LIMIT 60.0
-#else
-#define CG_BUILD_TIME_LIMIT 30.0
-#endif

// Propagate GlobalEscape and ArgEscape escape states and check that
// we still have non-escaping objects. The method pushs on _worklist
@@ -1082,12 +1085,13 @@
// Now propagate references to all JavaObject nodes.
int java_objects_length = java_objects_worklist.length();
elapsedTimer time;
+ bool timeout = false;
int new_edges = 1;
int iterations = 0;
do {
while ((new_edges > 0) &&
- (iterations++ < CG_BUILD_ITER_LIMIT) &&
- (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+ (iterations++ < CG_BUILD_ITER_LIMIT)) {
+ double start_time = time.seconds();
time.start();
new_edges = 0;
// Propagate references to phantom_object for nodes pushed on _worklist
@@ -1096,7 +1100,29 @@
for (int next = 0; next < java_objects_length; ++next) {
JavaObjectNode* ptn = java_objects_worklist.at(next);
new_edges += add_java_object_edges(ptn, true);
+
+#define SAMPLE_SIZE 4
+ if ((next % SAMPLE_SIZE) == 0) {
+ // Each 4 iterations calculate how much time it will take
+ // to complete graph construction.
+ time.stop();
+ // Poll for requests from shutdown mechanism to quiesce compiler
+ // because Connection graph construction may take long time.
+ CompileBroker::maybe_block();
+ double stop_time = time.seconds();
+ double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
+ double time_until_end = time_per_iter * (double)(java_objects_length - next);
+ if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
+ timeout = true;
+ break; // Timeout
}
+ start_time = stop_time;
+ time.start();
+ }
+#undef SAMPLE_SIZE
+
+ }
+ if (timeout) break;
if (new_edges > 0) {
// Update escape states on each iteration if graph was updated.
if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
@@ -1104,9 +1130,12 @@
}
}
time.stop();
+ if (time.seconds() >= EscapeAnalysisTimeout) {
+ timeout = true;
+ break;
}
- if ((iterations < CG_BUILD_ITER_LIMIT) &&
- (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+ }
+ if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
time.start();
// Find fields which have unknown value.
int fields_length = oop_fields_worklist.length();
@@ -1119,18 +1148,21 @@
}
}
time.stop();
+ if (time.seconds() >= EscapeAnalysisTimeout) {
+ timeout = true;
+ break;
+ }
} else {
new_edges = 0; // Bailout
}
} while (new_edges > 0);

// Bailout if passed limits.
- if ((iterations >= CG_BUILD_ITER_LIMIT) ||
- (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
+ if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
Compile* C = _compile;
if (C->log() != NULL) {
C->log()->begin_elem("connectionGraph_bailout reason='reached ");
- C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
+ C->log()->text("%s", timeout ? "time" : "iterations");
C->log()->end_elem(" limit'");
}
assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
@@ -1147,7 +1179,6 @@
#endif

#undef CG_BUILD_ITER_LIMIT
-#undef CG_BUILD_TIME_LIMIT

// Find fields initialized by NULL for non-escaping Allocations.
int non_escaped_length = non_escaped_worklist.length();
@@ -1271,8 +1302,8 @@
}
}
}
- while(_worklist.length() > 0) {
- PointsToNode* use = _worklist.pop();
+ for (int l = 0; l < _worklist.length(); l++) {
+ PointsToNode* use = _worklist.at(l);
if (PointsToNode::is_base_use(use)) {
// Add reference from jobj to field and from field to jobj (field's base).
use = PointsToNode::get_use_node(use)->as_Field();
@@ -1319,6 +1350,8 @@
add_field_uses_to_worklist(use->as_Field());
}
}
+ _worklist.clear();
+ _in_worklist.Reset();
return new_edges;
}

@@ -1898,7 +1931,7 @@
return;
}
Compile* C = _compile;
- ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
+ ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
}

@@ -1909,7 +1942,7 @@
return;
}
Compile* C = _compile;
- ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
+ ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
}

@@ -1925,7 +1958,7 @@
es = PointsToNode::GlobalEscape;
}
Compile* C = _compile;
- FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
+ FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
_nodes.at_put(n->_idx, field);
}

@@ -1939,7 +1972,7 @@
return;
}
Compile* C = _compile;
- ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
+ ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
// Add edge from arraycopy node to source object.
(void)add_edge(ptadr, src);
@@ -2379,7 +2412,7 @@
}
}
}
- if ((int) (C->live_nodes() + 2*NodeLimitFudgeFactor) > MaxNodeLimit) {
+ if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
if (C->do_escape_analysis() == true && !C->failing()) {
// Retry compilation without escape analysis.
// If this is the first failure, the sentinel string will "stick"
@@ -2839,6 +2872,13 @@
continue;
}
}
+
+ const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
+ if (t == NULL)
+ continue; // not a TypeOopPtr
+ if (!t->klass_is_exact())
+ continue; // not an unique type
+
if (alloc->is_Allocate()) {
// Set the scalar_replaceable flag for allocation
// so it could be eliminated.
@@ -2857,10 +2897,7 @@
// - not determined to be ineligible by escape analysis
set_map(alloc, n);
set_map(n, alloc);
- const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
- if (t == NULL)
- continue; // not a TypeOopPtr
- const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
+ const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
igvn->hash_delete(n);
igvn->set_type(n, tinst);
n->raise_bottom_type(tinst);
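Illustrative aside on the EscapeAnalysisTimeout hunks above (a standalone sketch, not part of the patch): instead of checking the clock after every object, the loop samples every SAMPLE_SIZE iterations and extrapolates whether it can finish within the budget. The work-loop body is elided; names here are assumptions for illustration:

#include <chrono>
#include <cstdio>
#include <vector>

static double now_sec() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

// Returns false if the projected total time would exceed budget_sec.
static bool propagate_all(const std::vector<int>& worklist, double budget_sec) {
  const int SAMPLE_SIZE = 4;
  double start = now_sec();
  double sample_start = start;
  for (size_t next = 0; next < worklist.size(); ++next) {
    // ... one unit of propagation work would go here ...
    if ((next % SAMPLE_SIZE) == 0 && next > 0) {
      double t = now_sec();
      double time_per_iter = (t - sample_start) / SAMPLE_SIZE;
      double time_until_end = time_per_iter * (worklist.size() - next);
      if ((t - start) + time_until_end >= budget_sec)
        return false;              // projected to blow the budget: bail out early
      sample_start = t;
    }
  }
  return true;
}

int main() {
  std::vector<int> work(1000, 0);
  printf("%s\n", propagate_all(work, 20.0) ? "finished" : "timed out");
  return 0;
}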
--- jdk8/hotspot/src/share/vm/opto/escape.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/escape.hpp 2015-01-08 21:23:31.173148312 +0100
@@ -125,6 +125,8 @@
class FieldNode;
class ArraycopyNode;

+class ConnectionGraph;
+
// ConnectionGraph nodes
class PointsToNode : public ResourceObj {
GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
@@ -137,6 +139,7 @@

Node* const _node; // Ideal node corresponding to this PointsTo node.
const int _idx; // Cached ideal node's _idx
+ const uint _pidx; // Index of this node

public:
typedef enum {
@@ -165,17 +168,9 @@
} NodeFlags;


- PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
- _edges(C->comp_arena(), 2, 0, NULL),
- _uses (C->comp_arena(), 2, 0, NULL),
- _node(n),
- _idx(n->_idx),
- _type((u1)type),
- _escape((u1)es),
- _fields_escape((u1)es),
- _flags(ScalarReplaceable) {
- assert(n != NULL && es != UnknownEscape, "sanity");
- }
+ inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type);
+
+ uint pidx() const { return _pidx; }

Node* ideal_node() const { return _node; }
int idx() const { return _idx; }
@@ -243,14 +238,14 @@

class LocalVarNode: public PointsToNode {
public:
- LocalVarNode(Compile *C, Node* n, EscapeState es):
- PointsToNode(C, n, es, LocalVar) {}
+ LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es):
+ PointsToNode(CG, n, es, LocalVar) {}
};

class JavaObjectNode: public PointsToNode {
public:
- JavaObjectNode(Compile *C, Node* n, EscapeState es):
- PointsToNode(C, n, es, JavaObject) {
+ JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es):
+ PointsToNode(CG, n, es, JavaObject) {
if (es > NoEscape)
set_scalar_replaceable(false);
}
@@ -262,8 +257,8 @@
const bool _is_oop; // Field points to object
bool _has_unknown_base; // Has phantom_object base
public:
- FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
- PointsToNode(C, n, es, Field),
+ FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop):
+ PointsToNode(CG, n, es, Field),
_offset(offs), _is_oop(is_oop),
_has_unknown_base(false) {}

@@ -284,8 +279,8 @@

class ArraycopyNode: public PointsToNode {
public:
- ArraycopyNode(Compile *C, Node* n, EscapeState es):
- PointsToNode(C, n, es, Arraycopy) {}
+ ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es):
+ PointsToNode(CG, n, es, Arraycopy) {}
};

// Iterators for PointsTo node's edges:
@@ -323,11 +318,14 @@


class ConnectionGraph: public ResourceObj {
+ friend class PointsToNode;
private:
GrowableArray<PointsToNode*> _nodes; // Map from ideal nodes to
// ConnectionGraph nodes.

GrowableArray<PointsToNode*> _worklist; // Nodes to be processed
+ VectorSet _in_worklist;
+ uint _next_pidx;

bool _collecting; // Indicates whether escape information
// is still being collected. If false,
@@ -353,6 +351,8 @@
}
uint nodes_size() const { return _nodes.length(); }

+ uint next_pidx() { return _next_pidx++; }
+
// Add nodes to ConnectionGraph.
void add_local_var(Node* n, PointsToNode::EscapeState es);
void add_java_object(Node* n, PointsToNode::EscapeState es);
@@ -396,15 +396,26 @@
int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);

// Put node on worklist if it is (or was) not there.
- void add_to_worklist(PointsToNode* pt) {
- _worklist.push(pt);
- return;
+ inline void add_to_worklist(PointsToNode* pt) {
+ PointsToNode* ptf = pt;
+ uint pidx_bias = 0;
+ if (PointsToNode::is_base_use(pt)) {
+ // Create a separate entry in _in_worklist for a marked base edge
+ // because _worklist may have an entry for a normal edge pointing
+ // to the same node. To separate them use _next_pidx as bias.
+ ptf = PointsToNode::get_use_node(pt)->as_Field();
+ pidx_bias = _next_pidx;
+ }
+ if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) {
+ _worklist.append(pt);
+ }
}

// Put on worklist all uses of this node.
- void add_uses_to_worklist(PointsToNode* pt) {
- for (UseIterator i(pt); i.has_next(); i.next())
- _worklist.push(i.get());
+ inline void add_uses_to_worklist(PointsToNode* pt) {
+ for (UseIterator i(pt); i.has_next(); i.next()) {
+ add_to_worklist(i.get());
+ }
}

// Put on worklist all field's uses and related field nodes.
@@ -587,4 +598,17 @@
#endif
};

+inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type):
+ _edges(CG->_compile->comp_arena(), 2, 0, NULL),
+ _uses (CG->_compile->comp_arena(), 2, 0, NULL),
+ _node(n),
+ _idx(n->_idx),
+ _pidx(CG->next_pidx()),
+ _type((u1)type),
+ _escape((u1)es),
+ _fields_escape((u1)es),
+ _flags(ScalarReplaceable) {
+ assert(n != NULL && es != UnknownEscape, "sanity");
+}
+
#endif // SHARE_VM_OPTO_ESCAPE_HPP
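Illustrative aside on the _in_worklist/_next_pidx hunks above (a standalone sketch, not part of the patch): a bitset keyed by node index makes add_to_worklist idempotent, and biasing the key keeps "base use" entries distinct from plain entries. std containers stand in for VectorSet/GrowableArray here:

#include <cstdio>
#include <vector>

struct Worklist {
  std::vector<int>  items;
  std::vector<bool> in_worklist;  // plays the role of VectorSet
  int bias;                       // separates the two id namespaces

  explicit Worklist(int max_ids) : in_worklist(2 * max_ids, false), bias(max_ids) {}

  void add(int id, bool base_use) {
    int key = base_use ? id + bias : id;
    if (!in_worklist[key]) {      // test_set: push only on first sight
      in_worklist[key] = true;
      items.push_back(id);
    }
  }
};

int main() {
  Worklist w(100);
  w.add(7, false);
  w.add(7, false);                // ignored: already queued
  w.add(7, true);                 // kept: a distinct "base use" entry
  printf("%zu\n", w.items.size()); // prints 2
  return 0;
}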
--- jdk8/hotspot/src/share/vm/opto/graphKit.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/graphKit.cpp 2015-01-08 21:23:31.173148312 +0100
@@ -1150,7 +1150,7 @@
Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
if (akls != NULL) return akls;
Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
- return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
+ return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
}

//-------------------------load_array_length-----------------------------------
@@ -2542,7 +2542,7 @@
// cache which is mutable so can't use immutable memory. Other
// types load from the super-class display table which is immutable.
Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
- Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
+ Node* nkls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));

// Compile speed common case: ARE a subtype and we canNOT fail
if( superklass == nkls )
--- jdk8/hotspot/src/share/vm/opto/ifg.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/ifg.cpp 2015-01-08 21:23:31.174148288 +0100
@@ -541,6 +541,25 @@
if( !n->is_Proj() ||
// Could also be a flags-projection of a dead ADD or such.
(_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
+ bool remove = true;
+ if (n->is_MachProj()) {
+ // Don't remove KILL projections if their "defining" nodes have
+ // memory effects (have SCMemProj projection node) -
+ // they are not dead even when their result is not used.
+ // For example, compareAndSwapL (and other CAS) and EncodeISOArray nodes.
+ // The method add_input_to_liveout() keeps such nodes alive (put them on liveout list)
+ // when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed
+ // in block in such order that KILL MachProj nodes are processed first.
+ uint cnt = def->outcnt();
+ for (uint i = 0; i < cnt; i++) {
+ Node* proj = def->raw_out(i);
+ if (proj->Opcode() == Op_SCMemProj) {
+ remove = false;
+ break;
+ }
+ }
+ }
+ if (remove) {
block->remove_node(j - 1);
if (lrgs(r)._def == n) {
lrgs(r)._def = 0;
@@ -553,6 +572,7 @@
hrp_index[1]--;
continue;
}
+ }

// Fat-projections kill many registers which cannot be used to
// hold live ranges.
--- jdk8/hotspot/src/share/vm/opto/ifnode.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/ifnode.cpp 2015-01-08 21:23:31.174148288 +0100
@@ -820,6 +820,11 @@

static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);

+struct RangeCheck {
+ Node* ctl;
+ jint off;
+};
+
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies
@@ -861,23 +866,25 @@
jint offset1;
int flip1 = is_range_check(range1, index1, offset1);
if( flip1 ) {
- Node *first_prev_dom = NULL;
-
// Try to remove extra range checks. All 'up_one_dom' gives up at merges
// so all checks we inspect post-dominate the top-most check we find.
// If we are going to fail the current check and we reach the top check
// then we are guaranteed to fail, so just start interpreting there.
- // We 'expand' the top 2 range checks to include all post-dominating
+ // We 'expand' the top 3 range checks to include all post-dominating
// checks.

- // The top 2 range checks seen
- Node *prev_chk1 = NULL;
- Node *prev_chk2 = NULL;
+ // The top 3 range checks seen
+ const int NRC = 3;
+ RangeCheck prev_checks[NRC];
+ int nb_checks = 0;
+
// Low and high offsets seen so far
jint off_lo = offset1;
jint off_hi = offset1;

- // Scan for the top 2 checks and collect range of offsets
+ bool found_immediate_dominator = false;
+
+ // Scan for the top checks and collect range of offsets
for( int dist = 0; dist < 999; dist++ ) { // Range-Check scan limit
if( dom->Opcode() == Op_If && // Not same opcode?
prev_dom->in(0) == dom ) { // One path of test does dominate?
@@ -890,16 +897,21 @@
// the same array bounds.
if( flip2 == flip1 && range2 == range1 && index2 == index1 &&
dom->outcnt() == 2 ) {
+ if (nb_checks == 0 && dom->in(1) == in(1)) {
+ // Found an immediately dominating test at the same offset.
+ // This kind of back-to-back test can be eliminated locally,
+ // and there is no need to search further for dominating tests.
+ assert(offset2 == offset1, "Same test but different offsets");
+ found_immediate_dominator = true;
+ break;
+ }
// Gather expanded bounds
off_lo = MIN2(off_lo,offset2);
off_hi = MAX2(off_hi,offset2);
- // Record top 2 range checks
- prev_chk2 = prev_chk1;
- prev_chk1 = prev_dom;
- // If we match the test exactly, then the top test covers
- // both our lower and upper bounds.
- if( dom->in(1) == in(1) )
- prev_chk2 = prev_chk1;
+ // Record top NRC range checks
+ prev_checks[nb_checks%NRC].ctl = prev_dom;
+ prev_checks[nb_checks%NRC].off = offset2;
+ nb_checks++;
}
}
prev_dom = dom;
@@ -907,36 +919,87 @@
if( !dom ) break;
}

-
+ if (!found_immediate_dominator) {
// Attempt to widen the dominating range check to cover some later
// ones. Since range checks "fail" by uncommon-trapping to the
- // interpreter, widening a check can make us speculative enter the
- // interpreter. If we see range-check deopt's, do not widen!
+ // interpreter, widening a check can make us speculatively enter
+ // the interpreter. If we see range-check deopt's, do not widen!
if (!phase->C->allow_range_check_smearing()) return NULL;

+ // Didn't find prior covering check, so cannot remove anything.
+ if (nb_checks == 0) {
+ return NULL;
+ }
// Constant indices only need to check the upper bound.
- // Non-constance indices must check both low and high.
+ // Non-constant indices must check both low and high.
+ int chk0 = (nb_checks - 1) % NRC;
if( index1 ) {
- // Didn't find 2 prior covering checks, so cannot remove anything.
- if( !prev_chk2 ) return NULL;
- // 'Widen' the offsets of the 1st and 2nd covering check
- adjust_check( prev_chk1, range1, index1, flip1, off_lo, igvn );
- // Do not call adjust_check twice on the same projection
- // as the first call may have transformed the BoolNode to a ConI
- if( prev_chk1 != prev_chk2 ) {
- adjust_check( prev_chk2, range1, index1, flip1, off_hi, igvn );
+ if (nb_checks == 1) {
+ return NULL;
+ } else {
+ // If the top range check's constant is the min or max of
+ // all constants we widen the next one to cover the whole
+ // range of constants.
+ RangeCheck rc0 = prev_checks[chk0];
+ int chk1 = (nb_checks - 2) % NRC;
+ RangeCheck rc1 = prev_checks[chk1];
+ if (rc0.off == off_lo) {
+ adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
+ prev_dom = rc1.ctl;
+ } else if (rc0.off == off_hi) {
+ adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
+ prev_dom = rc1.ctl;
+ } else {
+ // If the top test's constant is not the min or max of all
+ // constants, we need 3 range checks. We must leave the
+ // top test unchanged because widening it would allow the
+ // accesses it protects to successfully read/write out of
+ // bounds.
+ if (nb_checks == 2) {
+ return NULL;
}
- // Test is now covered by prior checks, dominate it out
- prev_dom = prev_chk2;
+ int chk2 = (nb_checks - 3) % NRC;
+ RangeCheck rc2 = prev_checks[chk2];
+ // The top range check a+i covers interval: -a <= i < length-a
+ // The second range check b+i covers interval: -b <= i < length-b
+ if (rc1.off <= rc0.off) {
+ // if b <= a, we change the second range check to:
+ // -min_of_all_constants <= i < length-min_of_all_constants
+ // Together top and second range checks now cover:
+ // -min_of_all_constants <= i < length-a
+ // which is more restrictive than -b <= i < length-b:
+ // -b <= -min_of_all_constants <= i < length-a <= length-b
+ // The third check is then changed to:
+ // -max_of_all_constants <= i < length-max_of_all_constants
+ // so 2nd and 3rd checks restrict allowed values of i to:
+ // -min_of_all_constants <= i < length-max_of_all_constants
+ adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
+ adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
} else {
- // Didn't find prior covering check, so cannot remove anything.
- if( !prev_chk1 ) return NULL;
+ // if b > a, we change the second range check to:
+ // -max_of_all_constants <= i < length-max_of_all_constants
+ // Together top and second range checks now cover:
+ // -a <= i < length-max_of_all_constants
+ // which is more restrictive than -b <= i < length-b:
+ // -b < -a <= i < length-max_of_all_constants <= length-b
+ // The third check is then changed to:
+ // -max_of_all_constants <= i < length-max_of_all_constants
+ // so 2nd and 3rd checks restrict allowed values of i to:
+ // -min_of_all_constants <= i < length-max_of_all_constants
+ adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
+ adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
+ }
+ prev_dom = rc2.ctl;
+ }
+ }
+ } else {
+ RangeCheck rc0 = prev_checks[chk0];
// 'Widen' the offset of the 1st and only covering check
- adjust_check( prev_chk1, range1, index1, flip1, off_hi, igvn );
+ adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
// Test is now covered by prior checks, dominate it out
- prev_dom = prev_chk1;
+ prev_dom = rc0.ctl;
+ }
}
-

} else { // Scan for an equivalent test

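Illustrative aside on the widening logic above (a standalone sketch, not part of the patch): a worked instance with accesses a[i+0], a[i+2], a[i+5], so off_lo = 0 and off_hi = 5. The top check's offset 2 is neither the min nor the max, so the second and third checks are widened while the top one is left untouched:

#include <cstdio>

int main() {
  // Offsets of the three recorded range checks, newest (top) first.
  int rc0_off = 2, rc1_off = 0, rc2_off = 5;
  int off_lo = 0, off_hi = 5;   // min / max of all constants seen

  // rc0 (offset a=2) guards -2 <= i < length-2 and must stay as-is:
  // widening it would let the access it protects go out of bounds.
  if (rc1_off <= rc0_off) {
    rc1_off = off_lo;           // second check now guards  0 <= i < length-0
    rc2_off = off_hi;           // third check now guards  -5 <= i < length-5
  } else {
    rc1_off = off_hi;
    rc2_off = off_lo;
  }
  // Combined, i is restricted to -off_lo <= i < length - off_hi, which
  // covers every access a[i+0] .. a[i+5] the later checks used to guard.
  printf("allowed: %d <= i < length-%d\n", -off_lo, off_hi);
  return 0;
}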
--- jdk8/hotspot/src/share/vm/opto/lcm.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/lcm.cpp 2015-01-08 21:23:31.174148288 +0100
@@ -487,9 +487,7 @@
iop == Op_CreateEx || // Create-exception must start block
iop == Op_CheckCastPP
) {
- // select the node n
- // remove n from worklist and retain the order of remaining nodes
- worklist.remove((uint)i);
+ worklist.map(i,worklist.pop());
return n;
}

@@ -575,9 +573,7 @@
assert(idx >= 0, "index should be set");
Node *n = worklist[(uint)idx]; // Get the winner

- // select the node n
- // remove n from worklist and retain the order of remaining nodes
- worklist.remove((uint)idx);
+ worklist.map((uint)idx, worklist.pop()); // Compress worklist
return n;
}

--- jdk8/hotspot/src/share/vm/opto/library_call.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/library_call.cpp 2015-01-08 21:23:31.175148264 +0100
@@ -3398,7 +3398,7 @@
if (region == NULL) never_see_null = true;
Node* p = basic_plus_adr(mirror, offset);
const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
- Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
+ Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
Node* null_ctl = top();
kls = null_check_oop(kls, &null_ctl, never_see_null);
if (region != NULL) {
@@ -3574,7 +3574,7 @@
phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
// If we fall through, it's a plain class. Get its _super.
p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
- kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
+ kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
null_ctl = top();
kls = null_check_oop(kls, &null_ctl);
if (null_ctl != top()) {
@@ -3656,7 +3656,7 @@
args[which_arg] = arg;

Node* p = basic_plus_adr(arg, class_klass_offset);
- Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
+ Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
klasses[which_arg] = _gvn.transform(kls);
}

@@ -5172,7 +5172,7 @@
// (At this point we can assume disjoint_bases, since types differ.)
int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
Node* p1 = basic_plus_adr(dest_klass, ek_offset);
- Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
+ Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
Node* dest_elem_klass = _gvn.transform(n1);
Node* cv = generate_checkcast_arraycopy(adr_type,
dest_elem_klass,
--- jdk8/hotspot/src/share/vm/opto/loopnode.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/loopnode.hpp 2015-01-08 21:23:31.176148240 +0100
@@ -602,6 +602,8 @@
return ctrl;
}

+ bool cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);
+
public:
bool has_node( Node* n ) const {
guarantee(n != NULL, "No Node.");
--- jdk8/hotspot/src/share/vm/opto/loopopts.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/loopopts.cpp 2015-01-08 21:23:31.177148216 +0100
@@ -239,8 +239,13 @@
ProjNode* dp_proj = dp->as_Proj();
ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
if (exclude_loop_predicate &&
- unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
+ (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) ||
+ unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) {
+ // If this is a range check (IfNode::is_range_check), do not
+ // reorder because Compile::allow_range_check_smearing might have
+ // changed the check.
return; // Let IGVN transformation change control dependence.
+ }

IdealLoopTree *old_loop = get_loop(dp);

@@ -734,7 +739,7 @@
for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
weight += region->fast_out(i)->outcnt();
}
- int nodes_left = MaxNodeLimit - C->live_nodes();
+ int nodes_left = C->max_node_limit() - C->live_nodes();
if (weight * 8 > nodes_left) {
#ifndef PRODUCT
if (PrintOpto)
@@ -900,7 +905,7 @@
Node *bol = n->in(1);
uint max = bol->outcnt();
// Check for same test used more than once?
- if( n_op == Op_If && max > 1 && bol->is_Bool() ) {
+ if (max > 1 && bol->is_Bool()) {
// Search up IDOMs to see if this IF is dominated.
Node *cutoff = get_ctrl(bol);

--- jdk8/hotspot/src/share/vm/opto/loopTransform.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/loopTransform.cpp 2015-01-08 21:23:31.176148240 +0100
@@ -269,10 +269,9 @@
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
Node *test = ((IdealLoopTree*)this)->tail();
int body_size = ((IdealLoopTree*)this)->_body.size();
- int live_node_count = phase->C->live_nodes();
// Peeling does loop cloning which can result in O(N^2) node construction
if( body_size > 255 /* Prevent overflow for large body_size */
- || (body_size * body_size + live_node_count > MaxNodeLimit) ) {
+ || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
return false; // too large to safely clone
}
while( test != _head ) { // Scan till run off top of loop
@@ -601,7 +600,7 @@
return false;
if (new_body_size > unroll_limit ||
// Unrolling can result in a large amount of node construction
- new_body_size >= MaxNodeLimit - (uint) phase->C->live_nodes()) {
+ new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) {
return false;
}

@@ -882,6 +881,20 @@
return n;
}

+bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
+ Node* castii = new (C) CastIINode(incr, TypeInt::INT, true);
+ castii->set_req(0, ctrl);
+ register_new_node(castii, ctrl);
+ for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) {
+ Node* n = incr->fast_out(i);
+ if (n->is_Phi() && n->in(0) == loop) {
+ int nrep = n->replace_edge(incr, castii);
+ return true;
+ }
+ }
+ return false;
+}
+
//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops. If peel_only is set, the pre-loop can not have
// more iterations added. It acts as a 'peel' only, no lower-bound RCE, no
@@ -1080,6 +1093,24 @@
}
}

+ // Nodes inside the loop may be control dependent on a predicate
+ // that was moved before the preloop. If the back branch of the main
+ // or post loops becomes dead, those nodes won't be dependent on the
+ // test that guards that loop nest anymore which could lead to an
+ // incorrect array access because it executes independently of the
+ // test that was guarding the loop nest. We add a special CastII on
+ // the if branch that enters the loop, between the input induction
+ // variable value and the induction variable Phi to preserve correct
+ // dependencies.
+
+ // CastII for the post loop:
+ bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
+ assert(inserted, "no castII inserted");
+
+ // CastII for the main loop:
+ inserted = cast_incr_before_loop(pre_incr, min_taken, main_head);
+ assert(inserted, "no castII inserted");
+
// Step B4: Shorten the pre-loop to run only 1 iteration (for now).
// RCE and alignment may change this later.
Node *cmp_end = pre_end->cmp_node();
@@ -2287,8 +2318,8 @@

// Skip next optimizations if running low on nodes. Note that
// policy_unswitching and policy_maximally_unroll have this check.
- uint nodes_left = MaxNodeLimit - (uint) phase->C->live_nodes();
- if ((2 * _body.size()) > nodes_left) {
+ int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
+ if ((int)(2 * _body.size()) > nodes_left) {
return true;
}

--- jdk8/hotspot/src/share/vm/opto/loopUnswitch.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/loopUnswitch.cpp 2015-01-08 21:23:31.176148240 +0100
@@ -59,8 +59,8 @@
if (!_head->is_Loop()) {
return false;
}
- uint nodes_left = MaxNodeLimit - phase->C->live_nodes();
- if (2 * _body.size() > nodes_left) {
+ int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
+ if ((int)(2 * _body.size()) > nodes_left) {
return false; // Too speculative if running low on nodes.
}
LoopNode* head = _head->as_Loop();
--- jdk8/hotspot/src/share/vm/opto/macro.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/macro.cpp 2015-01-08 21:23:31.177148216 +0100
@@ -964,7 +964,11 @@
}

bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
- if (!EliminateAllocations || !alloc->_is_non_escaping) {
+ // Don't do scalar replacement if the frame can be popped by JVMTI:
+ // if reallocation fails during deoptimization we'll pop all
+ // interpreter frames for this compiled frame and that won't play
+ // nice with JVMTI popframe.
+ if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
return false;
}
Node* klass = alloc->in(AllocateNode::KlassNode);
@@ -2194,7 +2198,7 @@
Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
if (klass_node == NULL) {
Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
- klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
+ klass_node = transform_later(LoadKlassNode::make(_igvn, NULL, mem, k_adr, _igvn.type(k_adr)->is_ptr()));
#ifdef _LP64
if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
--- jdk8/hotspot/src/share/vm/opto/memnode.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/memnode.cpp 2015-01-08 21:23:31.178148192 +0100
@@ -870,6 +870,10 @@


//=============================================================================
+// Should LoadNode::Ideal() attempt to remove control edges?
+bool LoadNode::can_remove_control() const {
+ return true;
+}
uint LoadNode::size_of() const { return sizeof(*this); }
uint LoadNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((LoadNode&)n)._type ); }
@@ -1262,6 +1266,16 @@
result = new (phase->C) ConvI2LNode(phase->transform(result));
}
#endif
+ // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
+ // Need to preserve unboxing load type if it is unsigned.
+ switch(this->Opcode()) {
+ case Op_LoadUB:
+ result = new (phase->C) AndINode(phase->transform(result), phase->intcon(0xFF));
+ break;
+ case Op_LoadUS:
+ result = new (phase->C) AndINode(phase->transform(result), phase->intcon(0xFFFF));
+ break;
+ }
return result;
}
}
@@ -1466,7 +1480,7 @@
}

//------------------------------Ideal------------------------------------------
-// If the load is from Field memory and the pointer is non-null, we can
+// If the load is from Field memory and the pointer is non-null, it might be possible to
// zero out the control input.
// If the offset is constant and the base is an object allocation,
// try to hook me up to the exact initializing store.
@@ -1491,6 +1505,7 @@
&& phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
// Check for useless control edge in some common special cases
if (in(MemNode::Control) != NULL
+ && can_remove_control()
&& phase->type(base)->higher_equal(TypePtr::NOTNULL)
&& all_controls_dominate(base, phase->C->start())) {
// A method-invariant, non-null address (constant or 'this' argument).
@@ -2018,9 +2033,8 @@
//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method:
-Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
+Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk) {
Compile* C = gvn.C;
- Node *ctl = NULL;
// sanity check the alias category against the created node type
const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
assert(adr_type != NULL, "expecting TypeKlassPtr");
@@ -2040,6 +2054,12 @@
return klass_value_common(phase);
}

+// In most cases, LoadKlassNode does not have the control input set. If the control
+// input is set, it must not be removed (by LoadNode::Ideal()).
+bool LoadKlassNode::can_remove_control() const {
+ return false;
+}
+
const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
// Either input is TOP ==> the result is TOP
const Type *t1 = phase->type( in(MemNode::Memory) );
--- jdk8/hotspot/src/share/vm/opto/memnode.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/memnode.hpp 2015-01-08 21:23:31.179148168 +0100
@@ -151,6 +151,8 @@
protected:
virtual uint cmp(const Node &n) const;
virtual uint size_of() const; // Size is bigger
+ // Should LoadNode::Ideal() attempt to remove control edges?
+ virtual bool can_remove_control() const;
const Type* const _type; // What kind of value is loaded?
public:

@@ -174,8 +176,10 @@
// we are equivalent to. We look for Load of a Store.
virtual Node *Identity( PhaseTransform *phase );

- // If the load is from Field memory and the pointer is non-null, we can
+ // If the load is from Field memory and the pointer is non-null, it might be possible to
// zero out the control input.
+ // If the offset is constant and the base is an object allocation,
+ // try to hook me up to the exact initializing store.
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

// Split instance field load through Phi.
@@ -416,6 +420,10 @@
//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
+protected:
+ // In most cases, LoadKlassNode does not have the control input set. If the control
+ // input is set, it must not be removed (by LoadNode::Ideal()).
+ virtual bool can_remove_control() const;
public:
LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
: LoadPNode(c, mem, adr, at, tk, mo) {}
@@ -425,7 +433,7 @@
virtual bool depends_only_on_test() const { return true; }

// Polymorphic factory method:
- static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
+ static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

--- jdk8/hotspot/src/share/vm/opto/node.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/node.cpp 2015-01-08 21:23:31.179148168 +0100
@@ -69,7 +69,7 @@
Compile::set_debug_idx(new_debug_idx);
set_debug_idx( new_debug_idx );
assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
- assert(Compile::current()->live_nodes() < (uint)MaxNodeLimit, "Live Node limit exceeded limit");
+ assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
BREAKPOINT;
@@ -326,7 +326,7 @@
Node::Node(uint req)
: _idx(IDX_INIT(req))
{
- assert( req < (uint)(MaxNodeLimit - NodeLimitFudgeFactor), "Input limit exceeded" );
+ assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
if (req == 0) {
--- jdk8/hotspot/src/share/vm/opto/parse1.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/parse1.cpp 2015-01-08 21:23:31.180148144 +0100
@@ -1958,7 +1958,7 @@
// finalization. In general this will fold up since the concrete
// class is often visible so the access flags are constant.
Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
- Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
+ Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS));

Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
--- jdk8/hotspot/src/share/vm/opto/parseHelper.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/parseHelper.cpp 2015-01-08 21:23:31.180148144 +0100
@@ -156,22 +156,43 @@
int klass_offset = oopDesc::klass_offset_in_bytes();
Node* p = basic_plus_adr( ary, ary, klass_offset );
// p's type is array-of-OOPS plus klass_offset
- Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
+ Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
// Get the array klass
const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

- // array_klass's type is generally INexact array-of-oop. Heroically
- // cast the array klass to EXACT array and uncommon-trap if the cast
- // fails.
+ // The type of array_klass is usually INexact array-of-oop. Heroically
+ // cast array_klass to EXACT array and uncommon-trap if the cast fails.
+ // Make constant out of the inexact array klass, but use it only if the cast
+ // succeeds.
bool always_see_exact_class = false;
if (MonomorphicArrayCheck
- && !too_many_traps(Deoptimization::Reason_array_check)) {
+ && !too_many_traps(Deoptimization::Reason_array_check)
+ && !tak->klass_is_exact()
+ && tak != TypeKlassPtr::OBJECT) {
+ // Regarding the fourth condition in the if-statement from above:
+ //
+ // If the compiler has determined that the type of array 'ary' (represented
+ // by 'array_klass') is java/lang/Object, the compiler must not assume that
+ // the array 'ary' is monomorphic.
+ //
+ // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
+ // because it is not possible to perform a arraystore into an object that is not
+ // a "proper" array.
+ //
+ // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
+ // successfully perform the store.
+ //
+ // The implementation reasons for the condition are the following:
+ //
+ // java/lang/Object is the superclass of all arrays, but it is represented by the VM
+ // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
+ // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
+ //
+ // See issue JDK-8057622 for details.
+
always_see_exact_class = true;
// (If no MDO at all, hope for the best, until a trap actually occurs.)
- }

- // Is the array klass is exactly its defined type?
- if (always_see_exact_class && !tak->klass_is_exact()) {
// Make a constant out of the inexact array klass
const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
Node* con = makecon(extak);
@@ -202,7 +223,11 @@
// Extract the array element class
int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
- Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );
+ // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
+ // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
+ // LoadKlassNode.
+ Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
+ immutable_memory(), p2, tak));

// Check (the hard way) and throw if not a subklass.
// Result is ignored, we just need the CFG effects.
--- jdk8/hotspot/src/share/vm/opto/phaseX.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/phaseX.cpp 2015-01-08 21:23:31.180148144 +0100
@@ -1340,15 +1340,27 @@
}
}

+ uint use_op = use->Opcode();
if( use->is_Cmp() ) { // Enable CMP/BOOL optimization
add_users_to_worklist(use); // Put Bool on worklist
- // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the
- // phi merging either 0 or 1 onto the worklist
if (use->outcnt() > 0) {
Node* bol = use->raw_out(0);
if (bol->outcnt() > 0) {
Node* iff = bol->raw_out(0);
- if (iff->outcnt() == 2) {
+ if (use_op == Op_CmpI &&
+ iff->is_CountedLoopEnd()) {
+ CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
+ if (cle->limit() == n && cle->phi() != NULL) {
+ // If an opaque node feeds into the limit condition of a
+ // CountedLoop, we need to process the Phi node for the
+ // induction variable when the opaque node is removed:
+ // the range of values taken by the Phi is now known and
+ // so its type is also known.
+ _worklist.push(cle->phi());
+ }
+ } else if (iff->outcnt() == 2) {
+ // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the
+ // phi merging either 0 or 1 onto the worklist
Node* ifproj0 = iff->raw_out(0);
Node* ifproj1 = iff->raw_out(1);
if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
@@ -1360,9 +1372,26 @@
}
}
}
+ if (use_op == Op_CmpI) {
+ Node* in1 = use->in(1);
+ for (uint i = 0; i < in1->outcnt(); i++) {
+ if (in1->raw_out(i)->Opcode() == Op_CastII) {
+ Node* castii = in1->raw_out(i);
+ if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
+ Node* ifnode = castii->in(0)->in(0);
+ if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
+ // Reprocess a CastII node that may depend on an
+ // opaque node value when the opaque node is
+ // removed. In case it carries a dependency we can do
+ // a better job of computing its type.
+ _worklist.push(castii);
+ }
+ }
+ }
+ }
+ }
}

- uint use_op = use->Opcode();
// If changed Cast input, check Phi users for simple cycles
if( use->is_ConstraintCast() || use->is_CheckCastPP() ) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
--- jdk8/hotspot/src/share/vm/opto/subnode.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/subnode.cpp 2015-01-08 21:23:31.180148144 +0100
@@ -1147,12 +1147,10 @@

//------------------------------dump_spec-------------------------------------
// Print special per-node info
-#ifndef PRODUCT
void BoolTest::dump_on(outputStream *st) const {
const char *msg[] = {"eq","gt","of","lt","ne","le","nof","ge"};
st->print("%s", msg[_test]);
}
-#endif

//=============================================================================
uint BoolNode::hash() const { return (Node::hash() << 3)|(_test._test+1); }
--- jdk8/hotspot/src/share/vm/opto/subnode.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/opto/subnode.hpp 2015-01-08 21:23:31.181148120 +0100
@@ -275,9 +275,7 @@
mask commute( ) const { return mask("032147658"[_test]-'0'); }
mask negate( ) const { return mask(_test^4); }
bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le || _test == BoolTest::overflow); }
-#ifndef PRODUCT
void dump_on(outputStream *st) const;
-#endif
};

//------------------------------BoolNode---------------------------------------
--- jdk8/hotspot/src/share/vm/prims/jni.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/jni.cpp 2015-01-08 21:23:31.182148096 +0100
@@ -5080,6 +5080,7 @@
void TestNewSize_test();
void TestKlass_test();
void Test_linked_list();
+void TestChunkedList_test();
#if INCLUDE_ALL_GCS
void TestOldFreeSpaceCalculation_test();
void TestG1BiasedArray_test();
@@ -5108,6 +5109,7 @@
run_unit_test(TestNewSize_test());
run_unit_test(TestKlass_test());
run_unit_test(Test_linked_list());
+ run_unit_test(TestChunkedList_test());
#if INCLUDE_VM_STRUCTS
run_unit_test(VMStructs::test());
#endif
--- jdk8/hotspot/src/share/vm/prims/jvm.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/jvm.cpp 2015-01-08 21:23:31.182148096 +0100
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderExt.hpp"
#include "classfile/javaAssertions.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
@@ -393,6 +394,14 @@
}
}

+ const char* enableSharedLookupCache = "false";
+#if INCLUDE_CDS
+ if (ClassLoaderExt::is_lookup_cache_enabled()) {
+ enableSharedLookupCache = "true";
+ }
+#endif
+ PUTPROP(props, "sun.cds.enableSharedLookupCache", enableSharedLookupCache);
+
return properties;
JVM_END

@@ -594,13 +603,14 @@

// Make shallow object copy
const int size = obj->size();
- oop new_obj = NULL;
+ oop new_obj_oop = NULL;
if (obj->is_array()) {
const int length = ((arrayOop)obj())->length();
- new_obj = CollectedHeap::array_allocate(klass, size, length, CHECK_NULL);
+ new_obj_oop = CollectedHeap::array_allocate(klass, size, length, CHECK_NULL);
} else {
- new_obj = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
+ new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
}
+
// 4839641 (4840070): We must do an oop-atomic copy, because if another thread
// is modifying a reference field in the clonee, a non-oop-atomic copy might
// be suspended in the middle of copying the pointer and end up with parts
@@ -611,24 +621,41 @@
// The same is true of StubRoutines::object_copy and the various oop_copy
// variants, and of the code generated by the inline_native_clone intrinsic.
assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
- Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj,
+ Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
(size_t)align_object_size(size) / HeapWordsPerLong);
// Clear the header
- new_obj->init_mark();
+ new_obj_oop->init_mark();

// Store check (mark entire object and let gc sort it out)
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
- bs->write_region(MemRegion((HeapWord*)new_obj, size));
+ bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
+
+ Handle new_obj(THREAD, new_obj_oop);
+ // Special handling for MemberNames. Since they contain Method* metadata, they
+ // must be registered so that RedefineClasses can fix metadata contained in them.
+ if (java_lang_invoke_MemberName::is_instance(new_obj()) &&
+ java_lang_invoke_MemberName::is_method(new_obj())) {
+ Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(new_obj());
+ // MemberName may be unresolved, so doesn't need registration until resolved.
+ if (method != NULL) {
+ methodHandle m(THREAD, method);
+ // This can safepoint and redefine method, so need both new_obj and method
+ // in a handle, for two different reasons. new_obj can move, method can be
+ // deleted if nothing is using it on the stack.
+ m->method_holder()->add_member_name(new_obj());
+ }
+ }

// Caution: this involves a java upcall, so the clone should be
// "gc-robust" by this stage.
if (klass->has_finalizer()) {
assert(obj->is_instance(), "should be instanceOop");
- new_obj = InstanceKlass::register_finalizer(instanceOop(new_obj), CHECK_NULL);
+ new_obj_oop = InstanceKlass::register_finalizer(instanceOop(new_obj()), CHECK_NULL);
+ new_obj = Handle(THREAD, new_obj_oop);
}

- return JNIHandles::make_local(env, oop(new_obj));
+ return JNIHandles::make_local(env, new_obj());
JVM_END

// java.lang.Compiler ////////////////////////////////////////////////////
@@ -766,6 +793,36 @@
JVM_END


+JVM_ENTRY(jboolean, JVM_KnownToNotExist(JNIEnv *env, jobject loader, const char *classname))
+ JVMWrapper("JVM_KnownToNotExist");
+#if INCLUDE_CDS
+ return ClassLoaderExt::known_to_not_exist(env, loader, classname, CHECK_(false));
+#else
+ return false;
+#endif
+JVM_END
+
+
+JVM_ENTRY(jobjectArray, JVM_GetResourceLookupCacheURLs(JNIEnv *env, jobject loader))
+ JVMWrapper("JVM_GetResourceLookupCacheURLs");
+#if INCLUDE_CDS
+ return ClassLoaderExt::get_lookup_cache_urls(env, loader, CHECK_NULL);
+#else
+ return NULL;
+#endif
+JVM_END
+
+
+JVM_ENTRY(jintArray, JVM_GetResourceLookupCache(JNIEnv *env, jobject loader, const char *resource_name))
+ JVMWrapper("JVM_GetResourceLookupCache");
+#if INCLUDE_CDS
+ return ClassLoaderExt::get_lookup_cache(env, loader, resource_name, CHECK_NULL);
+#else
+ return NULL;
+#endif
+JVM_END
+
+
// Returns a class loaded by the bootstrap class loader; or null
// if not found. ClassNotFoundException is not thrown.
//
@@ -4497,7 +4554,7 @@

JVM_ENTRY(void, JVM_GetVersionInfo(JNIEnv* env, jvm_version_info* info, size_t info_size))
{
- memset(info, 0, sizeof(info_size));
+ memset(info, 0, info_size);

info->jvm_version = Abstract_VM_Version::jvm_version();
info->update_version = 0; /* 0 in HotSpot Express VM */
--- jdk8/hotspot/src/share/vm/prims/jvm.h 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/jvm.h 2015-01-08 21:23:31.183148072 +0100
@@ -1553,6 +1553,31 @@
JNIEXPORT jobjectArray JNICALL
JVM_GetThreadStateNames(JNIEnv* env, jint javaThreadState, jintArray values);

+/*
+ * Returns true if the JVM's lookup cache indicates that this class is
+ * known to NOT exist for the given loader.
+ */
+JNIEXPORT jboolean JNICALL
+JVM_KnownToNotExist(JNIEnv *env, jobject loader, const char *classname);
+
+/*
+ * Returns an array of all URLs that are stored in the JVM's lookup cache
+ * for the given loader. NULL if the lookup cache is unavailable.
+ */
+JNIEXPORT jobjectArray JNICALL
+JVM_GetResourceLookupCacheURLs(JNIEnv *env, jobject loader);
+
+/*
+ * Returns an array of all URLs that *may* contain the resource_name for the
+ * given loader. This function returns an integer array, each element
+ * of which can be used to index into the array returned by
+ * JVM_GetResourceLookupCacheURLs of the same loader to determine the
+ * URLs.
+ */
+JNIEXPORT jintArray JNICALL
+JVM_GetResourceLookupCache(JNIEnv *env, jobject loader, const char *resource_name);
+
+
/* =========================================================================
* The following defines a private JVM interface that the JDK can query
* for the JVM version and capabilities. sun.misc.Version defines
--- jdk8/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp 2015-01-08 21:23:31.183148072 +0100
@@ -57,6 +57,7 @@
void JvmtiClassFileReconstituter::write_field_infos() {
HandleMark hm(thread());
Array<AnnotationArray*>* fields_anno = ikh()->fields_annotations();
+ Array<AnnotationArray*>* fields_type_anno = ikh()->fields_type_annotations();

// Compute the real number of Java fields
int java_fields = ikh()->java_fields_count();
@@ -71,6 +72,7 @@
// int offset = ikh()->field_offset( index );
int generic_signature_index = fs.generic_signature_index();
AnnotationArray* anno = fields_anno == NULL ? NULL : fields_anno->at(fs.index());
+ AnnotationArray* type_anno = fields_type_anno == NULL ? NULL : fields_type_anno->at(fs.index());

// JVMSpec| field_info {
// JVMSpec| u2 access_flags;
@@ -96,6 +98,9 @@
if (anno != NULL) {
++attr_count; // has RuntimeVisibleAnnotations attribute
}
+ if (type_anno != NULL) {
+ ++attr_count; // has RuntimeVisibleTypeAnnotations attribute
+ }

write_u2(attr_count);

@@ -113,6 +118,9 @@
if (anno != NULL) {
write_annotations_attribute("RuntimeVisibleAnnotations", anno);
}
+ if (type_anno != NULL) {
+ write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+ }
}
}

@@ -553,6 +561,7 @@
AnnotationArray* anno = method->annotations();
AnnotationArray* param_anno = method->parameter_annotations();
AnnotationArray* default_anno = method->annotation_default();
+ AnnotationArray* type_anno = method->type_annotations();

// skip generated default interface methods
if (method->is_overpass()) {
@@ -588,6 +597,9 @@
if (param_anno != NULL) {
++attr_count; // has RuntimeVisibleParameterAnnotations attribute
}
+ if (type_anno != NULL) {
+ ++attr_count; // has RuntimeVisibleTypeAnnotations attribute
+ }

write_u2(attr_count);
if (const_method->code_size() > 0) {
@@ -612,6 +624,9 @@
if (param_anno != NULL) {
write_annotations_attribute("RuntimeVisibleParameterAnnotations", param_anno);
}
+ if (type_anno != NULL) {
+ write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+ }
}

// Write the class attributes portion of ClassFile structure
@@ -621,6 +636,7 @@
u2 inner_classes_length = inner_classes_attribute_length();
Symbol* generic_signature = ikh()->generic_signature();
AnnotationArray* anno = ikh()->class_annotations();
+ AnnotationArray* type_anno = ikh()->class_type_annotations();

int attr_count = 0;
if (generic_signature != NULL) {
@@ -638,6 +654,9 @@
if (anno != NULL) {
++attr_count; // has RuntimeVisibleAnnotations attribute
}
+ if (type_anno != NULL) {
+ ++attr_count; // has RuntimeVisibleTypeAnnotations attribute
+ }
if (cpool()->operands() != NULL) {
++attr_count;
}
@@ -659,6 +678,9 @@
if (anno != NULL) {
write_annotations_attribute("RuntimeVisibleAnnotations", anno);
}
+ if (type_anno != NULL) {
+ write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+ }
if (cpool()->operands() != NULL) {
write_bootstrapmethod_attribute();
}
--- jdk8/hotspot/src/share/vm/prims/jvmtiEnv.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/jvmtiEnv.cpp 2015-01-08 21:23:31.184148048 +0100
@@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
+#include "classfile/classLoaderExt.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/bytecodeStream.hpp"
@@ -475,7 +476,7 @@
if (TraceClassLoading) {
tty->print_cr("[Opened %s]", zip_entry->name());
}
- ClassLoader::add_to_list(zip_entry);
+ ClassLoaderExt::append_boot_classpath(zip_entry);
return JVMTI_ERROR_NONE;
} else {
return JVMTI_ERROR_WRONG_PHASE;
--- jdk8/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp 2015-01-08 21:23:31.185148024 +0100
@@ -135,7 +135,7 @@

// Mark methods seen on stack and everywhere else so old methods are not
// cleaned up if they're on the stack.
- MetadataOnStackMark md_on_stack;
+ MetadataOnStackMark md_on_stack(true);
HandleMark hm(thread); // make sure any handles created are deleted
// before the stack walk again.

@@ -1569,6 +1569,29 @@
return false;
}

+ // rewrite constant pool references in the class_type_annotations:
+ if (!rewrite_cp_refs_in_class_type_annotations(scratch_class, THREAD)) {
+ // propagate failure back to caller
+ return false;
+ }
+
+ // rewrite constant pool references in the fields_type_annotations:
+ if (!rewrite_cp_refs_in_fields_type_annotations(scratch_class, THREAD)) {
+ // propagate failure back to caller
+ return false;
+ }
+
+ // rewrite constant pool references in the methods_type_annotations:
+ if (!rewrite_cp_refs_in_methods_type_annotations(scratch_class, THREAD)) {
+ // propagate failure back to caller
+ return false;
+ }
+
+ // There can be type annotations in the Code part of a method_info attribute.
+ // These annotations are not accessible, even by reflection.
+ // Currently they are not even parsed by the ClassFileParser.
+ // If runtime access is added they will also need to be rewritten.
+
// rewrite source file name index:
u2 source_file_name_idx = scratch_class->source_file_name_index();
if (source_file_name_idx != 0) {
@@ -2239,6 +2262,588 @@
} // end rewrite_cp_refs_in_methods_default_annotations()


+// Rewrite constant pool references in a class_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_class_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS) {
+
+ AnnotationArray* class_type_annotations = scratch_class->class_type_annotations();
+ if (class_type_annotations == NULL || class_type_annotations->length() == 0) {
+ // no class_type_annotations so nothing to do
+ return true;
+ }
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("class_type_annotations length=%d", class_type_annotations->length()));
+
+ int byte_i = 0; // byte index into class_type_annotations
+ return rewrite_cp_refs_in_type_annotations_typeArray(class_type_annotations,
+ byte_i, "ClassFile", THREAD);
+} // end rewrite_cp_refs_in_class_type_annotations()
+
+
+// Rewrite constant pool references in a fields_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_fields_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS) {
+
+ Array<AnnotationArray*>* fields_type_annotations = scratch_class->fields_type_annotations();
+ if (fields_type_annotations == NULL || fields_type_annotations->length() == 0) {
+ // no fields_type_annotations so nothing to do
+ return true;
+ }
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("fields_type_annotations length=%d", fields_type_annotations->length()));
+
+ for (int i = 0; i < fields_type_annotations->length(); i++) {
+ AnnotationArray* field_type_annotations = fields_type_annotations->at(i);
+ if (field_type_annotations == NULL || field_type_annotations->length() == 0) {
+ // this field does not have any annotations so skip it
+ continue;
+ }
+
+ int byte_i = 0; // byte index into field_type_annotations
+ if (!rewrite_cp_refs_in_type_annotations_typeArray(field_type_annotations,
+ byte_i, "field_info", THREAD)) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("bad field_type_annotations at %d", i));
+ // propagate failure back to caller
+ return false;
+ }
+ }
+
+ return true;
+} // end rewrite_cp_refs_in_fields_type_annotations()
+
+
+// Rewrite constant pool references in a methods_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_methods_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS) {
+
+ for (int i = 0; i < scratch_class->methods()->length(); i++) {
+ Method* m = scratch_class->methods()->at(i);
+ AnnotationArray* method_type_annotations = m->constMethod()->type_annotations();
+
+ if (method_type_annotations == NULL || method_type_annotations->length() == 0) {
+ // this method does not have any annotations so skip it
+ continue;
+ }
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("methods type_annotations length=%d", method_type_annotations->length()));
+
+ int byte_i = 0; // byte index into method_type_annotations
+ if (!rewrite_cp_refs_in_type_annotations_typeArray(method_type_annotations,
+ byte_i, "method_info", THREAD)) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("bad method_type_annotations at %d", i));
+ // propagate failure back to caller
+ return false;
+ }
+ }
+
+ return true;
+} // end rewrite_cp_refs_in_methods_type_annotations()
+
+
+// Rewrite constant pool references in a type_annotations
+// field. This "structure" is adapted from the
+// RuntimeVisibleTypeAnnotations_attribute described in
+// section 4.7.20 of the Java SE 8 Edition of the VM spec:
+//
+// type_annotations_typeArray {
+// u2 num_annotations;
+// type_annotation annotations[num_annotations];
+// }
+//
+bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotations_typeArray(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS) {
+
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ // not enough room for num_annotations field
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for num_annotations field"));
+ return false;
+ }
+
+ u2 num_annotations = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("num_type_annotations=%d", num_annotations));
+
+ int calc_num_annotations = 0;
+ for (; calc_num_annotations < num_annotations; calc_num_annotations++) {
+ if (!rewrite_cp_refs_in_type_annotation_struct(type_annotations_typeArray,
+ byte_i_ref, location_mesg, THREAD)) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("bad type_annotation_struct at %d", calc_num_annotations));
+ // propagate failure back to caller
+ return false;
+ }
+ }
+ assert(num_annotations == calc_num_annotations, "sanity check");
+
+ if (byte_i_ref != type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("read wrong amount of bytes at end of processing "
+ "type_annotations_typeArray (%d of %d bytes were read)",
+ byte_i_ref, type_annotations_typeArray->length()));
+ return false;
+ }
+
+ return true;
+} // end rewrite_cp_refs_in_type_annotations_typeArray()
+
+
+// Rewrite constant pool references in a type_annotation
+// field. This "structure" is adapted from the
+// RuntimeVisibleTypeAnnotations_attribute described in
+// section 4.7.20 of the Java SE 8 Edition of the VM spec:
+//
+// type_annotation {
+// u1 target_type;
+// union {
+// type_parameter_target;
+// supertype_target;
+// type_parameter_bound_target;
+// empty_target;
+// method_formal_parameter_target;
+// throws_target;
+// localvar_target;
+// catch_target;
+// offset_target;
+// type_argument_target;
+// } target_info;
+// type_path target_path;
+// annotation anno;
+// }
+//
+bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotation_struct(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS) {
+
+ if (!skip_type_annotation_target(type_annotations_typeArray,
+ byte_i_ref, location_mesg, THREAD)) {
+ return false;
+ }
+
+ if (!skip_type_annotation_type_path(type_annotations_typeArray,
+ byte_i_ref, THREAD)) {
+ return false;
+ }
+
+ if (!rewrite_cp_refs_in_annotation_struct(type_annotations_typeArray,
+ byte_i_ref, THREAD)) {
+ return false;
+ }
+
+ return true;
+} // end rewrite_cp_refs_in_type_annotation_struct()
+
+
+// Read, verify and skip over the target_type and target_info part
+// so that rewriting can continue in the later parts of the struct.
+//
+// u1 target_type;
+// union {
+// type_parameter_target;
+// supertype_target;
+// type_parameter_bound_target;
+// empty_target;
+// method_formal_parameter_target;
+// throws_target;
+// localvar_target;
+// catch_target;
+// offset_target;
+// type_argument_target;
+// } target_info;
+//
+bool VM_RedefineClasses::skip_type_annotation_target(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS) {
+
+ if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+ // not enough room for a target_type let alone the rest of a type_annotation
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a target_type"));
+ return false;
+ }
+
+ u1 target_type = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("target_type=0x%.2x", target_type));
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("location=%s", location_mesg));
+
+ // Skip over target_info
+ switch (target_type) {
+ case 0x00:
+ // kind: type parameter declaration of generic class or interface
+ // location: ClassFile
+ case 0x01:
+ // kind: type parameter declaration of generic method or constructor
+ // location: method_info
+
+ {
+ // struct:
+ // type_parameter_target {
+ // u1 type_parameter_index;
+ // }
+ //
+ if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a type_parameter_target"));
+ return false;
+ }
+
+ u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_parameter_target: type_parameter_index=%d",
+ type_parameter_index));
+ } break;
+
+ case 0x10:
+ // kind: type in extends clause of class or interface declaration
+ // (including the direct superclass of an anonymous class declaration),
+ // or in implements clause of interface declaration
+ // location: ClassFile
+
+ {
+ // struct:
+ // supertype_target {
+ // u2 supertype_index;
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a supertype_target"));
+ return false;
+ }
+
+ u2 supertype_index = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("supertype_target: supertype_index=%d", supertype_index));
+ } break;
+
+ case 0x11:
+ // kind: type in bound of type parameter declaration of generic class or interface
+ // location: ClassFile
+ case 0x12:
+ // kind: type in bound of type parameter declaration of generic method or constructor
+ // location: method_info
+
+ {
+ // struct:
+ // type_parameter_bound_target {
+ // u1 type_parameter_index;
+ // u1 bound_index;
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a type_parameter_bound_target"));
+ return false;
+ }
+
+ u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+ u1 bound_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_parameter_bound_target: type_parameter_index=%d, bound_index=%d",
+ type_parameter_index, bound_index));
+ } break;
+
+ case 0x13:
+ // kind: type in field declaration
+ // location: field_info
+ case 0x14:
+ // kind: return type of method, or type of newly constructed object
+ // location: method_info
+ case 0x15:
+ // kind: receiver type of method or constructor
+ // location: method_info
+
+ {
+ // struct:
+ // empty_target {
+ // }
+ //
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("empty_target"));
+ } break;
+
+ case 0x16:
+ // kind: type in formal parameter declaration of method, constructor, or lambda expression
+ // location: method_info
+
+ {
+ // struct:
+ // formal_parameter_target {
+ // u1 formal_parameter_index;
+ // }
+ //
+ if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a formal_parameter_target"));
+ return false;
+ }
+
+ u1 formal_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("formal_parameter_target: formal_parameter_index=%d",
+ formal_parameter_index));
+ } break;
+
+ case 0x17:
+ // kind: type in throws clause of method or constructor
+ // location: method_info
+
+ {
+ // struct:
+ // throws_target {
+ // u2 throws_type_index
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a throws_target"));
+ return false;
+ }
+
+ u2 throws_type_index = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("throws_target: throws_type_index=%d", throws_type_index));
+ } break;
+
+ case 0x40:
+ // kind: type in local variable declaration
+ // location: Code
+ case 0x41:
+ // kind: type in resource variable declaration
+ // location: Code
+
+ {
+ // struct:
+ // localvar_target {
+ // u2 table_length;
+ // struct {
+ // u2 start_pc;
+ // u2 length;
+ // u2 index;
+ // } table[table_length];
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ // not enough room for a table_length let alone the rest of a localvar_target
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a localvar_target table_length"));
+ return false;
+ }
+
+ u2 table_length = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("localvar_target: table_length=%d", table_length));
+
+ int table_struct_size = 2 + 2 + 2; // 3 u2 variables per table entry
+ int table_size = table_length * table_struct_size;
+
+ if ((byte_i_ref + table_size) > type_annotations_typeArray->length()) {
+ // not enough room for a table
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a table array of length %d", table_length));
+ return false;
+ }
+
+ // Skip over table
+ byte_i_ref += table_size;
+ } break;
+
+ case 0x42:
+ // kind: type in exception parameter declaration
+ // location: Code
+
+ {
+ // struct:
+ // catch_target {
+ // u2 exception_table_index;
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a catch_target"));
+ return false;
+ }
+
+ u2 exception_table_index = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("catch_target: exception_table_index=%d", exception_table_index));
+ } break;
+
+ case 0x43:
+ // kind: type in instanceof expression
+ // location: Code
+ case 0x44:
+ // kind: type in new expression
+ // location: Code
+ case 0x45:
+ // kind: type in method reference expression using ::new
+ // location: Code
+ case 0x46:
+ // kind: type in method reference expression using ::Identifier
+ // location: Code
+
+ {
+ // struct:
+ // offset_target {
+ // u2 offset;
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a offset_target"));
+ return false;
+ }
+
+ u2 offset = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("offset_target: offset=%d", offset));
+ } break;
+
+ case 0x47:
+ // kind: type in cast expression
+ // location: Code
+ case 0x48:
+ // kind: type argument for generic constructor in new expression or
+ // explicit constructor invocation statement
+ // location: Code
+ case 0x49:
+ // kind: type argument for generic method in method invocation expression
+ // location: Code
+ case 0x4A:
+ // kind: type argument for generic constructor in method reference expression using ::new
+ // location: Code
+ case 0x4B:
+ // kind: type argument for generic method in method reference expression using ::Identifier
+ // location: Code
+
+ {
+ // struct:
+ // type_argument_target {
+ // u2 offset;
+ // u1 type_argument_index;
+ // }
+ //
+ if ((byte_i_ref + 3) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a type_argument_target"));
+ return false;
+ }
+
+ u2 offset = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+ u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_argument_target: offset=%d, type_argument_index=%d",
+ offset, type_argument_index));
+ } break;
+
+ default:
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("unknown target_type"));
+#ifdef ASSERT
+ ShouldNotReachHere();
+#endif
+ return false;
+ }
+
+ return true;
+} // end skip_type_annotation_target()
+
+
+// Read, verify and skip over the type_path part so that rewriting
+// can continue in the later parts of the struct.
+//
+// type_path {
+// u1 path_length;
+// {
+// u1 type_path_kind;
+// u1 type_argument_index;
+// } path[path_length];
+// }
+//
+bool VM_RedefineClasses::skip_type_annotation_type_path(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS) {
+
+ if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+ // not enough room for a path_length let alone the rest of the type_path
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a type_path"));
+ return false;
+ }
+
+ u1 path_length = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_path: path_length=%d", path_length));
+
+ int calc_path_length = 0;
+ for (; calc_path_length < path_length; calc_path_length++) {
+ if ((byte_i_ref + 1 + 1) > type_annotations_typeArray->length()) {
+ // not enough room for a path
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for path entry %d of %d",
+ calc_path_length, path_length));
+ return false;
+ }
+
+ u1 type_path_kind = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+ u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_path: path[%d]: type_path_kind=%d, type_argument_index=%d",
+ calc_path_length, type_path_kind, type_argument_index));
+
+ if (type_path_kind > 3 || (type_path_kind != 3 && type_argument_index != 0)) {
+ // not enough room for a path
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("inconsistent type_path values"));
+ return false;
+ }
+ }
+ assert(path_length == calc_path_length, "sanity check");
+
+ return true;
+} // end skip_type_annotation_type_path()
+
+
// Rewrite constant pool references in the method's stackmap table.
// These "structures" are adapted from the StackMapTable_attribute that
// is described in section 4.8.4 of the 6.0 version of the VM spec
@@ -3223,23 +3828,6 @@

void VM_RedefineClasses::swap_annotations(instanceKlassHandle the_class,
instanceKlassHandle scratch_class) {
- // Since there is currently no rewriting of type annotations indexes
- // into the CP, we null out type annotations on scratch_class before
- // we swap annotations with the_class rather than facing the
- // possibility of shipping annotations with broken indexes to
- // Java-land.
- ClassLoaderData* loader_data = scratch_class->class_loader_data();
- AnnotationArray* new_class_type_annotations = scratch_class->class_type_annotations();
- if (new_class_type_annotations != NULL) {
- MetadataFactory::free_array<u1>(loader_data, new_class_type_annotations);
- scratch_class->annotations()->set_class_type_annotations(NULL);
- }
- Array<AnnotationArray*>* new_field_type_annotations = scratch_class->fields_type_annotations();
- if (new_field_type_annotations != NULL) {
- Annotations::free_contents(loader_data, new_field_type_annotations);
- scratch_class->annotations()->set_fields_type_annotations(NULL);
- }
-
// Swap annotation fields values
Annotations* old_annotations = the_class->annotations();
the_class->set_annotations(scratch_class->annotations());
--- jdk8/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp 2015-01-08 21:23:31.185148024 +0100
@@ -457,6 +457,17 @@
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_element_value(
AnnotationArray* class_annotations, int &byte_i_ref, TRAPS);
+ bool rewrite_cp_refs_in_type_annotations_typeArray(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS);
+ bool rewrite_cp_refs_in_type_annotation_struct(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS);
+ bool skip_type_annotation_target(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS);
+ bool skip_type_annotation_type_path(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS);
bool rewrite_cp_refs_in_fields_annotations(
instanceKlassHandle scratch_class, TRAPS);
void rewrite_cp_refs_in_method(methodHandle method,
@@ -468,6 +479,12 @@
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_methods_parameter_annotations(
instanceKlassHandle scratch_class, TRAPS);
+ bool rewrite_cp_refs_in_class_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS);
+ bool rewrite_cp_refs_in_fields_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS);
+ bool rewrite_cp_refs_in_methods_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS);
void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS);
void rewrite_cp_refs_in_verification_type_info(
address& stackmap_addr_ref, address stackmap_end, u2 frame_i,
--- jdk8/hotspot/src/share/vm/prims/methodHandles.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/methodHandles.cpp 2015-01-08 21:23:31.185148024 +0100
@@ -29,7 +29,6 @@
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
-#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/javaCalls.hpp"
@@ -271,9 +270,12 @@
// This is done eagerly, since it is readily available without
// constructing any new objects.
// TO DO: maybe intern mname_oop
- m->method_holder()->add_member_name(m->method_idnum(), mname);
-
+ if (m->method_holder()->add_member_name(mname)) {
return mname();
+ } else {
+ // Redefinition caused this to fail. Return NULL (and an exception?)
+ return NULL;
+ }
}

oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
@@ -946,63 +948,27 @@
}
}

-void MemberNameTable::add_member_name(int index, jweak mem_name_wref) {
+void MemberNameTable::add_member_name(jweak mem_name_wref) {
assert_locked_or_safepoint(MemberNameTable_lock);
- this->at_put_grow(index, mem_name_wref);
-}
-
-// Return a member name oop or NULL.
-oop MemberNameTable::get_member_name(int index) {
- assert_locked_or_safepoint(MemberNameTable_lock);
-
- jweak ref = this->at(index);
- oop mem_name = JNIHandles::resolve(ref);
- return mem_name;
+ this->push(mem_name_wref);
}

#if INCLUDE_JVMTI
-oop MemberNameTable::find_member_name_by_method(Method* old_method) {
- assert_locked_or_safepoint(MemberNameTable_lock);
- oop found = NULL;
- int len = this->length();
-
- for (int idx = 0; idx < len; idx++) {
- oop mem_name = JNIHandles::resolve(this->at(idx));
- if (mem_name == NULL) {
- continue;
- }
- Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name);
- if (method == old_method) {
- found = mem_name;
- break;
- }
- }
- return found;
-}
-
-// It is called at safepoint only
+// It is called at safepoint only for RedefineClasses
void MemberNameTable::adjust_method_entries(Method** old_methods, Method** new_methods,
int methods_length, bool *trace_name_printed) {
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
- // search the MemberNameTable for uses of either obsolete or EMCP methods
+ // For each redefined method
for (int j = 0; j < methods_length; j++) {
Method* old_method = old_methods[j];
Method* new_method = new_methods[j];
- oop mem_name = find_member_name_by_method(old_method);
- if (mem_name != NULL) {
- java_lang_invoke_MemberName::adjust_vmtarget(mem_name, new_method);

- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
- if (!(*trace_name_printed)) {
- // RC_TRACE_MESG macro has an embedded ResourceMark
- RC_TRACE_MESG(("adjust: name=%s",
- old_method->method_holder()->external_name()));
- *trace_name_printed = true;
- }
- // RC_TRACE macro has an embedded ResourceMark
- RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
- new_method->name()->as_C_string(),
- new_method->signature()->as_C_string()));
+ // search the MemberNameTable for uses of either obsolete or EMCP methods
+ for (int idx = 0; idx < length(); idx++) {
+ oop mem_name = JNIHandles::resolve(this->at(idx));
+ if (mem_name != NULL) {
+ java_lang_invoke_MemberName::adjust_vmtarget(mem_name, old_method, new_method,
+ trace_name_printed);
}
}
}
--- jdk8/hotspot/src/share/vm/prims/methodHandles.hpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/methodHandles.hpp 2015-01-08 21:23:31.185148024 +0100
@@ -239,18 +239,14 @@
public:
MemberNameTable(int methods_cnt);
~MemberNameTable();
- void add_member_name(int index, jweak mem_name_ref);
- oop get_member_name(int index);
+ void add_member_name(jweak mem_name_ref);

#if INCLUDE_JVMTI
- public:
// RedefineClasses() API support:
// If a MemberName refers to old_method then update it
// to refer to new_method.
void adjust_method_entries(Method** old_methods, Method** new_methods,
int methods_length, bool *trace_name_printed);
- private:
- oop find_member_name_by_method(Method* old_method);
#endif // INCLUDE_JVMTI
};

--- jdk8/hotspot/src/share/vm/prims/whitebox.cpp 2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/prims/whitebox.cpp 2015-01-08 21:23:31.186148000 +0100
@@ -56,6 +56,7 @@
#endif // INCLUDE_NMT

#include "compiler/compileBroker.hpp"
+#include "jvmtifiles/jvmtiEnv.hpp"
#include "runtime/compilationPolicy.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -104,6 +105,51 @@
return closure.found();
WB_END

+WB_ENTRY(jboolean, WB_ClassKnownToNotExist(JNIEnv* env, jobject o, jobject loader, jstring name))
+ ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+ const char* class_name = env->GetStringUTFChars(name, NULL);
+ jboolean result = JVM_KnownToNotExist(env, loader, class_name);
+ env->ReleaseStringUTFChars(name, class_name);
+ return result;
+WB_END
+
+WB_ENTRY(jobjectArray, WB_GetLookupCacheURLs(JNIEnv* env, jobject o, jobject loader))
+ ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+ return JVM_GetResourceLookupCacheURLs(env, loader);
+WB_END
+
+WB_ENTRY(jintArray, WB_GetLookupCacheMatches(JNIEnv* env, jobject o, jobject loader, jstring name))
+ ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+ const char* resource_name = env->GetStringUTFChars(name, NULL);
+ jintArray result = JVM_GetResourceLookupCache(env, loader, resource_name);
+
+ env->ReleaseStringUTFChars(name, resource_name);
+ return result;
+WB_END
+
+WB_ENTRY(void, WB_AddToBootstrapClassLoaderSearch(JNIEnv* env, jobject o, jstring segment)) {
+#if INCLUDE_JVMTI
+ ResourceMark rm;
+ const char* seg = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(segment));
+ JvmtiEnv* jvmti_env = JvmtiEnv::create_a_jvmti(JVMTI_VERSION);
+ jvmtiError err = jvmti_env->AddToBootstrapClassLoaderSearch(seg);
+ assert(err == JVMTI_ERROR_NONE, "must not fail");
+#endif
+}
+WB_END
+
+WB_ENTRY(void, WB_AddToSystemClassLoaderSearch(JNIEnv* env, jobject o, jstring segment)) {
+#if INCLUDE_JVMTI
+ ResourceMark rm;
+ const char* seg = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(segment));
+ JvmtiEnv* jvmti_env = JvmtiEnv::create_a_jvmti(JVMTI_VERSION);
+ jvmtiError err = jvmti_env->AddToSystemClassLoaderSearch(seg);
+ assert(err == JVMTI_ERROR_NONE, "must not fail");
+#endif
+}
+WB_END
+
+
WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
return (jlong)Arguments::max_heap_for_compressed_oops();
}
@@ -287,7 +333,7 @@
WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
address pc = (address)(size_t)pseudo_stack;
NativeCallStack stack(&pc, 1);
- return (jlong)os::malloc(size, mtTest, stack);
+ return (jlong)(uintptr_t)os::malloc(size, mtTest, stack);
WB_END

// Free the memory allocated by NMTAllocTest
@@ -322,15 +368,6 @@
return MemTracker::tracking_level() == NMT_detail;
WB_END

-WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
- address pc = (address)1;
- for (jlong index = 0; index < num; index ++) {
- NativeCallStack stack(&pc, 1);
- os::malloc(0, mtTest, stack);
- pc += MallocSiteTable::hash_buckets();
- }
-WB_END
-
WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
// Test that we can downgrade NMT levels but not upgrade them.
if (MemTracker::tracking_level() == NMT_off) {
@@ -361,6 +398,12 @@
return MemTracker::tracking_level() == NMT_minimal;
}
WB_END
+
+WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
+ int hash_size = MallocSiteTable::hash_buckets();
+ assert(hash_size > 0, "NMT hash_size should be > 0");
+ return (jint)hash_size;
+WB_END
#endif // INCLUDE_NMT

static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -382,19 +425,10 @@
CHECK_JNI_EXCEPTION_(env, result);
MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
- nmethod* code;
if (is_osr) {
- int bci = InvocationEntryBci;
- while ((code = mh->lookup_osr_nmethod_for(bci, CompLevel_none, false)) != NULL) {
- code->mark_for_deoptimization();
- ++result;
- bci = code->osr_entry_bci() + 1;
- }
- } else {
- code = mh->code();
- }
- if (code != NULL) {
- code->mark_for_deoptimization();
+ result += mh->mark_osr_nmethods();
+ } else if (mh->code() != NULL) {
+ mh->code()->mark_for_deoptimization();
++result;
}
result += CodeCache::mark_for_deoptimization(mh());
@@ -939,10 +973,19 @@
{CC"isObjectInOldGen", CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen },
{CC"getHeapOopSize", CC"()I", (void*)&WB_GetHeapOopSize },
{CC"isClassAlive0", CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive },
+ {CC"classKnownToNotExist",
+ CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)Z",(void*)&WB_ClassKnownToNotExist},
+ {CC"getLookupCacheURLs", CC"(Ljava/lang/ClassLoader;)[Ljava/net/URL;", (void*)&WB_GetLookupCacheURLs},
+ {CC"getLookupCacheMatches", CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)[I",
+ (void*)&WB_GetLookupCacheMatches},
{CC"parseCommandLine",
CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
(void*) &WB_ParseCommandLine
},
+ {CC"addToBootstrapClassLoaderSearch", CC"(Ljava/lang/String;)V",
+ (void*)&WB_AddToBootstrapClassLoaderSearch},
+ {CC"addToSystemClassLoaderSearch", CC"(Ljava/lang/String;)V",
+ (void*)&WB_AddToSystemClassLoaderSearch},
{CC"getCompressedOopsMaxHeapSize", CC"()J",
(void*)&WB_GetCompressedOopsMaxHeapSize},
{CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes },
@@ -963,9 +1006,9 @@
{CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
- {CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket},
{CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
{CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel},
+ {CC"NMTGetHashSize", CC"()I", (void*)&WB_NMTGetHashSize },
#endif // INCLUDE_NMT
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },
{CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I",
--- jdk8/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp 2015-01-08 21:23:31.186148000 +0100
|
|
@@ -451,7 +451,7 @@
|
|
if (should_create_mdo(mh(), level)) {
|
|
create_mdo(mh, thread);
|
|
}
|
|
- if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
|
|
+ if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
|
|
CompLevel next_level = call_event(mh(), level);
|
|
if (next_level != level) {
|
|
compile(mh, InvocationEntryBci, next_level, thread);
|
|
@@ -475,7 +475,7 @@
|
|
CompLevel next_osr_level = loop_event(imh(), level);
|
|
CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
|
|
// At the very least compile the OSR version
|
|
- if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
|
|
+ if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
|
|
compile(imh, bci, next_osr_level, thread);
|
|
}
|
|
|
|
@@ -509,7 +509,7 @@
|
|
nm->make_not_entrant();
|
|
}
|
|
}
|
|
- if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
|
|
+ if (!CompileBroker::compilation_is_in_queue(mh)) {
|
|
// Fix up next_level if necessary to avoid deopts
|
|
if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
|
|
next_level = CompLevel_full_profile;
|
|
@@ -521,7 +521,7 @@
|
|
} else {
|
|
cur_level = comp_level(imh());
|
|
next_level = call_event(imh(), cur_level);
|
|
- if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
|
|
+ if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
|
|
compile(imh, InvocationEntryBci, next_level, thread);
|
|
}
|
|
}
|
|
--- jdk8/hotspot/src/share/vm/runtime/arguments.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/arguments.cpp 2015-01-08 21:23:31.187147976 +0100
|
|
@@ -66,7 +66,7 @@
|
|
#endif // INCLUDE_ALL_GCS
|
|
|
|
// Note: This is a special bug reporting site for the JVM
|
|
-#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp"
|
|
+#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
|
|
#define DEFAULT_JAVA_LAUNCHER "generic"
|
|
|
|
// Disable options not supported in this release, with a warning if they
|
|
@@ -300,6 +300,7 @@
|
|
{ "UseStringCache", JDK_Version::jdk(8), JDK_Version::jdk(9) },
|
|
{ "UseOldInlining", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
|
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
|
|
+ { "CompilationRepeat", JDK_Version::jdk(8), JDK_Version::jdk(9) },
|
|
#ifdef PRODUCT
|
|
{ "DesiredMethodLimit",
|
|
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
|
|
@@ -1146,6 +1147,32 @@
|
|
}
|
|
}
|
|
|
|
+/**
|
|
+ * Returns the minimum number of compiler threads needed to run the JVM. The following
|
|
+ * configurations are possible:
|
|
+ *
|
|
+ * 1) The JVM is built using an interpreter only. As a result, the minimum number of
|
|
+ * compiler threads is 0.
|
|
+ * 2) The JVM is built using the compiler(s) and tiered compilation is disabled. As
|
|
+ * a result, either C1 or C2 is used, so the minimum number of compiler threads is 1.
|
|
+ * 3) The JVM is built using the compiler(s) and tiered compilation is enabled. However,
|
|
+ * the option "TieredStopAtLevel < CompLevel_full_optimization". As a result, only
|
|
+ * C1 can be used, so the minimum number of compiler threads is 1.
|
|
+ * 4) The JVM is built using the compilers and tiered compilation is enabled. The option
|
|
+ * 'TieredStopAtLevel' equals 'CompLevel_full_optimization' (the default value). As a result,
|
|
+ * the minimum number of compiler threads is 2.
|
|
+ */
|
|
+int Arguments::get_min_number_of_compiler_threads() {
|
|
+#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK)
|
|
+ return 0; // case 1
|
|
+#else
|
|
+ if (!TieredCompilation || (TieredStopAtLevel < CompLevel_full_optimization)) {
|
|
+ return 1; // case 2 or case 3
|
|
+ }
|
|
+ return 2; // case 4 (tiered)
|
|
+#endif
|
|
+}
|
|
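An illustrative consequence of this minimum, combined with the verify_min_value() check on CICompilerCount added further below (these flag combinations are examples, not an exhaustive list):

    -XX:-TieredCompilation -XX:CICompilerCount=1   // accepted: minimum is 1 (case 2)
    -XX:TieredStopAtLevel=1 -XX:CICompilerCount=1  // accepted: minimum is 1 (case 3)
    -XX:CICompilerCount=1                          // rejected on a default tiered build: minimum is 2 (case 4)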
+
|
|
#if INCLUDE_ALL_GCS
|
|
static void disable_adaptive_size_policy(const char* collector_name) {
|
|
if (UseAdaptiveSizePolicy) {
|
|
@@ -2207,7 +2234,7 @@
|
|
FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
|
|
}
|
|
|
|
- status = status && ArgumentsExt::check_gc_consistency_user();
|
|
+ status = status && check_gc_consistency_user();
|
|
status = status && check_stack_pages();
|
|
|
|
if (CMSIncrementalMode) {
|
|
@@ -2462,6 +2489,12 @@
|
|
status &= verify_interval(SafepointPollOffset, 0, os::vm_page_size() - BytesPerWord, "SafepointPollOffset");
|
|
#endif
|
|
|
|
+ int min_number_of_compiler_threads = get_min_number_of_compiler_threads();
|
|
+ // The default value of CICompilerCount is CI_COMPILER_COUNT.
|
|
+ assert(min_number_of_compiler_threads <= CI_COMPILER_COUNT, "minimum should be less than or equal to the default number");
|
|
+ // Check the minimum number of compiler threads
|
|
+ status &= verify_min_value(CICompilerCount, min_number_of_compiler_threads, "CICompilerCount");
|
|
+
|
|
return status;
|
|
}
|
|
|
|
@@ -2930,6 +2963,23 @@
|
|
#endif
|
|
// -D
|
|
} else if (match_option(option, "-D", &tail)) {
|
|
+ if (CheckEndorsedAndExtDirs) {
|
|
+ if (match_option(option, "-Djava.endorsed.dirs=", &tail)) {
|
|
+ // abort if -Djava.endorsed.dirs is set
|
|
+ jio_fprintf(defaultStream::output_stream(),
|
|
+ "-Djava.endorsed.dirs will not be supported in a future release.\n"
|
|
+ "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n");
|
|
+ return JNI_EINVAL;
|
|
+ }
|
|
+ if (match_option(option, "-Djava.ext.dirs=", &tail)) {
|
|
+ // abort if -Djava.ext.dirs is set
|
|
+ jio_fprintf(defaultStream::output_stream(),
|
|
+ "-Djava.ext.dirs will not be supported in a future release.\n"
|
|
+ "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n");
|
|
+ return JNI_EINVAL;
|
|
+ }
|
|
+ }
|
|
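A hypothetical invocation showing the new failure mode; the message text is verbatim from the code above, though the surrounding launcher output may differ:

    $ java -XX:+CheckEndorsedAndExtDirs -Djava.ext.dirs=/opt/ext -version
    -Djava.ext.dirs will not be supported in a future release.
    Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).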
+
|
|
if (!add_property(tail)) {
|
|
return JNI_ENOMEM;
|
|
}
|
|
@@ -3363,6 +3413,146 @@
|
|
}
|
|
}
|
|
|
|
+static bool has_jar_files(const char* directory) {
|
|
+ DIR* dir = os::opendir(directory);
|
|
+ if (dir == NULL) return false;
|
|
+
|
|
+ struct dirent *entry;
|
|
+ char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal);
|
|
+ bool hasJarFile = false;
|
|
+ while (!hasJarFile && (entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
|
|
+ const char* name = entry->d_name;
|
|
+ const char* ext = name + strlen(name) - 4;
|
|
+ hasJarFile = ext > name && (os::file_name_strcmp(ext, ".jar") == 0);
|
|
+ }
|
|
+ FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
|
|
+ os::closedir(dir);
|
|
+ return hasJarFile;
|
|
+}
|
|
+
|
|
+// Returns the number of directories in the given path that contain JAR files.
|
|
+// If the skip argument is not NULL, that directory is skipped.
|
|
+static int check_non_empty_dirs(const char* path, const char* type, const char* skip) {
|
|
+ const char separator = *os::path_separator();
|
|
+ const char* const end = path + strlen(path);
|
|
+ int nonEmptyDirs = 0;
|
|
+ while (path < end) {
|
|
+ const char* tmp_end = strchr(path, separator);
|
|
+ if (tmp_end == NULL) {
|
|
+ if ((skip == NULL || strcmp(path, skip) != 0) && has_jar_files(path)) {
|
|
+ nonEmptyDirs++;
|
|
+ jio_fprintf(defaultStream::output_stream(),
|
|
+ "Non-empty %s directory: %s\n", type, path);
|
|
+ }
|
|
+ path = end;
|
|
+ } else {
|
|
+ char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtInternal);
|
|
+ memcpy(dirpath, path, tmp_end - path);
|
|
+ dirpath[tmp_end - path] = '\0';
|
|
+ if ((skip == NULL || strcmp(dirpath, skip) != 0) && has_jar_files(dirpath)) {
|
|
+ nonEmptyDirs++;
|
|
+ jio_fprintf(defaultStream::output_stream(),
|
|
+ "Non-empty %s directory: %s\n", type, dirpath);
|
|
+ }
|
|
+ FREE_C_HEAP_ARRAY(char, dirpath, mtInternal);
|
|
+ path = tmp_end + 1;
|
|
+ }
|
|
+ }
|
|
+ return nonEmptyDirs;
|
|
+}
|
|
+
|
|
+// Returns true if the endorsed standards override mechanism and the extension mechanism
|
|
+// are not used.
|
|
+static bool check_endorsed_and_ext_dirs() {
|
|
+ if (!CheckEndorsedAndExtDirs)
|
|
+ return true;
|
|
+
|
|
+ char endorsedDir[JVM_MAXPATHLEN];
|
|
+ char extDir[JVM_MAXPATHLEN];
|
|
+ const char* fileSep = os::file_separator();
|
|
+ jio_snprintf(endorsedDir, sizeof(endorsedDir), "%s%slib%sendorsed",
|
|
+ Arguments::get_java_home(), fileSep, fileSep);
|
|
+ jio_snprintf(extDir, sizeof(extDir), "%s%slib%sext",
|
|
+ Arguments::get_java_home(), fileSep, fileSep);
|
|
+
|
|
+ // check endorsed directory
|
|
+ int nonEmptyDirs = check_non_empty_dirs(Arguments::get_endorsed_dir(), "endorsed", NULL);
|
|
+
|
|
+ // check the extension directories but skip the default lib/ext directory
|
|
+ nonEmptyDirs += check_non_empty_dirs(Arguments::get_ext_dirs(), "extension", extDir);
|
|
+
|
|
+ // List of JAR files installed in the default lib/ext directory.
|
|
+ // -XX:+CheckEndorsedAndExtDirs checks if any non-JDK file is installed
|
|
+ static const char* jdk_ext_jars[] = {
|
|
+ "access-bridge-32.jar",
|
|
+ "access-bridge-64.jar",
|
|
+ "access-bridge.jar",
|
|
+ "cldrdata.jar",
|
|
+ "dnsns.jar",
|
|
+ "jaccess.jar",
|
|
+ "jfxrt.jar",
|
|
+ "localedata.jar",
|
|
+ "nashorn.jar",
|
|
+ "sunec.jar",
|
|
+ "sunjce_provider.jar",
|
|
+ "sunmscapi.jar",
|
|
+ "sunpkcs11.jar",
|
|
+ "ucrypto.jar",
|
|
+ "zipfs.jar",
|
|
+ NULL
|
|
+ };
|
|
+
|
|
+ // check if the default lib/ext directory has any non-JDK jar files; if so, error
|
|
+ DIR* dir = os::opendir(extDir);
|
|
+ if (dir != NULL) {
|
|
+ int num_ext_jars = 0;
|
|
+ struct dirent *entry;
|
|
+ char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(extDir), mtInternal);
|
|
+ while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
|
|
+ const char* name = entry->d_name;
|
|
+ const char* ext = name + strlen(name) - 4;
|
|
+ if (ext > name && (os::file_name_strcmp(ext, ".jar") == 0)) {
|
|
+ bool is_jdk_jar = false;
|
|
+ const char* jarfile = NULL;
|
|
+ for (int i=0; (jarfile = jdk_ext_jars[i]) != NULL; i++) {
|
|
+ if (os::file_name_strcmp(name, jarfile) == 0) {
|
|
+ is_jdk_jar = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (!is_jdk_jar) {
|
|
+ jio_fprintf(defaultStream::output_stream(),
|
|
+ "%s installed in <JAVA_HOME>/lib/ext\n", name);
|
|
+ num_ext_jars++;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
|
|
+ os::closedir(dir);
|
|
+ if (num_ext_jars > 0) {
|
|
+ nonEmptyDirs += 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // check if the default lib/endorsed directory exists; if so, error
|
|
+ dir = os::opendir(endorsedDir);
|
|
+ if (dir != NULL) {
|
|
+ jio_fprintf(defaultStream::output_stream(), "<JAVA_HOME>/lib/endorsed exists\n");
|
|
+ os::closedir(dir);
|
|
+ nonEmptyDirs += 1;
|
|
+ }
|
|
+
|
|
+ if (nonEmptyDirs > 0) {
|
|
+ jio_fprintf(defaultStream::output_stream(),
|
|
+ "Endorsed standards override mechanism and extension mechanism "
|
|
+ "will not be supported in a future release.\n"
|
|
+ "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required) {
|
|
// This must be done after all -D arguments have been processed.
|
|
scp_p->expand_endorsed();
|
|
@@ -3372,6 +3562,10 @@
|
|
Arguments::set_sysclasspath(scp_p->combined_path());
|
|
}
|
|
|
|
+ if (!check_endorsed_and_ext_dirs()) {
|
|
+ return JNI_ERR;
|
|
+ }
|
|
+
|
|
// This must be done after all arguments have been processed.
|
|
// java_compiler() true means set to "NONE" or empty.
|
|
if (java_compiler() && !xdebug_mode()) {
|
|
@@ -3432,7 +3626,7 @@
|
|
}
|
|
}
|
|
|
|
- if (!ArgumentsExt::check_vm_args_consistency()) {
|
|
+ if (!check_vm_args_consistency()) {
|
|
return JNI_ERR;
|
|
}
|
|
|
|
@@ -3618,6 +3812,8 @@
|
|
bool settings_file_specified = false;
|
|
bool needs_hotspotrc_warning = false;
|
|
|
|
+ ArgumentsExt::process_options(args);
|
|
+
|
|
const char* flags_file;
|
|
int index;
|
|
for (index = 0; index < args->nOptions; index++) {
|
|
@@ -3833,7 +4029,7 @@
|
|
// Set heap size based on available physical memory
|
|
set_heap_size();
|
|
|
|
- set_gc_specific_flags();
|
|
+ ArgumentsExt::set_gc_specific_flags();
|
|
|
|
// Initialize Metaspace flags and alignments.
|
|
Metaspace::ergo_initialize();
|
|
--- jdk8/hotspot/src/share/vm/runtime/arguments_ext.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/arguments_ext.hpp 2015-01-08 21:23:31.187147976 +0100
|
|
@@ -31,25 +31,21 @@
|
|
class ArgumentsExt: AllStatic {
|
|
public:
|
|
static inline void select_gc_ergonomically();
|
|
- static inline bool check_gc_consistency_user();
|
|
+ static inline void set_gc_specific_flags();
|
|
static inline bool check_gc_consistency_ergo();
|
|
- static inline bool check_vm_args_consistency();
|
|
+ static void process_options(const JavaVMInitArgs* args) {}
|
|
};
|
|
|
|
void ArgumentsExt::select_gc_ergonomically() {
|
|
Arguments::select_gc_ergonomically();
|
|
}
|
|
|
|
-bool ArgumentsExt::check_gc_consistency_user() {
|
|
- return Arguments::check_gc_consistency_user();
|
|
+void ArgumentsExt::set_gc_specific_flags() {
|
|
+ Arguments::set_gc_specific_flags();
|
|
}
|
|
|
|
bool ArgumentsExt::check_gc_consistency_ergo() {
|
|
return Arguments::check_gc_consistency_ergo();
|
|
}
|
|
|
|
-bool ArgumentsExt::check_vm_args_consistency() {
|
|
- return Arguments::check_vm_args_consistency();
|
|
-}
|
|
-
|
|
#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
|
|
--- jdk8/hotspot/src/share/vm/runtime/arguments.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/arguments.hpp 2015-01-08 21:23:31.187147976 +0100
|
|
@@ -327,6 +327,7 @@
|
|
|
|
// Tiered
|
|
static void set_tiered_flags();
|
|
+ static int get_min_number_of_compiler_threads();
|
|
// CMS/ParNew garbage collectors
|
|
static void set_parnew_gc_flags();
|
|
static void set_cms_and_parnew_gc_flags();
|
|
@@ -341,7 +342,6 @@
|
|
static void select_gc();
|
|
static void set_ergonomics_flags();
|
|
static void set_shared_spaces_flags();
|
|
- static void set_gc_specific_flags();
|
|
// limits the given memory size by the maximum amount of memory this process is
|
|
// currently allowed to allocate or reserve.
|
|
static julong limit_by_allocatable_memory(julong size);
|
|
@@ -453,6 +453,7 @@
|
|
// Adjusts the arguments after the OS have adjusted the arguments
|
|
static jint adjust_after_os();
|
|
|
|
+ static void set_gc_specific_flags();
|
|
static inline bool gc_selected(); // whether a gc has been selected
|
|
static void select_gc_ergonomically();
|
|
|
|
--- jdk8/hotspot/src/share/vm/runtime/deoptimization.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/deoptimization.cpp 2015-01-08 21:23:31.188147952 +0100
|
|
@@ -219,6 +219,8 @@
|
|
assert(vf->is_compiled_frame(), "Wrong frame type");
|
|
chunk->push(compiledVFrame::cast(vf));
|
|
|
|
+ bool realloc_failures = false;
|
|
+
|
|
#ifdef COMPILER2
|
|
// Reallocate the non-escaping objects and restore their fields. Then
|
|
// relock objects if synchronization on them was eliminated.
|
|
@@ -249,19 +251,16 @@
|
|
tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread);
|
|
}
|
|
}
|
|
- bool reallocated = false;
|
|
if (objects != NULL) {
|
|
JRT_BLOCK
|
|
- reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
|
|
+ realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
|
|
JRT_END
|
|
- }
|
|
- if (reallocated) {
|
|
- reassign_fields(&deoptee, &map, objects);
|
|
+ reassign_fields(&deoptee, &map, objects, realloc_failures);
|
|
#ifndef PRODUCT
|
|
if (TraceDeoptimization) {
|
|
ttyLocker ttyl;
|
|
tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
|
|
- print_objects(objects);
|
|
+ print_objects(objects, realloc_failures);
|
|
}
|
|
#endif
|
|
}
|
|
@@ -279,7 +278,7 @@
|
|
assert (cvf->scope() != NULL,"expect only compiled java frames");
|
|
GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
|
|
if (monitors->is_nonempty()) {
|
|
- relock_objects(monitors, thread);
|
|
+ relock_objects(monitors, thread, realloc_failures);
|
|
#ifndef PRODUCT
|
|
if (TraceDeoptimization) {
|
|
ttyLocker ttyl;
|
|
@@ -290,10 +289,15 @@
|
|
first = false;
|
|
tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
|
|
}
|
|
+ if (mi->owner_is_scalar_replaced()) {
|
|
+ Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
|
|
+ tty->print_cr(" failed reallocation for klass %s", k->external_name());
|
|
+ } else {
|
|
tty->print_cr(" object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
|
|
}
|
|
}
|
|
}
|
|
+ }
|
|
#endif
|
|
}
|
|
}
|
|
@@ -305,9 +309,14 @@
|
|
// out the java state residing in the vframeArray will be missed.
|
|
No_Safepoint_Verifier no_safepoint;
|
|
|
|
- vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);
|
|
+ vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
|
|
+#ifdef COMPILER2
|
|
+ if (realloc_failures) {
|
|
+ pop_frames_failed_reallocs(thread, array);
|
|
+ }
|
|
+#endif
|
|
|
|
- assert(thread->vframe_array_head() == NULL, "Pending deopt!");;
|
|
+ assert(thread->vframe_array_head() == NULL, "Pending deopt!");
|
|
thread->set_vframe_array_head(array);
|
|
|
|
// Now that the vframeArray has been created if we have any deferred local writes
|
|
@@ -759,6 +768,8 @@
|
|
int exception_line = thread->exception_line();
|
|
thread->clear_pending_exception();
|
|
|
|
+ bool failures = false;
|
|
+
|
|
for (int i = 0; i < objects->length(); i++) {
|
|
assert(objects->at(i)->is_object(), "invalid debug information");
|
|
ObjectValue* sv = (ObjectValue*) objects->at(i);
|
|
@@ -768,27 +779,34 @@
|
|
|
|
if (k->oop_is_instance()) {
|
|
InstanceKlass* ik = InstanceKlass::cast(k());
|
|
- obj = ik->allocate_instance(CHECK_(false));
|
|
+ obj = ik->allocate_instance(THREAD);
|
|
} else if (k->oop_is_typeArray()) {
|
|
TypeArrayKlass* ak = TypeArrayKlass::cast(k());
|
|
assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
|
|
int len = sv->field_size() / type2size[ak->element_type()];
|
|
- obj = ak->allocate(len, CHECK_(false));
|
|
+ obj = ak->allocate(len, THREAD);
|
|
} else if (k->oop_is_objArray()) {
|
|
ObjArrayKlass* ak = ObjArrayKlass::cast(k());
|
|
- obj = ak->allocate(sv->field_size(), CHECK_(false));
|
|
+ obj = ak->allocate(sv->field_size(), THREAD);
|
|
+ }
|
|
+
|
|
+ if (obj == NULL) {
|
|
+ failures = true;
|
|
}
|
|
|
|
- assert(obj != NULL, "allocation failed");
|
|
assert(sv->value().is_null(), "redundant reallocation");
|
|
+ assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
|
|
+ CLEAR_PENDING_EXCEPTION;
|
|
sv->set_value(obj);
|
|
}
|
|
|
|
- if (pending_exception.not_null()) {
|
|
+ if (failures) {
|
|
+ THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
|
|
+ } else if (pending_exception.not_null()) {
|
|
thread->set_pending_exception(pending_exception(), exception_file, exception_line);
|
|
}
|
|
|
|
- return true;
|
|
+ return failures;
|
|
}
|
|
|
|
// This assumes that the fields are stored in ObjectValue in the same order
|
|
@@ -926,12 +944,15 @@
|
|
|
|
|
|
// restore fields of all eliminated objects and arrays
|
|
-void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
|
|
+void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
|
|
for (int i = 0; i < objects->length(); i++) {
|
|
ObjectValue* sv = (ObjectValue*) objects->at(i);
|
|
KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
|
|
Handle obj = sv->value();
|
|
- assert(obj.not_null(), "reallocation was missed");
|
|
+ assert(obj.not_null() || realloc_failures, "reallocation was missed");
|
|
+ if (obj.is_null()) {
|
|
+ continue;
|
|
+ }
|
|
|
|
if (k->oop_is_instance()) {
|
|
InstanceKlass* ik = InstanceKlass::cast(k());
|
|
@@ -948,11 +969,12 @@
|
|
|
|
|
|
// relock objects for which synchronization was eliminated
|
|
-void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
|
|
+void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
|
|
for (int i = 0; i < monitors->length(); i++) {
|
|
MonitorInfo* mon_info = monitors->at(i);
|
|
if (mon_info->eliminated()) {
|
|
- assert(mon_info->owner() != NULL, "reallocation was missed");
|
|
+ assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
|
|
+ if (!mon_info->owner_is_scalar_replaced()) {
|
|
Handle obj = Handle(mon_info->owner());
|
|
markOop mark = obj->mark();
|
|
if (UseBiasedLocking && mark->has_bias_pattern()) {
|
|
@@ -967,15 +989,16 @@
|
|
}
|
|
BasicLock* lock = mon_info->lock();
|
|
ObjectSynchronizer::slow_enter(obj, lock, thread);
|
|
- }
|
|
assert(mon_info->owner()->is_locked(), "object must be locked now");
|
|
}
|
|
}
|
|
+ }
|
|
+}
|
|
|
|
|
|
#ifndef PRODUCT
|
|
// print information about reallocated objects
|
|
-void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
|
|
+void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
|
|
fieldDescriptor fd;
|
|
|
|
for (int i = 0; i < objects->length(); i++) {
|
|
@@ -985,10 +1008,15 @@
|
|
|
|
tty->print(" object <" INTPTR_FORMAT "> of type ", (void *)sv->value()());
|
|
k->print_value();
|
|
+ assert(obj.not_null() || realloc_failures, "reallocation was missed");
|
|
+ if (obj.is_null()) {
|
|
+ tty->print(" allocation failed");
|
|
+ } else {
|
|
tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
|
|
+ }
|
|
tty->cr();
|
|
|
|
- if (Verbose) {
|
|
+ if (Verbose && !obj.is_null()) {
|
|
k->oop_print_on(obj(), tty);
|
|
}
|
|
}
|
|
@@ -996,7 +1024,7 @@
|
|
#endif
|
|
#endif // COMPILER2
|
|
|
|
-vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
|
|
+vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
|
|
Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());
|
|
|
|
#ifndef PRODUCT
|
|
@@ -1039,7 +1067,7 @@
|
|
// Since the Java thread being deoptimized will eventually adjust it's own stack,
|
|
// the vframeArray containing the unpacking information is allocated in the C heap.
|
|
// For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
|
|
- vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);
|
|
+ vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
|
|
|
|
// Compare the vframeArray to the collected vframes
|
|
assert(array->structural_compare(thread, chunk), "just checking");
|
|
@@ -1054,6 +1082,33 @@
|
|
return array;
|
|
}
|
|
|
|
+#ifdef COMPILER2
|
|
+void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
|
|
+ // Reallocation of some scalar replaced objects failed. Record
|
|
+ // that we need to pop all the interpreter frames for the
|
|
+ // deoptimized compiled frame.
|
|
+ assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
|
|
+ thread->set_frames_to_pop_failed_realloc(array->frames());
|
|
+ // Unlock all monitors here otherwise the interpreter will see a
|
|
+ // mix of locked and unlocked monitors (because of failed
|
|
+ // reallocations of synchronized objects) and be confused.
|
|
+ for (int i = 0; i < array->frames(); i++) {
|
|
+ MonitorChunk* monitors = array->element(i)->monitors();
|
|
+ if (monitors != NULL) {
|
|
+ for (int j = 0; j < monitors->number_of_monitors(); j++) {
|
|
+ BasicObjectLock* src = monitors->at(j);
|
|
+ if (src->obj() != NULL) {
|
|
+ ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
|
|
+ }
|
|
+ }
|
|
+ array->element(i)->free_monitors(thread);
|
|
+#ifdef ASSERT
|
|
+ array->element(i)->set_removed_monitors();
|
|
+#endif
|
|
+ }
|
|
+ }
|
|
+}
|
|
+#endif
|
|
|
|
static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
|
|
GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
|
|
--- jdk8/hotspot/src/share/vm/runtime/deoptimization.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/deoptimization.hpp 2015-01-08 21:23:31.188147952 +0100
|
|
@@ -120,13 +120,14 @@
|
|
static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS);
|
|
static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
|
|
static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
|
|
- static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects);
|
|
- static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread);
|
|
- NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects);)
|
|
+ static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures);
|
|
+ static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures);
|
|
+ static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array);
|
|
+ NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures);)
|
|
#endif // COMPILER2
|
|
|
|
public:
|
|
- static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk);
|
|
+ static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures);
|
|
|
|
// Interface used for unpacking deoptimized frames
|
|
|
|
--- jdk8/hotspot/src/share/vm/runtime/globals.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/globals.cpp 2015-01-08 21:23:31.189147928 +0100
|
|
@@ -243,6 +243,11 @@
|
|
return is_unlocked_ext();
|
|
}
|
|
|
|
+void Flag::unlock_diagnostic() {
|
|
+ assert(is_diagnostic(), "sanity");
|
|
+ _flags = Flags(_flags & ~KIND_DIAGNOSTIC);
|
|
+}
|
|
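A sketch of how a caller might use this new primitive; the caller shown here is hypothetical (this hunk only adds the mechanism), and the find_flag signature is assumed from the JDK 8 Flag API:

    // Hypothetical: make a diagnostic flag writable without -XX:+UnlockDiagnosticVMOptions.
    Flag* flag = Flag::find_flag("PrintInlining", strlen("PrintInlining"), true /* allow_locked */);
    if (flag != NULL && flag->is_diagnostic()) {
      flag->unlock_diagnostic();  // clears KIND_DIAGNOSTIC, per the implementation above
    }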
+
|
|
// Get custom message for this locked flag, or return NULL if
|
|
// none is available.
|
|
void Flag::get_locked_message(char* buf, int buflen) const {
|
|
--- jdk8/hotspot/src/share/vm/runtime/globals.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/globals.hpp 2015-01-08 21:25:24.532426997 +0100
|
|
@@ -325,6 +325,8 @@
|
|
bool is_writeable_ext() const;
|
|
bool is_external_ext() const;
|
|
|
|
+ void unlock_diagnostic();
|
|
+
|
|
void get_locked_message(char*, int) const;
|
|
void get_locked_message_ext(char*, int) const;
|
|
|
|
@@ -1220,6 +1222,9 @@
|
|
product(bool, CheckJNICalls, false, \
|
|
"Verify all arguments to JNI calls") \
|
|
\
|
|
+ product(bool, CheckEndorsedAndExtDirs, false, \
|
|
+ "Verify the endorsed and extension directories are not used") \
|
|
+ \
|
|
product(bool, UseFastJNIAccessors, false, \
|
|
"Use optimized versions of Get<Primitive>Field") \
|
|
\
|
|
--- jdk8/hotspot/src/share/vm/runtime/interfaceSupport.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/interfaceSupport.cpp 2015-01-08 21:23:31.191147880 +0100
|
|
@@ -85,7 +85,7 @@
|
|
// Short-circuit any possible re-entrant gc-a-lot attempt
|
|
if (thread->skip_gcalot()) return;
|
|
|
|
- if (is_init_completed()) {
|
|
+ if (Threads::is_vm_complete()) {
|
|
|
|
if (++_fullgc_alot_invocation < FullGCALotStart) {
|
|
return;
|
|
--- jdk8/hotspot/src/share/vm/runtime/os.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/os.cpp 2015-01-08 21:23:31.191147880 +0100
|
|
@@ -571,17 +571,6 @@
|
|
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
|
|
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
|
|
|
|
-#if INCLUDE_NMT
|
|
- // NMT can not track malloc allocation size > MAX_MALLOC_SIZE, which is
|
|
- // (1GB - 1) on 32-bit system. It is not an issue on 64-bit system, where
|
|
- // MAX_MALLOC_SIZE = ((1 << 62) - 1).
|
|
- // VM code does not have such large malloc allocation. However, it can come
|
|
- // Unsafe call.
|
|
- if (MemTracker::tracking_level() >= NMT_summary && size > MAX_MALLOC_SIZE) {
|
|
- return NULL;
|
|
- }
|
|
-#endif
|
|
-
|
|
#ifdef ASSERT
|
|
// checking for the WatcherThread and crash_protection first
|
|
// since os::malloc can be called when the libjvm.{dll,so} is
|
|
@@ -652,12 +641,6 @@
|
|
}
|
|
|
|
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
|
|
-#if INCLUDE_NMT
|
|
- // See comments in os::malloc() above
|
|
- if (MemTracker::tracking_level() >= NMT_summary && size > MAX_MALLOC_SIZE) {
|
|
- return NULL;
|
|
- }
|
|
-#endif
|
|
|
|
#ifndef ASSERT
|
|
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
|
|
--- jdk8/hotspot/src/share/vm/runtime/sharedRuntime.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/sharedRuntime.cpp 2015-01-08 21:23:31.192147856 +0100
|
|
@@ -490,6 +490,7 @@
|
|
|
|
address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
|
|
assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address));
|
|
+ assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
|
|
|
|
// Reset method handle flag.
|
|
thread->set_is_method_handle_return(false);
|
|
--- jdk8/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp 2015-01-08 21:23:31.192147856 +0100
|
|
@@ -196,7 +196,6 @@
|
|
// Don't trigger other compiles in testing mode
|
|
return NULL;
|
|
}
|
|
- nmethod *osr_nm = NULL;
|
|
|
|
handle_counter_overflow(method());
|
|
if (method() != inlinee()) {
|
|
@@ -210,14 +209,16 @@
|
|
if (bci == InvocationEntryBci) {
|
|
method_invocation_event(method, inlinee, comp_level, nm, thread);
|
|
} else {
|
|
- method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
|
|
// method == inlinee if the event originated in the main method
|
|
- int highest_level = inlinee->highest_osr_comp_level();
|
|
- if (highest_level > comp_level) {
|
|
- osr_nm = inlinee->lookup_osr_nmethod_for(bci, highest_level, false);
|
|
+ method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
|
|
+ // Check if event led to a higher level OSR compilation
|
|
+ nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, comp_level, false);
|
|
+ if (osr_nm != NULL && osr_nm->comp_level() > comp_level) {
|
|
+ // Perform OSR with new nmethod
|
|
+ return osr_nm;
|
|
}
|
|
}
|
|
- return osr_nm;
|
|
+ return NULL;
|
|
}
|
|
|
|
// Check if the method can be compiled, change level if necessary
|
|
@@ -239,7 +240,7 @@
|
|
if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
|
|
return;
|
|
}
|
|
- if (!CompileBroker::compilation_is_in_queue(mh, bci)) {
|
|
+ if (!CompileBroker::compilation_is_in_queue(mh)) {
|
|
if (PrintTieredEvents) {
|
|
print_event(COMPILE, mh, mh, bci, level);
|
|
}
|
|
@@ -378,7 +379,7 @@
|
|
// Handle the invocation event.
|
|
void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
|
|
CompLevel level, nmethod* nm, JavaThread* thread) {
|
|
- if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
|
|
+ if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
|
|
CompLevel next_level = call_event(mh(), level);
|
|
if (next_level != level) {
|
|
compile(mh, InvocationEntryBci, next_level, thread);
|
|
@@ -391,8 +392,8 @@
|
|
void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
|
|
int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
|
|
// If the method is already compiling, quickly bail out.
|
|
- if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
|
|
- // Use loop event as an opportinity to also check there's been
|
|
+ if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
|
|
+ // Use loop event as an opportunity to also check there's been
|
|
// enough calls.
|
|
CompLevel cur_level = comp_level(mh());
|
|
CompLevel next_level = call_event(mh(), cur_level);
|
|
--- jdk8/hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp 2015-01-08 21:23:31.192147856 +0100
|
|
@@ -54,13 +54,17 @@
|
|
// Simple methods are as good being compiled with C1 as C2.
|
|
// Determine if a given method is such a case.
|
|
bool SimpleThresholdPolicy::is_trivial(Method* method) {
|
|
- if (method->is_accessor()) return true;
|
|
- if (method->code() != NULL) {
|
|
- MethodData* mdo = method->method_data();
|
|
- if (mdo != NULL && mdo->num_loops() == 0 &&
|
|
- (method->code_size() < 5 || (mdo->num_blocks() < 4) && (method->code_size() < 15))) {
|
|
- return !mdo->would_profile();
|
|
+ if (method->is_accessor() ||
|
|
+ method->is_constant_getter()) {
|
|
+ return true;
|
|
+ }
|
|
+ if (method->has_loops() || method->code_size() >= 15) {
|
|
+ return false;
|
|
}
|
|
+ MethodData* mdo = method->method_data();
|
|
+ if (mdo != NULL && !mdo->would_profile() &&
|
|
+ (method->code_size() < 5 || (mdo->num_blocks() < 4))) {
|
|
+ return true;
|
|
}
|
|
return false;
|
|
}
|
|
--- jdk8/hotspot/src/share/vm/runtime/thread.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/thread.cpp 2015-01-08 21:23:31.193147832 +0100
|
|
@@ -234,6 +234,8 @@
|
|
// This initial value ==> never claimed.
|
|
_oops_do_parity = 0;
|
|
|
|
+ _metadata_on_stack_buffer = NULL;
|
|
+
|
|
// the handle mark links itself to last_handle_mark
|
|
new HandleMark(this);
|
|
|
|
@@ -1493,6 +1495,7 @@
|
|
_popframe_condition = popframe_inactive;
|
|
_popframe_preserved_args = NULL;
|
|
_popframe_preserved_args_size = 0;
|
|
+ _frames_to_pop_failed_realloc = 0;
|
|
|
|
pd_initialize();
|
|
}
|
|
--- jdk8/hotspot/src/share/vm/runtime/thread.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/thread.hpp 2015-01-08 21:23:31.193147832 +0100
|
|
@@ -42,11 +42,10 @@
|
|
#include "runtime/threadLocalStorage.hpp"
|
|
#include "runtime/thread_ext.hpp"
|
|
#include "runtime/unhandledOops.hpp"
|
|
-#include "utilities/macros.hpp"
|
|
-
|
|
#include "trace/traceBackend.hpp"
|
|
#include "trace/traceMacros.hpp"
|
|
#include "utilities/exceptions.hpp"
|
|
+#include "utilities/macros.hpp"
|
|
#include "utilities/top.hpp"
|
|
#if INCLUDE_ALL_GCS
|
|
#include "gc_implementation/g1/dirtyCardQueue.hpp"
|
|
@@ -83,6 +82,10 @@
|
|
class ThreadClosure;
|
|
class IdealGraphPrinter;
|
|
|
|
+class Metadata;
|
|
+template <class T, MEMFLAGS F> class ChunkedList;
|
|
+typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
|
|
+
|
|
DEBUG_ONLY(class ResourceMark;)
|
|
|
|
class WorkerThread;
|
|
@@ -256,6 +259,9 @@
|
|
jlong _allocated_bytes; // Cumulative number of bytes allocated on
|
|
// the Java heap
|
|
|
|
+ // Thread-local buffer used by MetadataOnStackMark.
|
|
+ MetadataOnStackBuffer* _metadata_on_stack_buffer;
|
|
+
|
|
TRACE_DATA _trace_data; // Thread-local data for tracing
|
|
|
|
ThreadExt _ext;
|
|
@@ -517,6 +523,9 @@
|
|
// creation fails due to lack of memory, too many threads etc.
|
|
bool set_as_starting_thread();
|
|
|
|
+ void set_metadata_on_stack_buffer(MetadataOnStackBuffer* buffer) { _metadata_on_stack_buffer = buffer; }
|
|
+ MetadataOnStackBuffer* metadata_on_stack_buffer() const { return _metadata_on_stack_buffer; }
|
|
+
|
|
protected:
|
|
// OS data associated with the thread
|
|
OSThread* _osthread; // Platform-specific thread information
|
|
@@ -924,6 +933,12 @@
|
|
// This is set to popframe_pending to signal that top Java frame should be popped immediately
|
|
int _popframe_condition;
|
|
|
|
+ // If reallocation of scalar replaced objects fails, we throw OOM
|
|
+ // and during exception propagation, pop the top
|
|
+ // _frames_to_pop_failed_realloc frames, the ones that reference
|
|
+ // failed reallocations.
|
|
+ int _frames_to_pop_failed_realloc;
|
|
+
|
|
#ifndef PRODUCT
|
|
int _jmp_ring_index;
|
|
struct {
|
|
@@ -1576,6 +1591,10 @@
|
|
void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; }
|
|
#endif
|
|
|
|
+ int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; }
|
|
+ void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; }
|
|
+ void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; }
|
|
+
|
|
private:
|
|
// Saved incoming arguments to popped frame.
|
|
// Used only when popped interpreted frame returns to deoptimized frame.
|
|
--- jdk8/hotspot/src/share/vm/runtime/vframeArray.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/vframeArray.cpp 2015-01-08 21:23:31.194147808 +0100
|
|
@@ -56,7 +56,7 @@
|
|
}
|
|
}
|
|
|
|
-void vframeArrayElement::fill_in(compiledVFrame* vf) {
|
|
+void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) {
|
|
|
|
// Copy the information from the compiled vframe to the
|
|
// interpreter frame we will be creating to replace vf
|
|
@@ -64,6 +64,9 @@
|
|
_method = vf->method();
|
|
_bci = vf->raw_bci();
|
|
_reexecute = vf->should_reexecute();
|
|
+#ifdef ASSERT
|
|
+ _removed_monitors = false;
|
|
+#endif
|
|
|
|
int index;
|
|
|
|
@@ -81,13 +84,17 @@
|
|
// Migrate the BasicLocks from the stack to the monitor chunk
|
|
for (index = 0; index < list->length(); index++) {
|
|
MonitorInfo* monitor = list->at(index);
|
|
- assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
|
|
- assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
|
|
+ assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
|
|
BasicObjectLock* dest = _monitors->at(index);
|
|
+ if (monitor->owner_is_scalar_replaced()) {
|
|
+ dest->set_obj(NULL);
|
|
+ } else {
|
|
+ assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
|
|
dest->set_obj(monitor->owner());
|
|
monitor->lock()->move_to(monitor->owner(), dest->lock());
|
|
}
|
|
}
|
|
+ }
|
|
|
|
// Convert the vframe locals and expressions to off stack
|
|
// values. Because we will not gc all oops can be converted to
|
|
@@ -110,7 +117,7 @@
|
|
StackValue* value = locs->at(index);
|
|
switch(value->type()) {
|
|
case T_OBJECT:
|
|
- assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
|
|
+ assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
|
|
// preserve object type
|
|
_locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
|
|
break;
|
|
@@ -135,7 +142,7 @@
|
|
StackValue* value = exprs->at(index);
|
|
switch(value->type()) {
|
|
case T_OBJECT:
|
|
- assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
|
|
+ assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
|
|
// preserve object type
|
|
_expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
|
|
break;
|
|
@@ -286,7 +293,7 @@
|
|
|
|
_frame.patch_pc(thread, pc);
|
|
|
|
- assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors");
|
|
+ assert (!method()->is_synchronized() || locks > 0 || _removed_monitors, "synchronized methods must have monitors");
|
|
|
|
BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
|
|
for (int index = 0; index < locks; index++) {
|
|
@@ -438,7 +445,8 @@
|
|
|
|
|
|
vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
|
|
- RegisterMap *reg_map, frame sender, frame caller, frame self) {
|
|
+ RegisterMap *reg_map, frame sender, frame caller, frame self,
|
|
+ bool realloc_failures) {
|
|
|
|
// Allocate the vframeArray
|
|
vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
|
|
@@ -450,19 +458,20 @@
|
|
result->_caller = caller;
|
|
result->_original = self;
|
|
result->set_unroll_block(NULL); // initialize it
|
|
- result->fill_in(thread, frame_size, chunk, reg_map);
|
|
+ result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures);
|
|
return result;
|
|
}
|
|
|
|
void vframeArray::fill_in(JavaThread* thread,
|
|
int frame_size,
|
|
GrowableArray<compiledVFrame*>* chunk,
|
|
- const RegisterMap *reg_map) {
|
|
+ const RegisterMap *reg_map,
|
|
+ bool realloc_failures) {
|
|
// Set owner first, it is used when adding monitor chunks
|
|
|
|
_frame_size = frame_size;
|
|
for(int i = 0; i < chunk->length(); i++) {
|
|
- element(i)->fill_in(chunk->at(i));
|
|
+ element(i)->fill_in(chunk->at(i), realloc_failures);
|
|
}
|
|
|
|
// Copy registers for callee-saved registers
|
|
--- jdk8/hotspot/src/share/vm/runtime/vframeArray.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/runtime/vframeArray.hpp 2015-01-08 21:23:31.194147808 +0100
|
|
@@ -58,6 +58,9 @@
|
|
MonitorChunk* _monitors; // active monitors for this vframe
|
|
StackValueCollection* _locals;
|
|
StackValueCollection* _expressions;
|
|
+#ifdef ASSERT
|
|
+ bool _removed_monitors;
|
|
+#endif
|
|
|
|
public:
|
|
|
|
@@ -78,7 +81,7 @@
|
|
|
|
StackValueCollection* expressions(void) const { return _expressions; }
|
|
|
|
- void fill_in(compiledVFrame* vf);
|
|
+ void fill_in(compiledVFrame* vf, bool realloc_failures);
|
|
|
|
// Formerly part of deoptimizedVFrame
|
|
|
|
@@ -99,6 +102,12 @@
|
|
bool is_bottom_frame,
|
|
int exec_mode);
|
|
|
|
+#ifdef ASSERT
|
|
+ void set_removed_monitors() {
|
|
+ _removed_monitors = true;
|
|
+ }
|
|
+#endif
|
|
+
|
|
#ifndef PRODUCT
|
|
void print(outputStream* st);
|
|
#endif /* PRODUCT */
|
|
@@ -160,13 +169,14 @@
|
|
int frames() const { return _frames; }
|
|
|
|
static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
|
|
- RegisterMap* reg_map, frame sender, frame caller, frame self);
|
|
+ RegisterMap* reg_map, frame sender, frame caller, frame self,
|
|
+ bool realloc_failures);
|
|
|
|
|
|
vframeArrayElement* element(int index) { assert(is_within_bounds(index), "Bad index"); return &_elements[index]; }
|
|
|
|
// Allocates a new vframe in the array and fills the array with vframe information in chunk
|
|
- void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map);
|
|
+ void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map, bool realloc_failures);
|
|
|
|
// Returns the owner of this vframeArray
|
|
JavaThread* owner_thread() const { return _owner_thread; }
|
|
--- jdk8/hotspot/src/share/vm/services/mallocTracker.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/services/mallocTracker.cpp 2015-01-08 21:23:31.194147808 +0100
|
|
@@ -72,7 +72,7 @@
|
|
|
|
MallocMemorySummary::record_free(size(), flags());
|
|
MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
|
|
- if (tracking_level() == NMT_detail) {
|
|
+ if (MemTracker::tracking_level() == NMT_detail) {
|
|
MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
|
|
}
|
|
}
|
|
@@ -128,36 +128,18 @@
|
|
}
|
|
|
|
// Uses placement global new operator to initialize malloc header
|
|
- switch(level) {
|
|
- case NMT_off:
|
|
+
|
|
+ if (level == NMT_off) {
|
|
return malloc_base;
|
|
- case NMT_minimal: {
|
|
- MallocHeader* hdr = ::new (malloc_base) MallocHeader();
|
|
- break;
|
|
- }
|
|
- case NMT_summary: {
|
|
- assert(size <= MAX_MALLOC_SIZE, "malloc size overrun for NMT");
|
|
- header = ::new (malloc_base) MallocHeader(size, flags);
|
|
- break;
|
|
- }
|
|
- case NMT_detail: {
|
|
- assert(size <= MAX_MALLOC_SIZE, "malloc size overrun for NMT");
|
|
- header = ::new (malloc_base) MallocHeader(size, flags, stack);
|
|
- break;
|
|
- }
|
|
- default:
|
|
- ShouldNotReachHere();
|
|
}
|
|
+
|
|
+ header = ::new (malloc_base)MallocHeader(size, flags, stack, level);
|
|
memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
|
|
|
|
// The alignment check: 8 bytes alignment for 32 bit systems.
|
|
// 16 bytes alignment for 64-bit systems.
|
|
assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
|
|
|
|
- // Sanity check
|
|
- assert(get_memory_tracking_level(memblock) == level,
|
|
- "Wrong tracking level");
|
|
-
|
|
#ifdef ASSERT
|
|
if (level > NMT_minimal) {
|
|
// Read back
|
|
--- jdk8/hotspot/src/share/vm/services/mallocTracker.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/services/mallocTracker.hpp 2015-01-08 21:23:31.194147808 +0100
|
|
@@ -239,46 +239,33 @@
|
|
|
|
class MallocHeader VALUE_OBJ_CLASS_SPEC {
|
|
#ifdef _LP64
|
|
- size_t _size : 62;
|
|
- size_t _level : 2;
|
|
+ size_t _size : 64;
|
|
size_t _flags : 8;
|
|
size_t _pos_idx : 16;
|
|
size_t _bucket_idx: 40;
|
|
-#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
|
|
-#define MAX_BUCKET_LENGTH ((size_t)(1 << 16))
|
|
-#define MAX_MALLOC_SIZE (((size_t)1 << 62) - 1)
|
|
+#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
|
|
+#define MAX_BUCKET_LENGTH right_n_bits(16)
|
|
#else
|
|
- size_t _size : 30;
|
|
- size_t _level : 2;
|
|
+ size_t _size : 32;
|
|
size_t _flags : 8;
|
|
size_t _pos_idx : 8;
|
|
size_t _bucket_idx: 16;
|
|
-#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
|
|
-#define MAX_BUCKET_LENGTH ((size_t)(1 << 8))
|
|
-// Max malloc size = 1GB - 1 on 32 bit system, such has total 4GB memory
|
|
-#define MAX_MALLOC_SIZE ((size_t)(1 << 30) - 1)
|
|
+#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(16)
|
|
+#define MAX_BUCKET_LENGTH right_n_bits(8)
|
|
#endif // _LP64
|
|
|
|
public:
|
|
- // Summary tracking header
|
|
- MallocHeader(size_t size, MEMFLAGS flags) {
|
|
+ MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
|
|
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
"Wrong header size");
|
|
|
|
- _level = NMT_summary;
|
|
- _flags = flags;
|
|
- set_size(size);
|
|
- MallocMemorySummary::record_malloc(size, flags);
|
|
- MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
|
+ if (level == NMT_minimal) {
|
|
+ return;
|
|
}
|
|
- // Detail tracking header
|
|
- MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
|
|
- assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
- "Wrong header size");
|
|
|
|
- _level = NMT_detail;
|
|
_flags = flags;
|
|
set_size(size);
|
|
+ if (level == NMT_detail) {
|
|
size_t bucket_idx;
|
|
size_t pos_idx;
|
|
if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
|
|
@@ -287,19 +274,10 @@
|
|
_bucket_idx = bucket_idx;
|
|
_pos_idx = pos_idx;
|
|
}
|
|
- MallocMemorySummary::record_malloc(size, flags);
|
|
- MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
|
- }
|
|
- // Minimal tracking header
|
|
- MallocHeader() {
|
|
- assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
- "Wrong header size");
|
|
-
|
|
- _level = (unsigned short)NMT_minimal;
|
|
}
|
|
|
|
- inline NMT_TrackingLevel tracking_level() const {
|
|
- return (NMT_TrackingLevel)_level;
|
|
+ MallocMemorySummary::record_malloc(size, flags);
|
|
+ MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
|
}
|
|
|
|
inline size_t size() const { return _size; }
|
|
@@ -311,7 +289,6 @@
|
|
|
|
private:
|
|
inline void set_size(size_t size) {
|
|
- assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
|
|
_size = size;
|
|
}
|
|
bool record_malloc_site(const NativeCallStack& stack, size_t size,
|
|
@@ -347,10 +324,6 @@
|
|
// Record free on specified memory block
|
|
static void* record_free(void* memblock);
|
|
|
|
- // Get tracking level of specified memory block
|
|
- static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
|
|
-
|
|
-
|
|
// Offset memory address to header address
|
|
static inline void* get_base(void* memblock);
|
|
static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
|
|
@@ -361,16 +334,12 @@
|
|
// Get memory size
|
|
static inline size_t get_size(void* memblock) {
|
|
MallocHeader* header = malloc_header(memblock);
|
|
- assert(header->tracking_level() >= NMT_summary,
|
|
- "Wrong tracking level");
|
|
return header->size();
|
|
}
|
|
|
|
// Get memory type
|
|
static inline MEMFLAGS get_flags(void* memblock) {
|
|
MallocHeader* header = malloc_header(memblock);
|
|
- assert(header->tracking_level() >= NMT_summary,
|
|
- "Wrong tracking level");
|
|
return header->flags();
|
|
}
|
|
|
|
@@ -394,7 +363,6 @@
|
|
static inline MallocHeader* malloc_header(void *memblock) {
|
|
assert(memblock != NULL, "NULL pointer");
|
|
MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
|
|
- assert(header->tracking_level() >= NMT_minimal, "Bad header");
|
|
return header;
|
|
}
|
|
};
|
|
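The replacement MAX_* limits above rely on HotSpot's right_n_bits macro instead of hand-written shifts. A minimal sketch of its semantics, assuming the usual utilities/globalDefinitions.hpp pattern:

    // Sketch: right_n_bits(n) is a word with the low n bits set, i.e. (1 << n) - 1.
    #define nth_bit(n)      ((size_t)1 << (n))
    #define right_n_bits(n) (nth_bit(n) - 1)
    // On 64-bit, MAX_BUCKET_LENGTH becomes right_n_bits(16) == 0xFFFF, the largest
    // value the 16-bit _pos_idx bitfield can actually hold; the old
    // ((size_t)(1 << 16)) constant was one past that bound.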
--- jdk8/hotspot/src/share/vm/services/mallocTracker.inline.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/services/mallocTracker.inline.hpp 2015-01-08 21:23:31.194147808 +0100
|
|
@@ -28,13 +28,6 @@
|
|
#include "services/mallocTracker.hpp"
|
|
#include "services/memTracker.hpp"
|
|
|
|
-inline NMT_TrackingLevel MallocTracker::get_memory_tracking_level(void* memblock) {
|
|
- assert(memblock != NULL, "Sanity check");
|
|
- if (MemTracker::tracking_level() == NMT_off) return NMT_off;
|
|
- MallocHeader* header = malloc_header(memblock);
|
|
- return header->tracking_level();
|
|
-}
|
|
-
|
|
inline void* MallocTracker::get_base(void* memblock){
|
|
return get_base(memblock, MemTracker::tracking_level());
|
|
}
|
|
--- jdk8/hotspot/src/share/vm/services/runtimeService.cpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/services/runtimeService.cpp 2015-01-08 21:23:31.194147808 +0100
|
|
@@ -46,6 +46,7 @@
|
|
PerfCounter* RuntimeService::_thread_interrupt_signaled_count = NULL;
|
|
PerfCounter* RuntimeService::_interrupted_before_count = NULL;
|
|
PerfCounter* RuntimeService::_interrupted_during_count = NULL;
|
|
+double RuntimeService::_last_safepoint_sync_time_sec = 0.0;
|
|
|
|
void RuntimeService::init() {
|
|
// Make sure the VM version is initialized
|
|
@@ -128,6 +129,7 @@
|
|
|
|
// update the time stamp to begin recording safepoint time
|
|
_safepoint_timer.update();
|
|
+ _last_safepoint_sync_time_sec = 0.0;
|
|
if (UsePerfData) {
|
|
_total_safepoints->inc();
|
|
if (_app_timer.is_updated()) {
|
|
@@ -140,6 +142,9 @@
|
|
if (UsePerfData) {
|
|
_sync_time_ticks->inc(_safepoint_timer.ticks_since_update());
|
|
}
|
|
+ if (PrintGCApplicationStoppedTime) {
|
|
+ _last_safepoint_sync_time_sec = last_safepoint_time_sec();
|
|
+ }
|
|
}
|
|
|
|
void RuntimeService::record_safepoint_end() {
|
|
@@ -155,8 +160,10 @@
|
|
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
|
gclog_or_tty->stamp(PrintGCTimeStamps);
|
|
gclog_or_tty->print_cr("Total time for which application threads "
|
|
- "were stopped: %3.7f seconds",
|
|
- last_safepoint_time_sec());
|
|
+ "were stopped: %3.7f seconds, "
|
|
+ "Stopping threads took: %3.7f seconds",
|
|
+ last_safepoint_time_sec(),
|
|
+ _last_safepoint_sync_time_sec);
|
|
}
|
|
|
|
// update the time stamp to begin recording app time
|
|
--- jdk8/hotspot/src/share/vm/services/runtimeService.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/services/runtimeService.hpp 2015-01-08 21:23:31.195147784 +0100
|
|
@@ -40,6 +40,7 @@
|
|
|
|
static TimeStamp _safepoint_timer;
|
|
static TimeStamp _app_timer;
|
|
+ static double _last_safepoint_sync_time_sec;
|
|
|
|
public:
|
|
static void init();
|
|
--- jdk8/hotspot/src/share/vm/trace/noTraceBackend.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/trace/noTraceBackend.hpp 2015-01-08 21:23:31.195147784 +0100
|
|
@@ -41,4 +41,4 @@
|
|
|
|
typedef NoTraceBackend Tracing;
|
|
|
|
-#endif
|
|
+#endif // SHARE_VM_TRACE_NOTRACEBACKEND_HPP
|
|
--- jdk8/hotspot/src/share/vm/trace/traceBackend.hpp 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/trace/traceBackend.hpp 2015-01-08 21:23:31.195147784 +0100
|
|
@@ -58,9 +56,7 @@
|
|
|
|
typedef TraceBackend Tracing;
|
|
|
|
-#else /* INCLUDE_TRACE */
|
|
-
|
|
+#else // !INCLUDE_TRACE
|
|
#include "trace/noTraceBackend.hpp"
|
|
-
|
|
-#endif /* INCLUDE_TRACE */
|
|
-#endif /* SHARE_VM_TRACE_TRACEBACKEND_HPP */
|
|
+#endif // INCLUDE_TRACE
|
|
+#endif // SHARE_VM_TRACE_TRACEBACKEND_HPP
|
|
--- jdk8/hotspot/src/share/vm/trace/traceEventClasses.xsl 2015-01-06 16:57:27.000000000 +0100
|
|
+++ jdk8/hotspot/src/share/vm/trace/traceEventClasses.xsl 2015-01-08 21:23:31.195147784 +0100
|
|
@@ -51,7 +48,7 @@
|
|
<xsl:apply-templates select="trace/events/struct" mode="trace"/>
|
|
<xsl:apply-templates select="trace/events/event" mode="trace"/>
|
|
|
|
-#else
|
|
+#else // !INCLUDE_TRACE
|
|
|
|
class TraceEvent {
|
|
public:
|
|
@@ -65,9 +62,8 @@
|
|
<xsl:apply-templates select="trace/events/struct" mode="empty"/>
|
|
<xsl:apply-templates select="trace/events/event" mode="empty"/>
|
|
|
|
-#endif
|
|
-
|
|
-#endif
|
|
+#endif // INCLUDE_TRACE
|
|
+#endif // TRACEFILES_TRACEEVENTCLASSES_HPP
|
|
</xsl:template>
|
|
|
|
<xsl:template match="struct" mode="trace">
|
|
--- jdk8/hotspot/src/share/vm/trace/traceEvent.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/trace/traceEvent.hpp	2015-01-08 21:23:31.195147784 +0100
@@ -154,6 +153,5 @@
   }
 };
 
-#endif /* INCLUDE_TRACE */
-
-#endif /* SHARE_VM_TRACE_TRACEEVENT_HPP */
+#endif // INCLUDE_TRACE
+#endif // SHARE_VM_TRACE_TRACEEVENT_HPP
--- jdk8/hotspot/src/share/vm/trace/traceEventIds.xsl	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/trace/traceEventIds.xsl	2015-01-08 21:23:31.195147784 +0100
@@ -29,13 +29,11 @@
 <xsl:template match="/">
 <xsl:call-template name="file-header"/>
 
-#ifndef TRACEFILES_JFREVENTIDS_HPP
-#define TRACEFILES_JFREVENTIDS_HPP
+#ifndef TRACEFILES_TRACEEVENTIDS_HPP
+#define TRACEFILES_TRACEEVENTIDS_HPP
 
 #include "utilities/macros.hpp"
-
 #if INCLUDE_TRACE
-
 #include "trace/traceDataTypes.hpp"
 
 /**
@@ -67,8 +65,8 @@
 typedef enum TraceEventId TraceEventId;
 typedef enum TraceStructId TraceStructId;
 
-#endif
-#endif
+#endif // INCLUDE_TRACE
+#endif // TRACEFILES_TRACEEVENTIDS_HPP
 </xsl:template>
 
 </xsl:stylesheet>
--- jdk8/hotspot/src/share/vm/trace/traceMacros.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/trace/traceMacros.hpp	2015-01-08 21:23:31.195147784 +0100
@@ -22,8 +22,8 @@
  *
  */
 
-#ifndef SHARE_VM_TRACE_TRACE_MACRO_HPP
-#define SHARE_VM_TRACE_TRACE_MACRO_HPP
+#ifndef SHARE_VM_TRACE_TRACEMACROS_HPP
+#define SHARE_VM_TRACE_TRACEMACROS_HPP
 
 #define EVENT_THREAD_EXIT(thread)
 #define EVENT_THREAD_DESTRUCT(thread)
@@ -41,4 +41,4 @@
 #define TRACE_TEMPLATES(template)
 #define TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)
 
-#endif
+#endif // SHARE_VM_TRACE_TRACEMACROS_HPP
--- jdk8/hotspot/src/share/vm/trace/traceStream.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/trace/traceStream.hpp	2015-01-08 21:23:31.196147760 +0100
@@ -117,5 +115,5 @@
   }
 };
 
-#endif /* INCLUDE_TRACE */
-#endif /* SHARE_VM_TRACE_TRACESTREAM_HPP */
+#endif // INCLUDE_TRACE
+#endif // SHARE_VM_TRACE_TRACESTREAM_HPP
--- jdk8/hotspot/src/share/vm/trace/traceTypes.xsl	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/trace/traceTypes.xsl	2015-01-08 21:23:31.196147760 +0100
@@ -29,8 +29,8 @@
 <xsl:template match="/">
 <xsl:call-template name="file-header"/>
 
-#ifndef TRACEFILES_JFRTYPES_HPP
-#define TRACEFILES_JFRTYPES_HPP
+#ifndef TRACEFILES_TRACETYPES_HPP
+#define TRACEFILES_TRACETYPES_HPP
 
 #include "oops/symbol.hpp"
 #include "trace/traceDataTypes.hpp"
@@ -58,7 +57,7 @@
 };
 
 /**
- * Create typedefs for the JRA types:
+ * Create typedefs for the TRACE types:
  *   typedef s8 TYPE_LONG;
  *   typedef s4 TYPE_INTEGER;
 *   typedef const char * TYPE_STRING;
@@ -68,7 +67,7 @@
 typedef <xsl:value-of select="@type"/> TYPE_<xsl:value-of select="@symbol"/>;
 </xsl:for-each>
 
-#endif // JFRFILES_JFRTYPES_HPP
+#endif // TRACEFILES_TRACETYPES_HPP
 </xsl:template>
 
 </xsl:stylesheet>
--- jdk8/hotspot/src/share/vm/utilities/accessFlags.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/utilities/accessFlags.cpp	2015-01-08 21:23:31.196147760 +0100
@@ -62,6 +62,21 @@
   } while(f != old_flags);
 }
 
+// Returns true iff this thread succeeded setting the bit.
+bool AccessFlags::atomic_set_one_bit(jint bit) {
+  // Atomically update the flags with the bit given
+  jint old_flags, new_flags, f;
+  bool is_setting_bit = false;
+  do {
+    old_flags = _flags;
+    new_flags = old_flags | bit;
+    is_setting_bit = old_flags != new_flags;
+    f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+  } while(f != old_flags);
+
+  return is_setting_bit;
+}
+
 #if !defined(PRODUCT) || INCLUDE_JVMTI
 
 void AccessFlags::print_on(outputStream* st) const {
--- jdk8/hotspot/src/share/vm/utilities/accessFlags.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/utilities/accessFlags.hpp	2015-01-08 21:23:31.196147760 +0100
@@ -170,6 +170,7 @@
 
   // Atomic update of flags
   void atomic_set_bits(jint bits);
+  bool atomic_set_one_bit(jint bit);
   void atomic_clear_bits(jint bits);
 
  private:
@@ -230,12 +231,13 @@
     atomic_set_bits(JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE);
   }
 
-  void set_on_stack(const bool value)
+  bool set_on_stack(const bool value)
   {
     if (value) {
-      atomic_set_bits(JVM_ACC_ON_STACK);
+      return atomic_set_one_bit(JVM_ACC_ON_STACK);
     } else {
       atomic_clear_bits(JVM_ACC_ON_STACK);
+      return true; // Ignored
     }
   }
   // Conversion
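atomic_set_one_bit above is a standard compare-and-swap retry loop; what it adds over the existing atomic_set_bits is the return value, which is true for exactly the one thread that performs the 0-to-1 transition of the bit. set_on_stack(true) forwards that result, presumably so a caller marking metadata as on-stack can record each object exactly once (the ChunkedList added below fits that kind of use). A standalone sketch of the idiom with std::atomic instead of HotSpot's Atomic::cmpxchg:

    #include <atomic>
    #include <cassert>

    // Returns true iff this call changed the bit from 0 to 1.
    bool atomic_set_one_bit(std::atomic<int>& flags, int bit) {
      int old_flags = flags.load();
      int new_flags;
      do {
        new_flags = old_flags | bit;
        if (new_flags == old_flags) {
          return false;  // bit already set, by us earlier or by a racing thread
        }
        // On failure, compare_exchange_weak reloads old_flags and we retry.
      } while (!flags.compare_exchange_weak(old_flags, new_flags));
      return true;  // we performed the 0 -> 1 transition
    }

    int main() {
      std::atomic<int> flags{0};
      assert(atomic_set_one_bit(flags, 0x4));   // first setter wins
      assert(!atomic_set_one_bit(flags, 0x4));  // already set, returns false
    }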
--- jdk8/hotspot/src/share/vm/utilities/chunkedList.cpp	1970-01-01 01:00:00.000000000 +0100
+++ jdk8/hotspot/src/share/vm/utilities/chunkedList.cpp	2015-01-08 21:23:31.196147760 +0100
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/chunkedList.hpp"
+#include "utilities/debug.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+template <typename T>
+class TestChunkedList {
+  typedef ChunkedList<T, mtOther> ChunkedListT;
+
+ public:
+  static void testEmpty() {
+    ChunkedListT buffer;
+    assert(buffer.size() == 0, "assert");
+  }
+
+  static void testFull() {
+    ChunkedListT buffer;
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+    }
+    assert(buffer.size() == ChunkedListT::BufferSize, "assert");
+    assert(buffer.is_full(), "assert");
+  }
+
+  static void testSize() {
+    ChunkedListT buffer;
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      assert(buffer.size() == i, "assert");
+      buffer.push((T)i);
+      assert(buffer.size() == i + 1, "assert");
+    }
+  }
+
+  static void testClear() {
+    ChunkedListT buffer;
+
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize / 2; i++) {
+      buffer.push((T)i);
+    }
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+    }
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+  }
+
+  static void testAt() {
+    ChunkedListT buffer;
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+      assert(buffer.at(i) == (T)i, "assert");
+    }
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      assert(buffer.at(i) == (T)i, "assert");
+    }
+  }
+
+  static void test() {
+    testEmpty();
+    testFull();
+    testSize();
+    testClear();
+    testAt();
+  }
+};
+
+class Metadata;
+
+void TestChunkedList_test() {
+  TestChunkedList<Metadata*>::test();
+  TestChunkedList<size_t>::test();
+}
+
+#endif
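The tests above follow HotSpot's internal-VM-test convention: TestChunkedList_test() is a free function compiled only outside PRODUCT builds, so it can be run from the internal test runner of a debug VM (java -XX:+ExecuteInternalVMTests -version); the registration of this function in that runner's table is assumed to happen elsewhere in this backport.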
--- jdk8/hotspot/src/share/vm/utilities/chunkedList.hpp	1970-01-01 01:00:00.000000000 +0100
+++ jdk8/hotspot/src/share/vm/utilities/chunkedList.hpp	2015-01-08 21:23:31.196147760 +0100
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
+#define SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+template <class T, MEMFLAGS F> class ChunkedList : public CHeapObj<F> {
+  template <class U> friend class TestChunkedList;
+
+  static const size_t BufferSize = 64;
+
+  T  _values[BufferSize];
+  T* _top;
+
+  ChunkedList<T, F>* _next_used;
+  ChunkedList<T, F>* _next_free;
+
+  T const * end() const {
+    return &_values[BufferSize];
+  }
+
+ public:
+  ChunkedList<T, F>() : _top(_values), _next_used(NULL), _next_free(NULL) {}
+
+  bool is_full() const {
+    return _top == end();
+  }
+
+  void clear() {
+    _top = _values;
+    // Don't clear the next pointers since that would interfere
+    // with other threads trying to iterate through the lists.
+  }
+
+  void push(T m) {
+    assert(!is_full(), "Buffer is full");
+    *_top = m;
+    _top++;
+  }
+
+  void set_next_used(ChunkedList<T, F>* buffer) { _next_used = buffer; }
+  void set_next_free(ChunkedList<T, F>* buffer) { _next_free = buffer; }
+
+  ChunkedList<T, F>* next_used() const { return _next_used; }
+  ChunkedList<T, F>* next_free() const { return _next_free; }
+
+  size_t size() const {
+    return pointer_delta(_top, _values, sizeof(T));
+  }
+
+  T at(size_t i) {
+    assert(i < size(), err_msg("IOOBE i: " SIZE_FORMAT " size(): " SIZE_FORMAT, i, size()));
+    return _values[i];
+  }
+};
+
+#endif // SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
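ChunkedList itself is a fixed 64-slot chunk; the _next_used/_next_free links exist so a manager can chain full chunks into an in-use list and recycle emptied ones on a free list. A standalone sketch of the intended push-until-full-then-chain pattern (simplified: a local Chunk stand-in with the same interface, plain new/delete instead of CHeapObj, so it compiles outside HotSpot):

    #include <cassert>
    #include <cstddef>

    // Minimal stand-in for ChunkedList<T, F>, same push/is_full/next_used surface.
    template <class T>
    class Chunk {
      static const size_t BufferSize = 64;
      T _values[BufferSize];
      T* _top;
      Chunk<T>* _next_used;

     public:
      Chunk() : _top(_values), _next_used(nullptr) {}
      bool is_full() const { return _top == _values + BufferSize; }
      void push(T v) { assert(!is_full()); *_top++ = v; }
      void set_next_used(Chunk<T>* c) { _next_used = c; }
      Chunk<T>* next_used() const { return _next_used; }
      size_t size() const { return (size_t)(_top - _values); }
    };

    // Fill the head chunk; when it is full, chain a fresh one in front.
    template <class T>
    Chunk<T>* push_chained(Chunk<T>* head, T v) {
      if (head == nullptr || head->is_full()) {
        Chunk<T>* c = new Chunk<T>();
        c->set_next_used(head);  // newest chunk becomes the list head
        head = c;
      }
      head->push(v);
      return head;
    }

    int main() {
      Chunk<size_t>* head = nullptr;
      for (size_t i = 0; i < 200; i++) {  // forces chaining past 64-entry chunks
        head = push_chained(head, i);
      }
      size_t total = 0;
      for (Chunk<size_t>* c = head; c != nullptr; c = c->next_used()) {
        total += c->size();
      }
      assert(total == 200);
      while (head != nullptr) {  // tear down the chain
        Chunk<size_t>* next = head->next_used();
        delete head;
        head = next;
      }
    }

How the backport's callers actually wire the chunks together is outside these two files, so treat this as an illustration of the API, not the VM's exact usage.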
--- jdk8/hotspot/src/share/vm/utilities/debug.cpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/utilities/debug.cpp	2015-01-08 21:23:31.197147736 +0100
@@ -266,17 +266,19 @@
     "native memory for metadata",
     "shared read only space",
     "shared read write space",
-    "shared miscellaneous data space"
+    "shared miscellaneous data space",
+    "shared miscellaneous code space"
   };
   static const char* flag[] = {
     "Metaspace",
     "SharedReadOnlySize",
     "SharedReadWriteSize",
-    "SharedMiscDataSize"
+    "SharedMiscDataSize",
+    "SharedMiscCodeSize"
   };
 
   warning("\nThe %s is not large enough\n"
-          "to preload requested classes. Use -XX:%s=\n"
+          "to preload requested classes. Use -XX:%s=<size>\n"
           "to increase the initial size of %s.\n",
           name[shared_space], flag[shared_space], name[shared_space]);
   exit(2);
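With the new SharedMiscCode entries and the <size> placeholder, overflowing the shared miscellaneous code region during a CDS dump (-Xshare:dump) would now produce a message of this shape (assembled from the format string and tables above; illustrative):

    The shared miscellaneous code space is not large enough
    to preload requested classes. Use -XX:SharedMiscCodeSize=<size>
    to increase the initial size of shared miscellaneous code space.

Previously the flag was printed as a bare "-XX:%s=", giving no hint that a size value was expected.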
--- jdk8/hotspot/src/share/vm/utilities/debug.hpp	2015-01-06 16:57:27.000000000 +0100
+++ jdk8/hotspot/src/share/vm/utilities/debug.hpp	2015-01-08 21:23:31.197147736 +0100
@@ -246,7 +246,8 @@
   SharedPermGen,
   SharedReadOnly,
   SharedReadWrite,
-  SharedMiscData
+  SharedMiscData,
+  SharedMiscCode
 };
 
 void report_out_of_shared_space(SharedSpaceType space_type);