/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/resolutionErrors.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/rewriter.hpp"
#include "logging/log.hpp"
#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/macros.hpp"

// Implementation of ConstantPoolCacheEntry

void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  _f1 = NULL;
  _f2 = _flags = 0;
  assert(constant_pool_index() == index, "");
}

void ConstantPoolCacheEntry::verify_just_initialized(bool f2_used) {
  assert((_indices & (~cp_index_mask)) == 0, "sanity");
  assert(_f1 == NULL, "sanity");
  assert(_flags == 0, "sanity");
  if (!f2_used) {
    assert(_f2 == 0, "sanity");
  }
}

void ConstantPoolCacheEntry::reinitialize(bool f2_used) {
  _indices &= cp_index_mask;
  _f1 = NULL;
  _flags = 0;
  if (!f2_used) {
    _f2 = 0;
  }
}

int ConstantPoolCacheEntry::make_flags(TosState state,
                                       int option_bits,
                                       int field_index_or_method_params) {
  assert(state < number_of_states, "Invalid state in make_flags");
  int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
  // Preserve existing flag bit values
  // The low bits are a field index, or else the method parameter size.
#ifdef ASSERT
  TosState old_state = flag_state();
  assert(old_state == (TosState)0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f);
}

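// A note on the _indices word updated below (the exact shifts and masks live
// in cpCache.hpp): the low bits hold the original constant pool index, and
// the two resolved bytecodes sit in separate byte-sized fields above it,
// roughly
//
//   _indices: [ bytecode_2 | bytecode_1 | constant_pool_index ]
//
// The bytecodes are published with release_store so that a reader that sees
// a non-zero bytecode also sees the f1/f2 values written before it (see the
// comment preceding set_field below).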
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}

void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}

// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
  assert(f1 != NULL, "");
  OrderAccess::release_store(&_f1, f1);
}

void ConstantPoolCacheEntry::set_indy_resolution_failed() {
  OrderAccess::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
}

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       Klass* field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile,
                                       Klass* root_klass) {
  set_f1(field_holder);
  set_f2(field_offset);
  assert((field_index & field_index_mask) == field_index,
         "field index does not fit in low flag bits");
  set_field_flags(field_type,
                  ((is_volatile ? 1 : 0) << is_volatile_shift) |
                  ((is_final    ? 1 : 0) << is_final_shift),
                  field_index);
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}
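
// For reference, a field entry resolved by set_field holds:
//   f1                    - the field holder Klass*
//   f2                    - the field offset within the holder
//   flags                 - tos state (field type), is_final/is_volatile bits,
//                           and the field index in the low bits
//   bytecode_1/bytecode_2 - the resolved get/put bytecodes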

void ConstantPoolCacheEntry::set_parameter_size(int value) {
  // This routine is called only in corner cases where the CPCE is not yet initialized.
  // See AbstractInterpreter::deopt_continue_after_entry.
  assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
         "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
  // Setting the parameter size by itself is only safe if the
  // current value of _flags is 0, otherwise another thread may have
  // updated it and we don't want to overwrite that value. Don't
  // bother trying to update it once it's nonzero but always make
  // sure that the final parameter size agrees with what was passed.
  if (_flags == 0) {
    intx newflags = (value & parameter_size_mask);
    Atomic::cmpxchg(newflags, &_flags, (intx)0);
  }
  guarantee(parameter_size() == value,
            "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
}

void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
                                                       const methodHandle& method,
                                                       int vtable_index,
                                                       bool sender_is_interface) {
  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");

  int byte_no = -1;
  bool change_to_virtual = false;
  InstanceKlass* holder = NULL;  // have to declare this outside the switch
  switch (invoke_code) {
    case Bytecodes::_invokeinterface:
      holder = method->method_holder();
      // check for private interface method invocations
      if (vtable_index == Method::nonvirtual_vtable_index && holder->is_interface()) {
        assert(method->is_private(), "unexpected non-private method");
        assert(method->can_be_statically_bound(), "unexpected non-statically-bound method");
        // set_f2_as_vfinal_method checks if is_vfinal flag is true.
        set_method_flags(as_TosState(method->result_type()),
                         (1                                   << is_vfinal_shift) |
                         ((method->is_final_method() ? 1 : 0) << is_final_shift),
                         method()->size_of_parameters());
        set_f2_as_vfinal_method(method());
        byte_no = 2;
        set_f1(holder); // interface klass*
        break;
      }
      else {
        // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
        // instruction links to a non-interface method (in Object). This can happen when
        // an interface redeclares an Object method (like CharSequence declaring toString())
        // or when invokeinterface is used explicitly.
        // In that case, the method has no itable index and must be invoked as a virtual.
        // Set a flag to keep track of this corner case.
        assert(holder->is_interface() || holder == SystemDictionary::Object_klass(), "unexpected holder class");
        assert(method->is_public(), "Calling non-public method in Object with invokeinterface");
        change_to_virtual = true;

        // ...and fall through as if we were handling invokevirtual:
      }
    case Bytecodes::_invokevirtual:
    {
      if (!is_vtable_call) {
        assert(method->can_be_statically_bound(), "");
        // set_f2_as_vfinal_method checks if is_vfinal flag is true.
        set_method_flags(as_TosState(method->result_type()),
                         (1                                   << is_vfinal_shift) |
                         ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
                         ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
                         method()->size_of_parameters());
        set_f2_as_vfinal_method(method());
      } else {
        assert(!method->can_be_statically_bound(), "");
        assert(vtable_index >= 0, "valid index");
        assert(!method->is_final_method(), "sanity");
        set_method_flags(as_TosState(method->result_type()),
                         ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                         method()->size_of_parameters());
        set_f2(vtable_index);
      }
      byte_no = 2;
      break;
    }

    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
      assert(!is_vtable_call, "");
      // Note: Read and preserve the value of the is_vfinal flag on any
      // invokevirtual bytecode shared with this constant pool cache entry.
      // It is cheap and safe to consult is_vfinal() at all times.
      // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
      set_method_flags(as_TosState(method->result_type()),
                       ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
                       ((method->is_final_method() ? 1 : 0) << is_final_shift),
                       method()->size_of_parameters());
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    bool do_resolve = true;
    // Don't mark invokespecial to method as resolved if sender is an interface. The receiver
    // has to be checked to be a subclass of the current class every time this bytecode
    // is executed.
    if (invoke_code == Bytecodes::_invokespecial && sender_is_interface &&
        method->name() != vmSymbols::object_initializer_name()) {
      do_resolve = false;
    }
    if (invoke_code == Bytecodes::_invokestatic) {
      assert(method->method_holder()->is_initialized() ||
             method->method_holder()->is_reentrant_initialization(Thread::current()),
             "invalid class initialization state for invoke_static");

      if (!VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
        // Don't mark invokestatic to method as resolved if the holder class has not yet completed
        // initialization. An invokestatic must only proceed if the class is initialized, but if
        // we resolve it before then, that class initialization check is skipped.
        //
        // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
        // the template interpreter supports a fast class initialization check for
        // invokestatic which doesn't require call site re-resolution to
        // enforce the class initialization barrier.
        do_resolve = false;
      }
    }
    if (do_resolve) {
      set_bytecode_1(invoke_code);
    }
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We do not set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved. In this
      // case, the method gets reresolved with caller for each interface call
      // because the actual selected method may not be public.
      //
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual ||
             (invoke_code == Bytecodes::_invokeinterface &&
              ((method->is_private() ||
                (method->is_final() && method->method_holder() == SystemDictionary::Object_klass())))),
             "unexpected invocation mode");
      if (invoke_code == Bytecodes::_invokeinterface &&
          (method->is_private() || method->is_final())) {
        // We set bytecode_1() to _invokeinterface, because that is the
        // bytecode # used by the interpreter to see if it is resolved.
        // We set bytecode_2() to _invokevirtual.
        set_bytecode_1(invoke_code);
      }
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
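
// Recap of what set_direct_or_vtable_call leaves behind (see the switch and
// the byte_no handling above):
//   invokespecial/invokestatic:        f1 = Method*, bytecode_1 set unless
//                                      resolution is deliberately deferred (do_resolve)
//   invokevirtual, statically bound:   f2 = Method* with is_vfinal set, bytecode_2 set
//   invokevirtual via vtable:          f2 = vtable index, bytecode_2 set
//   invokeinterface on private method: f1 = interface Klass*, f2 = Method* (vfinal),
//                                      both bytecodes set
//   invokeinterface on Object method:  forced to virtual; bytecode_1 stays clear so
//                                      the interpreter re-resolves each call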

void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, const methodHandle& method,
                                             bool sender_is_interface) {
  int index = Method::nonvirtual_vtable_index;
  // index < 0; FIXME: inline and customize set_direct_or_vtable_call
  set_direct_or_vtable_call(invoke_code, method, index, sender_is_interface);
}

void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, const methodHandle& method, int index) {
  // either the method is a miranda or its holder should accept the given index
  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
  set_direct_or_vtable_call(invoke_code, method, index, false);
}

void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code,
                                             Klass* referenced_klass,
                                             const methodHandle& method, int index) {
  assert(method->method_holder()->verify_itable_index(index), "");
  assert(invoke_code == Bytecodes::_invokeinterface, "");
  InstanceKlass* interf = method->method_holder();
  assert(interf->is_interface(), "must be an interface");
  assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
  set_f1(referenced_klass);
  set_f2((intx)method());
  set_method_flags(as_TosState(method->result_type()),
                   0,  // no option bits
                   method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}
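
// For a resolved invokeinterface entry (itable dispatch), f1 holds the
// referenced interface Klass* and f2 holds the resolved Method*; see
// f2_as_interface_method() in method_if_resolved() below.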


void ConstantPoolCacheEntry::set_method_handle(const constantPoolHandle& cpool, const CallInfo &call_info) {
  set_method_handle_common(cpool, Bytecodes::_invokehandle, call_info);
}

void ConstantPoolCacheEntry::set_dynamic_call(const constantPoolHandle& cpool, const CallInfo &call_info) {
  set_method_handle_common(cpool, Bytecodes::_invokedynamic, call_info);
}

void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& cpool,
                                                      Bytecodes::Code invoke_code,
                                                      const CallInfo &call_info) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, it can use the linked
  // cache entry.

  objArrayHandle resolved_references(Thread::current(), cpool->resolved_references());
  // Use the resolved_references() lock for this cpCache entry.
  // resolved_references are created for all classes with Invokedynamic, MethodHandle
  // or MethodType constant pool cache entries.
  assert(resolved_references() != NULL,
         "a resolved_references array should have been created for this class");
  ObjectLocker ol(resolved_references, Thread::current());
  if (!is_f1_null()) {
    return;
  }

  if (indy_resolution_failed()) {
    // Before we got here, another thread got a LinkageError exception during
    // resolution. Ignore our success and throw their exception.
    ConstantPoolCache* cpCache = cpool->cache();
    int index = -1;
    for (int i = 0; i < cpCache->length(); i++) {
      if (cpCache->entry_at(i) == this) {
        index = i;
        break;
      }
    }
    guarantee(index >= 0, "Didn't find cpCache entry!");
    int encoded_index = ResolutionErrorTable::encode_cpcache_index(
                          ConstantPool::encode_invokedynamic_index(index));
    Thread* THREAD = Thread::current();
    ConstantPool::throw_resolution_error(cpool, encoded_index, THREAD);
    return;
  }

  const methodHandle adapter = call_info.resolved_method();
  const Handle appendix      = call_info.resolved_appendix();
  const bool has_appendix    = appendix.not_null();

  // Write the flags.
  // MHs and indy are always sig-poly and have a local signature.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix ? 1 : 0) << has_appendix_shift) |
                   (                     1 << has_local_signature_shift) |
                   (                     1 << is_final_shift),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    ttyLocker ttyl;
    tty->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method=" PTR_FORMAT " (local signature) ",
                  invoke_code,
                  p2i(appendix()),
                  (has_appendix ? "" : " (unused)"),
                  p2i(adapter()));
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.invoke.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer Methods, while keeping type safety.
  //

  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = f2_as_index();
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
  }

  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    ttyLocker ttyl;
    this->print(tty, 0);
  }

  assert(has_appendix == this->has_appendix(), "proper storage of appendix flag");
  assert(this->has_local_signature(), "proper storage of signature flag");
}
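
// Recap for invokehandle/invokedynamic entries (see the NOTE on ordering at
// the top of set_method_handle_common): flags carry the adapter's tos state,
// parameter size, and the has_appendix/has_local_signature/final bits;
// refs[f2_as_index()] holds the appendix, if any; f1 holds the adapter
// Method* and is published last via release_set_f1; bytecode_1 is the
// invoke code.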

bool ConstantPoolCacheEntry::save_and_throw_indy_exc(
  const constantPoolHandle& cpool, int cpool_index, int index, constantTag tag, TRAPS) {

  assert(HAS_PENDING_EXCEPTION, "No exception got thrown!");
  assert(PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass()),
         "No LinkageError exception");

  // Use the resolved_references() lock for this cpCache entry.
  // resolved_references are created for all classes with Invokedynamic, MethodHandle
  // or MethodType constant pool cache entries.
  objArrayHandle resolved_references(Thread::current(), cpool->resolved_references());
  assert(resolved_references() != NULL,
         "a resolved_references array should have been created for this class");
  ObjectLocker ol(resolved_references, THREAD);

  // if f1 is not null or the indy_resolution_failed flag is set then another
  // thread either succeeded in resolving the method or got a LinkageError
  // exception, before this thread was able to record its failure. So, clear
  // this thread's exception and return false so caller can use the earlier
  // thread's result.
  if (!is_f1_null() || indy_resolution_failed()) {
    CLEAR_PENDING_EXCEPTION;
    return false;
  }

  Symbol* error = PENDING_EXCEPTION->klass()->name();
  Symbol* message = java_lang_Throwable::detail_message(PENDING_EXCEPTION);

  SystemDictionary::add_resolution_error(cpool, index, error, message);
  set_indy_resolution_failed();
  return true;
}

Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpool) {
  // Decode the action of the set_*_call and set_method_handle* setters above.
  Bytecodes::Code invoke_code = bytecode_1();
  if (invoke_code != (Bytecodes::Code)0) {
    Metadata* f1 = f1_ord();
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        assert(f1->is_klass(), "");
        return f2_as_interface_method();
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        assert(!has_appendix(), "");
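        // fall through: in all four of these cases the Method* to return is in f1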
      case Bytecodes::_invokehandle:
      case Bytecodes::_invokedynamic:
        assert(f1->is_method(), "");
        return (Method*)f1;
      default:
        break;
      }
    }
  }
  invoke_code = bytecode_2();
  if (invoke_code != (Bytecodes::Code)0) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual
        Method* m = f2_as_vfinal_method();
        assert(m->is_method(), "");
        return m;
      } else {
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          Klass* klass = cpool->resolved_klass_at(holder_index);
          return klass->method_at_vtable(f2_as_index());
        }
      }
      break;
    default:
      break;
    }
  }
  return NULL;
}


oop ConstantPoolCacheEntry::appendix_if_resolved(const constantPoolHandle& cpool) {
  if (!has_appendix())
    return NULL;
  const int ref_index = f2_as_index();
  objArrayOop resolved_references = cpool->resolved_references();
  return resolved_references->obj_at(ref_index);
}


#if INCLUDE_JVMTI

void log_adjust(const char* entry_type, Method* old_method, Method* new_method, bool* trace_name_printed) {
  if (log_is_enabled(Info, redefine, class, update)) {
    ResourceMark rm;
    if (!(*trace_name_printed)) {
      log_info(redefine, class, update)("adjust: name=%s", old_method->method_holder()->external_name());
      *trace_name_printed = true;
    }
    log_debug(redefine, class, update, constantpool)
      ("cpc %s entry update: %s(%s)", entry_type, new_method->name()->as_C_string(), new_method->signature()->as_C_string());
  }
}

// RedefineClasses() API support:
// If this ConstantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
                                                 Method* new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    if (f2_as_vfinal_method() == old_method) {
      // match old_method so need an update
      // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
      _f2 = (intptr_t)new_method;
      log_adjust("vfinal", old_method, new_method, trace_name_printed);
    }
    return;
  }

  assert(_f1 != NULL, "should not call with uninteresting entry");

  if (!(_f1->is_method())) {
    // _f1 is a Klass* for an interface, _f2 is the method
    if (f2_as_interface_method() == old_method) {
      _f2 = (intptr_t)new_method;
      log_adjust("interface", old_method, new_method, trace_name_printed);
    }
  } else if (_f1 == old_method) {
    _f1 = new_method;
    log_adjust("special, static or dynamic", old_method, new_method, trace_name_printed);
  }
}

// a constant pool cache entry should never contain old or obsolete methods
bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
  Method* m = get_interesting_method_entry();
  // return false if m refers to a non-deleted old or obsolete method
  if (m != NULL) {
    assert(m->is_valid() && m->is_method(), "m is a valid method");
    return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete
  } else {
    return true;
  }
}

Method* ConstantPoolCacheEntry::get_interesting_method_entry() {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return NULL;
  }
  Method* m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = f2_as_vfinal_method();
  } else if (is_f1_null()) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return NULL;
  } else {
    if (!(_f1->is_method())) {
      // _f1 is a Klass* for an interface
      m = f2_as_interface_method();
    } else {
      m = f1_as_method();
    }
  }
  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method()) {
    return NULL;
  }
  return m;
}
#endif // INCLUDE_JVMTI

void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) st->print_cr(" -------------");
  // print entry
  st->print("%3d (" PTR_FORMAT ") ", index, (intptr_t)this);
  st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(),
               constant_pool_index());
  st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_f1);
  st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_f2);
  st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_flags);
  st->print_cr(" -------------");
}

void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}

// Implementation of ConstantPoolCache

ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
                                               const intStack& index_map,
                                               const intStack& invokedynamic_index_map,
                                               const intStack& invokedynamic_map, TRAPS) {

  const int length = index_map.length() + invokedynamic_index_map.length();
  int size = ConstantPoolCache::size(length);

  return new (loader_data, size, MetaspaceObj::ConstantPoolCacheType, THREAD)
    ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
}

void ConstantPoolCache::initialize(const intArray& inverse_index_map,
                                   const intArray& invokedynamic_inverse_index_map,
                                   const intArray& invokedynamic_references_map) {
  for (int i = 0; i < inverse_index_map.length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map.at(i);
    e->initialize_entry(original_index);
    assert(entry_at(i) == e, "sanity");
  }

  // Append invokedynamic entries at the end
  int invokedynamic_offset = inverse_index_map.length();
  for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
    int offset = i + invokedynamic_offset;
    ConstantPoolCacheEntry* e = entry_at(offset);
    int original_index = invokedynamic_inverse_index_map.at(i);
    e->initialize_entry(original_index);
    assert(entry_at(offset) == e, "sanity");
  }

  for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
    const int cpci = invokedynamic_references_map.at(ref);
    if (cpci >= 0) {
      entry_at(cpci)->initialize_resolved_reference_index(ref);
    }
  }
}
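
// A sketch of the layout built above: entries [0, n) correspond to the
// rewritten field/method references in inverse_index_map (n is its length),
// invokedynamic entries are appended at [n, length()), and every entry that
// owns a resolved-references slot has that slot's index recorded via
// initialize_resolved_reference_index().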

void ConstantPoolCache::verify_just_initialized() {
  DEBUG_ONLY(walk_entries_for_initialization(/*check_only = */ true));
}

void ConstantPoolCache::remove_unshareable_info() {
  walk_entries_for_initialization(/*check_only = */ false);
}

void ConstantPoolCache::walk_entries_for_initialization(bool check_only) {
  assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "sanity");
  // When dumping the archive, we want to clean up the ConstantPoolCache
  // to remove any effect of linking due to the execution of Java code --
  // each ConstantPoolCacheEntry will have the same contents as if
  // ConstantPoolCache::initialize has just returned:
  //
  // - We keep the ConstantPoolCacheEntry::constant_pool_index() bits for all entries.
  // - We keep the "f2" field for entries used by invokedynamic and invokehandle.
  // - All other bits in the entries are cleared to zero.
  ResourceMark rm;

  InstanceKlass* ik = constant_pool()->pool_holder();
  bool* f2_used = NEW_RESOURCE_ARRAY(bool, length());
  memset(f2_used, 0, sizeof(bool) * length());

  // Find all the slots for which we need to preserve f2
  for (int i = 0; i < ik->methods()->length(); i++) {
    Method* m = ik->methods()->at(i);
    RawBytecodeStream bcs(m);
    while (!bcs.is_last_bytecode()) {
      Bytecodes::Code opcode = bcs.raw_next();
      switch (opcode) {
      case Bytecodes::_invokedynamic: {
          int index = Bytes::get_native_u4(bcs.bcp() + 1);
          int cp_cache_index = constant_pool()->invokedynamic_cp_cache_index(index);
          f2_used[cp_cache_index] = 1;
        }
        break;
      case Bytecodes::_invokehandle: {
          int cp_cache_index = Bytes::get_native_u2(bcs.bcp() + 1);
          f2_used[cp_cache_index] = 1;
        }
        break;
      default:
        break;
      }
    }
  }

  if (check_only) {
    DEBUG_ONLY(
      for (int i=0; i<length(); i++) {
        entry_at(i)->verify_just_initialized(f2_used[i]);
      })
  } else {
    for (int i=0; i<length(); i++) {
      entry_at(i)->reinitialize(f2_used[i]);
    }
  }
}

void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
  assert(!is_shared(), "shared caches are not deallocated");
  data->remove_handle(_resolved_references);
  set_resolved_references(NULL);
  MetadataFactory::free_array<u2>(data, _reference_map);
  set_reference_map(NULL);
}

#if INCLUDE_CDS_JAVA_HEAP
oop ConstantPoolCache::archived_references() {
  if (CompressedOops::is_null(_archived_references)) {
    return NULL;
  }
  return HeapShared::materialize_archived_object(_archived_references);
}

void ConstantPoolCache::set_archived_references(oop o) {
  assert(DumpSharedSpaces, "called only during dump time");
  _archived_references = CompressedOops::encode(o);
}
#endif

#if INCLUDE_JVMTI
// RedefineClasses() API support:
// If any entry of this ConstantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
void ConstantPoolCache::adjust_method_entries(bool * trace_name_printed) {
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* entry = entry_at(i);
    Method* old_method = entry->get_interesting_method_entry();
    if (old_method == NULL || !old_method->is_old()) {
      continue; // skip uninteresting entries
    }
    if (old_method->is_deleted()) {
      // clean up entries with deleted methods
      entry->initialize_entry(entry->constant_pool_index());
      continue;
    }
    Method* new_method = old_method->get_new_method();
    entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed);
  }
}

// the constant pool cache should never contain old or obsolete methods
bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
  for (int i = 1; i < length(); i++) {
    if (entry_at(i)->get_interesting_method_entry() != NULL &&
        !entry_at(i)->check_no_old_or_obsolete_entries()) {
      return false;
    }
  }
  return true;
}

void ConstantPoolCache::dump_cache() {
  for (int i = 1; i < length(); i++) {
    if (entry_at(i)->get_interesting_method_entry() != NULL) {
      entry_at(i)->print(tty, i);
    }
  }
}
#endif // INCLUDE_JVMTI

void ConstantPoolCache::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(ConstantPoolCache): %p", this);
  it->push(&_constant_pool);
  it->push(&_reference_map);
}

// Printing

void ConstantPoolCache::print_on(outputStream* st) const {
  st->print_cr("%s", internal_name());
  // print constant pool cache entries
  for (int i = 0; i < length(); i++) entry_at(i)->print(st, i);
}

void ConstantPoolCache::print_value_on(outputStream* st) const {
  st->print("cache [%d]", length());
  print_address_on(st);
  st->print(" for ");
  constant_pool()->print_value_on(st);
}


// Verification

void ConstantPoolCache::verify_on(outputStream* st) {
  // print constant pool cache entries
  for (int i = 0; i < length(); i++) entry_at(i)->verify(st);
}