/*
* Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.oracle.max.vm.ext.t1x;
import static com.oracle.max.vm.ext.t1x.T1X.*;
import static com.sun.cri.bytecode.Bytecodes.*;
import static com.sun.max.platform.Platform.*;
import static com.sun.max.vm.MaxineVM.*;
import static com.sun.max.vm.compiler.CallEntryPoint.*;
import static com.sun.max.vm.compiler.target.Safepoints.*;
import static com.sun.max.vm.compiler.target.Stub.Type.*;
import static com.sun.max.vm.stack.JVMSFrameLayout.*;
import static com.sun.max.vm.stack.StackReferenceMapPreparer.*;
import java.util.*;
import com.oracle.max.vm.ext.t1x.T1XTemplate.SafepointsBuilder;
import com.sun.cri.bytecode.*;
import com.sun.cri.ci.*;
import com.sun.cri.ci.CiTargetMethod.CodeAnnotation;
import com.sun.cri.ri.*;
import com.sun.max.annotate.*;
import com.sun.max.atomic.*;
import com.sun.max.lang.*;
import com.sun.max.unsafe.*;
import com.sun.max.vm.*;
import com.sun.max.vm.actor.holder.*;
import com.sun.max.vm.actor.member.*;
import com.sun.max.vm.bytecode.refmaps.*;
import com.sun.max.vm.classfile.*;
import com.sun.max.vm.classfile.constant.*;
import com.sun.max.vm.code.*;
import com.sun.max.vm.code.CodeManager.Lifespan;
import com.sun.max.vm.collect.*;
import com.sun.max.vm.compiler.*;
import com.sun.max.vm.compiler.deopt.Deoptimization.CallerContinuation;
import com.sun.max.vm.compiler.deopt.Deoptimization.Continuation;
import com.sun.max.vm.compiler.deopt.Deoptimization.Info;
import com.sun.max.vm.compiler.target.*;
import com.sun.max.vm.compiler.target.amd64.*;
import com.sun.max.vm.object.*;
import com.sun.max.vm.profile.*;
import com.sun.max.vm.reference.*;
import com.sun.max.vm.runtime.*;
import com.sun.max.vm.stack.*;
import com.sun.max.vm.stack.amd64.*;
import com.sun.max.vm.thread.*;
import com.sun.max.vm.type.*;
/**
* A target method generated by T1X.
*/
public class T1XTargetMethod extends TargetMethod {
/**
 * The {@linkplain CiExceptionHandler#catchTypeCPI catch type CPI} used to distinguish the
 * synthesized exception handler of a synchronized method (see {@link #initHandlers}).
 */
static final int SYNC_METHOD_CATCH_TYPE_CPI = -1;
/**
 * Sentinel object stored at {@link #protectionLiteralIndex} in the reference literals to mark
 * this method as protected from code eviction. Compared by identity ({@code ==}), so the
 * interned constant itself is the marker.
 */
public static final String PROTECTED = "PROTECTED";
/**
 * This is the max number of slots used by any template and is computed when the templates are
 * {@linkplain T1X#createTemplates(Class, T1X, com.oracle.max.vm.ext.t1x.T1X.Templates, boolean) created}.
 */
static int templateSlots;
/**
 * Gets the number of slots to be reserved in each T1X frame for template spill slots.
 */
public static int templateSlots() {
    return templateSlots;
}
/**
 * The frame and register reference maps for this target method.
 *
 * The format of this byte array is described by the following pseudo C declaration:
 * <p>
 *
 * <pre>
 * referenceMaps {
 *     {
 *         u1 frameMap[frameRefMapSize];
 *         u1 regMap[regRefMapSize];
 *     } directCallMaps[numberOfDirectCalls]
 *     {
 *         u1 frameMap[frameRefMapSize];
 *         u1 regMap[regRefMapSize];
 *     } indirectCallMaps[numberOfIndirectCalls]
 *     {
 *         u1 frameMap[frameRefMapSize];
 *         u1 regMap[regRefMapSize];
 *     } safepointMaps[numberOfSafepoints]
 * }
 * </pre>
 */
private final byte[] refMaps;
/**
 * Offset from the frame pointer (e.g. %rbp on AMD64) to the bottom of the frame.
 * This is the address to which the frame reference maps are relative.
 */
public final int frameRefMapOffset;
// Size in bytes of one frame reference map (see refMaps layout above).
public final int frameRefMapSize;
/**
 * This field represents 1 of 3 ref map finalization states after the constructor of {@link T1XTargetMethod}
 * has completed. These values and the states they represent are:
 * <ol>
 * <li>A {@link T1XReferenceMapEditor} instance. This represents a target method whose ref maps have not yet been finalized.</li>
 * <li>A {@link VmThread} instance. This represents a target method whose ref maps are being finalized (by the denoted thread).</li>
 * <li>A {@code null} value. This represents a target method whose ref maps are finalized.</li>
 * </ol>
 *
 * Only the transition from state 1 to state 2 is atomic.
 *
 * @see #finalizeReferenceMaps()
 */
@INSPECTED(deepCopied = false)
private final AtomicReference refMapEditor = new AtomicReference();
/**
 * The frame layout.
 */
public final JVMSFrameLayout frame;
/**
 * The bytecode from which this target method was compiled. This will differ from
 * the bytecode hanging off of {@link #classMethodActor} in the case where it was rewritten.
 */
public final CodeAttribute codeAttribute;
/**
 * An array that encodes a mapping from BCIs to target code positions. A non-zero value
 * {@code val} at index {@code i} in the array encodes that there is a bytecode instruction whose opcode is at index
 * {@code i} in the bytecode array and whose target code position is {@code val}. Unless {@code i} is equal to the
 * length of the bytecode array in which case {@code val} denotes the target code position one byte past the
 * last target code byte emitted for the last bytecode instruction.
 */
public final int[] bciToPos;
/**
 * The exception handler table for this method, possibly extended with a synthesized handler
 * for a synchronized method (see {@link #initHandlers}).
 */
public final CiExceptionHandler[] handlers;
/**
 * The code annotations (if any) recorded when compiling this T1X method.
 */
public final CodeAnnotation[] annotations;
/**
 * The profile for this method - if there is one (otherwise, null).
 */
public final MethodProfile profile;
/**
 * Index for the method protection sentinel (for code eviction) in the reference literals array.
 */
public final int protectionLiteralIndex;
/**
 * Number of {@linkplain CodeEviction code eviction cycles} this method survived.
 */
private int survivedEvictions = 0;
/**
 * Creates a target method from a completed T1X compilation: copies code and literals into a
 * newly allocated bundle, packs safepoint/reference-map data, and (optionally) installs the
 * method into the code cache.
 *
 * @param comp the finished compilation supplying code, frame layout, safepoints and literals
 * @param install if {@code true}, allocate in the code cache and link direct calls; otherwise
 *            allocate in the heap (used e.g. for methods not meant to be executed in place)
 */
public T1XTargetMethod(T1XCompilation comp, boolean install) {
    super(comp.method, CallEntryPoint.BASELINE_ENTRY_POINT);
    codeAttribute = comp.codeAttribute;
    bciToPos = comp.bciToPos;
    frame = comp.frame;
    frameRefMapOffset = frame.frameReferenceMapOffset();
    setFrameSize(frame.frameSize());
    frameRefMapSize = frame.frameReferenceMapSize();
    if (comp.codeAnnotations == null || comp.codeAnnotations.isEmpty()) {
        annotations = null;
    } else {
        annotations = comp.codeAnnotations.toArray(new CodeAnnotation[comp.codeAnnotations.size()]);
    }
    SafepointsBuilder safepointsBuilder = comp.safepointsBuilder;
    // Template spill slots sit above the non-parameter locals and the operand stack;
    // their index is needed so packed frame ref maps can be laid out correctly.
    int firstTemplateSlot = frame.numberOfNonParameterSlots() + frame.numberOfOperandStackSlots();
    int firstTemplateSlotIndexInFrameReferenceMap = firstTemplateSlot * JVMSFrameLayout.STACK_SLOTS_PER_JVMS_SLOT;
    safepointsBuilder.pack(frameRefMapSize, regRefMapSize(), firstTemplateSlotIndexInFrameReferenceMap, comp.adapter);
    setSafepoints(safepointsBuilder.safepoints, safepointsBuilder.directCallees);
    refMaps = safepointsBuilder.refMaps;
    handlers = initHandlers(comp);
    if (comp.methodProfileBuilder != null) {
        comp.methodProfileBuilder.finish(this);
        profile = comp.methodProfileBuilder.methodProfileObject();
    } else {
        profile = null;
    }
    assert comp.protectionLiteralIndex == 0 : "protection literal should be first but is " + comp.protectionLiteralIndex;
    protectionLiteralIndex = comp.protectionLiteralIndex;
    // Allocate and set the code and data buffer
    final TargetBundleLayout targetBundleLayout = new TargetBundleLayout(0, comp.objectLiterals.size(), comp.buf.position());
    if (install) {
        Code.allocate(targetBundleLayout, this);
    } else {
        Code.allocateInHeap(targetBundleLayout, this);
    }
    // Copy code
    comp.buf.copyInto(code(), 0, code().length);
    // Copy reference literals
    if (referenceLiterals != null) {
        // Must not cause checkcast here, since some reference literals may be static tuples.
        for (int i = 0; i < comp.objectLiterals.size(); i++) {
            Object literal = comp.objectLiterals.get(i);
            ArrayAccess.setObject(referenceLiterals, i, literal);
        }
    }
    // NOTE(review): this assert dereferences referenceLiterals unconditionally even though the
    // copy loop above guards against null — presumably the PROTECTED literal guarantees a
    // non-empty literals array; verify against T1XCompilation.
    assert referenceLiterals[protectionLiteralIndex] == PROTECTED : "expected protection literal, found " + referenceLiterals[protectionLiteralIndex];
    if (safepointsBuilder.safepoints.size() != 0) {
        // Defer ref map finalization unless allocation can occur at a safepoint (or an option
        // forces eager maps); see finalizeReferenceMaps() for the finalization protocol.
        final T1XReferenceMapEditor referenceMapEditor = new T1XReferenceMapEditor(this, comp.numberOfBlocks, comp.blockBCIs, safepointsBuilder.bytecodeSafepointsIterator, frame);
        this.refMapEditor.set(referenceMapEditor);
        final ReferenceMapInterpreter interpreter = ReferenceMapInterpreter.from(referenceMapEditor.blockFrames());
        if (interpreter.performsAllocation() || T1XOptions.EagerRefMaps || T1XOptions.PrintCFGToFile) {
            finalizeReferenceMaps();
        }
    }
    if (!MaxineVM.isHosted()) {
        if (install) {
            linkDirectCalls();
        } else {
            // the displacement between a call site in the heap and a code cache location may not fit in the offset operand of a call
        }
    }
    // if the VM is running, validate freshly generated code
    assert MaxineVM.isHosted() || CodeCacheValidation.instance.validateSingleMethod(this);
}
/**
 * Returns the JVMS-based layout of this method's stack frame.
 */
@Override
public VMFrameLayout frameLayout() {
    return frame;
}
/**
 * Class initializers execute exactly once, so their code is one-shot; any other
 * baseline-compiled method is short-lived.
 */
@Override
public Lifespan lifespan() {
    final boolean oneShot = classMethodActor != null && classMethodActor.isClassInitializer();
    if (oneShot) {
        return Lifespan.ONE_SHOT;
    }
    return Lifespan.SHORT;
}
/**
 * Tells whether the eviction-protection sentinel is currently installed for this method.
 */
@Override
public boolean isProtected() {
    final Object sentinel = referenceLiterals[protectionLiteralIndex];
    return sentinel == PROTECTED;
}
/**
 * Installs the eviction-protection sentinel for this method.
 */
@Override
public void protect() {
    referenceLiterals[protectionLiteralIndex] = PROTECTED;
}
/**
 * Notify this method of a survived {@linkplain CodeEviction code eviction cycle}. This is done
 * by simply incrementing the {@link #survivedEvictions} counter.
 */
@Override
public void survivedEviction() {
    survivedEvictions += 1;
}
/**
 * This method was {@linkplain CodeEviction evicted} if its {@link #survivedEvictions} counter
 * is smaller than the current eviction count.
 */
@Override
public boolean wasEvicted() {
    final int currentCount = CodeEviction.evictionCount();
    return survivedEvictions < currentCount;
}
/**
 * The number of times this method was relocated is equal to the number of {@linkplain CodeEviction
 * eviction cycles} it survived.
 */
@Override
public int timesRelocated() {
    return survivedEvictions;
}
/**
 * Returns the method profile, or {@code null} if none was collected.
 */
@Override
public MethodProfile profile() {
    return profile;
}
/**
 * Returns the BCI-to-target-code-position map (see {@link #bciToPos}).
 */
@Override
public int[] bciToPosMap() {
    return bciToPos;
}
/**
 * Returns the (possibly rewritten) bytecode this method was compiled from.
 */
@Override
public CodeAttribute codeAttribute() {
    return codeAttribute;
}
/**
 * Initializes the exception handlers for this method, adding an extra one if necessary for
 * a synchronized method. This synthesized handler covers the machine code from the
 * instruction immediately after the lock on the receiver/class was acquired to the
 * instruction just after the lock is released. This mimics the exception handler
 * range generated by Java source compilers for synchronized blocks. The synthesized
 * handler is in terms of machine code positions, not BCIs and is distinguished by
 * its {@link CiExceptionHandler#catchTypeCPI} being {@link #SYNC_METHOD_CATCH_TYPE_CPI}.
 *
 * @param comp the compilation supplying the explicit handler table and sync positions
 * @return the handler table, with the synthesized handler appended for a synchronized method
 */
private CiExceptionHandler[] initHandlers(T1XCompilation comp) {
    if (comp.method.isSynchronized()) {
        CiExceptionHandler[] handlers = comp.handlers;
        // Append the synthesized handler after all explicit handlers so that dispatch
        // consults it last (see throwAddressToCatchAddress).
        CiExceptionHandler[] newHandlers = Arrays.copyOf(handlers, handlers.length + 1);
        CiExceptionHandler syncMethodHandler =
            new CiExceptionHandler(comp.syncHandlerStartPos,
                comp.syncHandlerEndPos,
                comp.syncMethodHandlerPos,
                SYNC_METHOD_CATCH_TYPE_CPI,
                null);
        newHandlers[handlers.length] = syncMethodHandler;
        // Update the reference maps to cover the local variable holding the copy of the receiver
        if (comp.synchronizedReceiver != -1) {
            for (int safepointIndex = 0; safepointIndex < safepoints.size(); safepointIndex++) {
                int pos = safepoints.posAt(safepointIndex);
                // Only safepoints within the synchronized region need the receiver copy
                // kept alive for the eventual monitorexit.
                if (pos >= comp.syncRefMapStartPos && pos < comp.syncRefMapEndPos) {
                    final int offset = safepointIndex * refMapSize();
                    final int refMapBit = frame.localVariableReferenceMapIndex(comp.synchronizedReceiver);
                    ByteArrayBitMap.set(refMaps, offset, frameRefMapSize, refMapBit);
                }
            }
        }
        return newHandlers;
    }
    return comp.handlers;
}
/**
 * Size in bytes of the non-parameter local variable area of this method's frame.
 */
int sizeOfNonParameterLocals() {
    final int nonParameterSlots = frame.numberOfNonParameterSlots();
    return nonParameterSlots * JVMSFrameLayout.JVMS_SLOT_SIZE;
}
/**
 * Collects the callees of this method, classifying each by the invoke opcode found at the
 * safepoint's BCI. Only callees resolvable to a {@link MethodActor} are recorded.
 */
@HOSTED_ONLY
@Override
public void gatherCalls(Set<MethodActor> directCalls, Set<MethodActor> virtualCalls, Set<MethodActor> interfaceCalls, Set<MethodActor> inlinedMethods) {
    for (int i = 0; i < safepoints.size(); ++i) {
        final int bci = bciForPos(safepoints.posAt(i));
        if (bci == -1) {
            continue;  // safepoint not correlated with a bytecode (e.g. prologue)
        }
        final RiMethod callee = codeAttribute.calleeAt(bci);
        if (!(callee instanceof MethodActor)) {
            continue;
        }
        final MethodActor actor = (MethodActor) callee;
        final int opcode = codeAttribute.code()[bci] & 0xff;
        switch (opcode) {
            case INVOKEVIRTUAL:
                virtualCalls.add(actor);
                break;
            case INVOKESTATIC:
            case INVOKESPECIAL:
                directCalls.add(actor);
                break;
            default:
                assert opcode == INVOKEINTERFACE;
                interfaceCalls.add(actor);
                break;
        }
    }
}
/**
 * Delegates to the AMD64-specific check of whether the call at {@code callSite} can be patched.
 */
@Override
public boolean isPatchableCallSite(CodePointer callSite) {
    return AMD64TargetMethodUtil.isPatchableCallSite(callSite);
}
/**
 * Fixes up the 32-bit displacement of the direct call at {@code callOffset} so that it
 * targets {@code callEntryPoint}.
 */
@Override
public CodePointer fixupCallSite(int callOffset, CodePointer callEntryPoint) {
    return AMD64TargetMethodUtil.fixupCall32Site(this, callOffset, callEntryPoint);
}
/**
 * @return the packed frame/register reference maps for this method (layout described at {@link #refMaps})
 */
public byte[] referenceMaps() {
    return refMaps;
}
/**
 * Gets the size (in bytes) of a bit map covering all the registers that may store references.
 * The bit position of a register in the bit map is the register's {@linkplain CiRegister#encoding encoding}.
 */
@FOLD
static int regRefMapSize() {
    final int registerBits = target().arch.registerReferenceMapBitCount;
    return ByteArrayBitMap.computeBitMapSize(registerBits);
}
/**
 * This version of {@code posFor()} accounts for the possibility of a method having moved in the code cache.
 *
 * @param ip an instruction pointer possibly within this method (or its old, from-space copy)
 * @return the code position corresponding to {@code ip}, or -1 if {@code ip} is not within
 *         this method's code
 */
@Override
public int posFor(CodePointer ip) {
    int pos = super.posFor(ip);
    if (pos == -1) {
        if (Code.getCodeManager().getRuntimeBaselineCodeRegion().isInFromSpace(ip.toAddress())) {
            // Rebase a from-space address onto this method's current code start.
            pos = (int) (ip.toLong() - (oldStart.toLong() + codeStart().toLong() - start().toLong()));
        }
        // BUG FIX: the original condition used '&&', which is unsatisfiable (a value cannot
        // be both negative and greater than the code length), so out-of-range positions were
        // returned unchanged. '||' implements the intended range clamp to -1.
        if (pos < 0 || pos > code().length) {
            pos = -1;
        }
    }
    return pos;
}
/**
 * Patches, in an MT-safe manner, the displacement of the direct call at {@code callOffset}
 * so that it targets {@code callEntryPoint}.
 */
@Override
public CodePointer patchCallSite(int callOffset, CodePointer callEntryPoint) {
    return AMD64TargetMethodUtil.mtSafePatchCallDisplacement(this, codeAt(callOffset), callEntryPoint);
}
/**
 * Redirects both entry points of this method to the corresponding entry points of {@code tm}
 * by patching in jumps. Only implemented for AMD64.
 */
@Override
public void redirectTo(TargetMethod tm) {
    if (platform().isa != ISA.AMD64) {
        throw FatalError.unimplemented();
    }
    AMD64TargetMethodUtil.patchWithJump(this, BASELINE_ENTRY_POINT.offset(), BASELINE_ENTRY_POINT.in(tm));
    if (vm().compilationBroker.needsAdapters()) {
        AMD64TargetMethodUtil.patchWithJump(this, OPTIMIZED_ENTRY_POINT.offset(), OPTIMIZED_ENTRY_POINT.in(tm));
    }
}
/**
 * Template calls target other baseline code; every other direct call goes through the
 * optimized entry point.
 */
@Override
protected CallEntryPoint callEntryPointForDirectCall(int safepointIndex) {
    final boolean templateCall = safepoints.isSetAt(TEMPLATE_CALL, safepointIndex);
    return templateCall ? CallEntryPoint.BASELINE_ENTRY_POINT : CallEntryPoint.OPTIMIZED_ENTRY_POINT;
}
/**
 * Rebases an instruction pointer from the old (pre-relocation) copy of this method's code
 * onto the current copy.
 */
@INLINE
private CodePointer relocateIP(CodePointer ip) {
    return CodePointer.from(start).plus(ip.minus(oldStart));
}
/**
 * Returns the target code position for the bytecode starting at {@code bci}.
 */
public int posForBci(int bci) {
    return bciToPos[bci];
}
/**
 * Returns the code annotations recorded during compilation, or {@code null} if none.
 */
@Override
public CodeAnnotation[] annotations() {
    return annotations;
}
/**
 * Builds the debug info (frame + register reference maps and a JVMS frame description) for
 * the safepoint at {@code safepointIndex}.
 *
 * @param safepointIndex index of the safepoint whose maps are extracted from {@link #refMaps}
 * @param fa if non-null, frame values are read from the live frame it describes instead of
 *            being left as slot "recipes"
 */
@Override
public CiDebugInfo debugInfoAt(int safepointIndex, FrameAccess fa) {
    // Slice this safepoint's frame and register maps out of the packed refMaps blob;
    // the register map immediately follows the frame map (see refMaps layout).
    CiBitMap frameRefMap = new CiBitMap(referenceMaps(), safepointIndex * refMapSize(), frameRefMapSize);
    CiBitMap regRefMap = new CiBitMap(referenceMaps(), (safepointIndex * refMapSize()) + frameRefMapSize, regRefMapSize());
    int bci = bciForPos(safepoints.posAt(safepointIndex));
    CiFrame debugFrame = frame.asFrame(classMethodActor, bci, frameRefMap);
    if (fa != null) {
        CiValue[] actualValues = new CiValue[debugFrame.values.length];
        // somewhat wasteful as we just replace all the recipe values with the actual values
        for (int i = 0; i < debugFrame.values.length; i++) {
            CiValue accessValue = debugFrame.values[i];
            actualValues[i] = toLiveSlot(fa, accessValue);
        }
        debugFrame = new CiFrame(debugFrame.caller(), debugFrame.method, debugFrame.bci,
            debugFrame.rethrowException, actualValues,
            debugFrame.numLocals, debugFrame.numStack, debugFrame.numLocks);
    }
    return new CiDebugInfo(debugFrame, regRefMap, frameRefMap);
}
/**
 * Reads the live value of a frame-slot "recipe" from the frame described by {@code fa}.
 * Assumes every recipe value is a {@link CiAddress} relative to the frame pointer
 * (as produced by {@code frame.asFrame} in {@link #debugInfoAt}).
 */
private static CiValue toLiveSlot(FrameAccess fa, CiValue value) {
    final CiAddress slot = (CiAddress) value;
    final Pointer framePointer = fa.fp;
    if (value.kind.isObject()) {
        final Reference ref = framePointer.readReference(slot.displacement);
        return CiConstant.forObject(ref.toJava());
    }
    final Word raw = framePointer.readWord(slot.displacement);
    return WordUtil.archConstant(raw);
}
/**
 * Reports the (single) code position for {@code ip} to {@code cpc}.
 *
 * @return 1 if a position was reported, 0 if {@code ip} does not correlate with a bytecode
 */
@Override
public int forEachCodePos(CodePosClosure cpc, CodePointer ip) {
    final int bci = bciFor(ip);
    if (bci < 0) {
        return 0;
    }
    cpc.doCodePos(classMethodActor, bci);
    return 1;
}
/**
 * Gets the BCI for a machine code instruction address.
 *
 * @param ip an instruction pointer that may denote an instruction in this target method
 * @return the start position of the bytecode instruction that is implemented at the instruction pointer or -1 if
 *         {@code ip} denotes an instruction that does not correlate to any bytecode. This will be
 *         the case when {@code ip} is not in this target method or is in the adapter frame stub
 *         code, prologue or epilogue.
 */
public int bciFor(CodePointer ip) {
    assert bciToPos != null;
    assert bciToPos.length > 0;
    return bciForPos(posFor(ip));
}
/**
 * Gets the BCI for a target code position in this target method.
 *
 * @param pos a target code position that may denote an instruction in this method correlated with a bytecode
 * @return the start position of the bytecode instruction that is implemented at {@code pos} or -1 if
 *         {@code pos} is outside the range(s) of target code positions in this target method that
 *         correlate with a bytecode.
 */
public int bciForPos(int pos) {
    assert bciToPos != null;
    assert bciToPos.length > 0;
    if (pos < posForBci(0)) {
        // The position denotes a spot in the adapter frame code or the prologue.
        return -1;
    }
    // Search the map backwards as there may be bytecodes for which no target code was
    // emitted (zero entries). We want the first bytecode whose non-zero target code
    // position is less than or equal to 'pos'.
    int bci = -1;
    for (int i = bciToPos.length - 1; i >= 0; --i) {
        final int p = posForBci(i);
        if (p != 0 && p <= pos) {
            bci = i;
            break;
        }
    }
    assert bci >= 0;
    return bci;
}
/**
 * Ensures that the {@linkplain #referenceMaps() reference maps} for this method are finalized. Only
 * finalized reference maps are guaranteed to never change for the remaining lifetime of this target method.
 * <p/>
 * Although this method may be called by multiple threads, it cannot use standard synchronization as that may block
 * one of the threads in native code on a mutex. This would incorrectly be interpreted by the GC as meaning
 * the mutator thread has blocked for GC after taking a safepoint trap. To avoid blocking in native code,
 * a spin loop is used instead.
 * <p/>
 * If this method is called while preparing the stack reference map for a thread that has taken a safepoint
 * for GC, then safepoints are currently disabled and so there is no need to use the {@link NO_SAFEPOINTS}
 * annotation on this method.
 */
@Override
public void finalizeReferenceMaps() {
    // State protocol (see refMapEditor): editor instance -> finalizing VmThread -> null.
    Object object = this.refMapEditor.get();
    if (object != null) {
        T1XReferenceMapEditor referenceMapEditor = null;
        Object result = object;
        if (object instanceof T1XReferenceMapEditor) {
            referenceMapEditor = (T1XReferenceMapEditor) object;
            // Atomically claim finalization; on success the CAS returns the editor we expected.
            result = this.refMapEditor.compareAndSwap(referenceMapEditor, VmThread.current());
        }
        if (result == referenceMapEditor) {
            // We won the race: this thread performs the finalization.
            // We must disable safepoint polls in the current thread to prevent any recursive
            // attempt to finalize the ref maps. Such a recursive call will spin infinitely
            // in the pause loop below.
            // One case where such recursion is possible is if a GC is requested while
            // this thread is preparing ref maps during ref map verification (i.e. -XX:+VerifyRefMaps).
            boolean mustReenableSafepoints = !SafepointPoll.disable();
            referenceMapEditor.fillInMaps();
            this.refMapEditor.set(null);
            if (mustReenableSafepoints) {
                SafepointPoll.enable();
            }
        } else if (result != null) {
            // Another thread is (or was already) finalizing; 'result' is that thread.
            FatalError.check(result instanceof VmThread, "expected VmThread instance");
            if (VmThread.current() == result) {
                Log.print("Recursive attempt to finalize ref maps of ");
                Log.printMethod(this, true);
                FatalError.unexpected("Recursive attempt to finalize ref maps of a T1X target method", false, null, Pointer.zero());
            }
            // Spin while waiting for the other thread to complete finalization
            while (refMapEditor.get() != null) {
                Intrinsics.pause();
            }
        }
    }
}
/**
 * T1X frames never require callee-saved registers to be preserved for a local handler.
 */
@Override
public boolean preserveRegistersForLocalExceptionHandler() {
    return false;
}
/**
 * Resolves the handler address for an exception thrown at {@code throwAddress} without
 * filling in any {@link CatchExceptionInfo}.
 */
@Override
public CodePointer throwAddressToCatchAddress(CodePointer throwAddress, Throwable exception) {
    return throwAddressToCatchAddress(throwAddress, exception, null);
}
/**
 * Resolves the handler for {@code throwable} at the cursor's current IP, recording the
 * handler address in {@code info} when found.
 *
 * @return true if a handler exists for the exception at this frame
 */
@Override
public boolean catchExceptionInfo(StackFrameCursor current, Throwable throwable, CatchExceptionInfo info) {
    final CodePointer handler = throwAddressToCatchAddress(current.vmIP(), throwable, info);
    if (handler.isZero()) {
        return false;
    }
    info.codePointer = handler;
    return true;
}
/**
 * Finds the machine code address of the handler (if any) covering an exception thrown at
 * {@code ip}. Explicit (bytecode-level) handlers are consulted first; the synthesized
 * handler of a synchronized method (see {@link #initHandlers}) is consulted last and its
 * start/end/handler fields hold machine code positions rather than BCIs.
 *
 * @param ip the address at which the exception was raised
 * @param exception the exception being dispatched (used for catch-type matching)
 * @param info if non-null, receives the handler BCI when an explicit handler matches
 * @return the handler's code address, or {@link CodePointer#zero()} if this method does not
 *         handle the exception
 */
private CodePointer throwAddressToCatchAddress(CodePointer ip, Throwable exception, CatchExceptionInfo info) {
    if (handlers.length != 0) {
        final int exceptionPos = posFor(ip);
        int exceptionBCI = bciForPos(exceptionPos);
        if (exceptionBCI != -1) {
            for (CiExceptionHandler e : handlers) {
                // Skip the synthesized sync-method handler here; it is handled below in
                // terms of machine code positions.
                if (e.catchTypeCPI != SYNC_METHOD_CATCH_TYPE_CPI) {
                    if (e.startBCI <= exceptionBCI && exceptionBCI < e.endBCI) {
                        ClassActor catchType = (ClassActor) e.catchType;
                        // A null catch type is a finally/catch-all entry.
                        if (catchType == null || catchType.isAssignableFrom(ObjectAccess.readClassActor(exception))) {
                            int handlerPos = posForBci(e.handlerBCI());
                            checkHandler(exceptionPos, exceptionBCI, e.handlerBCI, handlerPos);
                            if (info != null) {
                                info.bci = e.handlerBCI();
                            }
                            return codeAt(handlerPos);
                        }
                    }
                }
            }
        }
        // The synthesized handler (if present) is always the last table entry; its
        // startBCI/endBCI/handlerBCI fields actually hold machine code positions.
        if (handlers[handlers.length - 1].catchTypeCPI == SYNC_METHOD_CATCH_TYPE_CPI) {
            CiExceptionHandler syncMethodHandler = handlers[handlers.length - 1];
            if (syncMethodHandler.startBCI <= exceptionPos && exceptionPos < syncMethodHandler.endBCI) {
                int handlerPos = syncMethodHandler.handlerBCI;
                checkHandler(exceptionPos, exceptionBCI, -1, handlerPos);
                return codeAt(handlerPos);
            }
        }
    }
    return CodePointer.zero();
}
/**
 * Sanity-checks that a resolved handler position lies strictly within this method's code;
 * aborts the VM otherwise.
 */
void checkHandler(int excPos, int excBCI, int handlerBCI, int handlerPos) {
    final boolean withinCode = handlerPos > 0 && handlerPos < code().length;
    if (!withinCode) {
        FatalError.unexpected("Bad handler for exception at pos " + excPos + " (bci: " + excBCI + ") in " + this + ": handler pos " + handlerPos + " (bci: " + handlerBCI + ")");
    }
}
/**
 * Maps the return address of a call back to the BCI of the invoke bytecode that made it.
 */
private int bciForCallSite(CodePointer returnIP) {
    // The return address is just beyond the call machine instruction. In case the call is
    // the last machine instruction of the invoke bytecode, step back one byte so the lookup
    // always lands within the code emitted for that bytecode.
    final CodePointer withinCall = returnIP.minus(1);
    return bciFor(withinCall);
}
/**
 * Extracts the constant pool index operand of an invoke bytecode: the two bytes following
 * the opcode, interpreted as a big-endian unsigned 16-bit value.
 */
private static int getInvokeCPI(byte[] code, int invokeBCI) {
    assert invokeBCI >= 0 : "illegal bytecode index";
    final byte opcode = code[invokeBCI];
    assert
        opcode == (byte) Bytecodes.INVOKEINTERFACE
        || opcode == (byte) Bytecodes.INVOKESPECIAL
        || opcode == (byte) Bytecodes.INVOKESTATIC
        || opcode == (byte) Bytecodes.INVOKEVIRTUAL
        : "expected invoke bytecode at index " + invokeBCI + ", found " + String.format("0x%h", code[invokeBCI]);
    final int hi = code[invokeBCI + 1] & 0xff;
    final int lo = code[invokeBCI + 2] & 0xff;
    return (hi << 8) | lo;
}
/**
 * Prepares the reference map to cover the reference parameters on the stack at a call from a T1X compiled method
 * into a trampoline. These slots are normally ignored when computing the reference maps for a T1X method as they
 * are covered by a reference map in the callee if necessary. They <b>cannot</b> be covered by a reference map in
 * the T1X method as these slots are seen as local variables in a T1X callee and as such can be overwritten with
 * non-reference values.
 *
 * However, in the case where a T1X method calls into a trampoline, the reference parameters of the call are not
 * covered by any reference map. In this situation, we need to analyze the invokeXXX bytecode at the call site to
 * derive the signature of the call which in turn allows us to mark the parameter stack slots that contain
 * references.
 *
 * @param caller the T1X method frame cursor
 * @param preparer the visitor that records each reference slot found
 */
private void prepareTrampolineRefMap(StackFrameCursor caller, FrameReferenceMapVisitor preparer) {
    // prepare the reference map for the parameters passed by the current (caller) frame.
    // the call was unresolved and hit a trampoline, so compute the refmap from the signature of
    // the called method by looking at the bytecode of the caller method
    CodePointer cip = caller.vmIP();
    // If the code has been relocated by eviction, rebase the IP onto the current copy.
    if (Code.getCodeManager().getRuntimeBaselineCodeRegion().isInFromSpace(cip.toAddress())) {
        cip = relocateIP(cip);
    }
    int bci = bciForCallSite(cip);
    ConstantPool constantPool = codeAttribute.cp;
    byte[] code = codeAttribute.code();
    MethodRefConstant methodRef = constantPool.methodAt(getInvokeCPI(code, bci));
    boolean isInvokestatic = (code[bci] & 0xFF) == Bytecodes.INVOKESTATIC;
    SignatureDescriptor sig = methodRef.signature(constantPool);
    int slotSize = JVMSFrameLayout.JVMS_SLOT_SIZE;
    // Non-static invokes have an extra (receiver) slot before the declared parameters.
    int numberOfSlots = sig.computeNumberOfSlots() + (isInvokestatic ? 0 : 1);
    if (numberOfSlots != 0) {
        // Handle the parameters in reverse order as caller.sp() is currently
        // pointing at the last parameter.
        Pointer slotPointer = caller.sp();
        for (int i = sig.numberOfParameters() - 1; i >= 0; --i) {
            TypeDescriptor parameter = sig.parameterDescriptorAt(i);
            Kind parameterKind = parameter.toKind();
            if (parameterKind.isReference) {
                if (logStackRootScanning()) {
                    StackReferenceMapPreparer.stackRootScanLogger.logParameter(i, parameter);
                }
                preparer.visitReferenceMapBits(caller, slotPointer, 1, 1);
            }
            // Category-2 kinds (long/double) occupy two JVMS slots.
            int parameterSlots = (!parameterKind.isCategory1) ? 2 : 1;
            slotPointer = slotPointer.plus(slotSize * parameterSlots);
        }
        // Finally deal with the receiver (if any)
        if (!isInvokestatic) {
            // Mark the slot for the receiver as it is not covered by the method signature:
            if (logStackRootScanning()) {
                StackReferenceMapPreparer.stackRootScanLogger.logReceiver(methodRef.holder(constantPool));
            }
            preparer.visitReferenceMapBits(caller, slotPointer, 1, 1);
        }
    }
}
/**
 * @return the number of bytes in {@link #refMaps} corresponding to one stop position
 *         (the frame map followed by the register map)
 */
int refMapSize() {
    return frameRefMapSize + regRefMapSize();
}
/**
 * Prepares the stack reference map for this frame (and, when the callee saved registers
 * holding this frame's values, for the callee save area) at the safepoint denoted by the
 * cursor's current instruction pointer.
 *
 * @param current cursor for this T1X method's frame
 * @param callee cursor for the frame called from this one (may hold a callee save area)
 * @param preparer the visitor that records each reference slot found
 */
@Override
public void prepareReferenceMap(StackFrameCursor current, StackFrameCursor callee, FrameReferenceMapVisitor preparer) {
    // Ref maps must be finalized before they can be read (see finalizeReferenceMaps).
    finalizeReferenceMaps();
    CiCalleeSaveLayout csl = callee.csl();
    Pointer csa = callee.csa();
    TargetMethod calleeTM = callee.targetMethod();
    if (calleeTM != null) {
        Stub.Type st = calleeTM.stubType();
        if (st == StaticTrampoline || st == VirtualTrampoline || st == InterfaceTrampoline) {
            // Outgoing parameters of an unresolved call are not covered by any map;
            // derive them from the invoke bytecode's signature.
            prepareTrampolineRefMap(current, preparer);
        } else if (calleeTM.is(TrapStub) && Trap.Number.isStackOverflow(csa)) {
            // a method can never catch stack overflow for itself so there
            // is no need to scan the references in the trapped method
            return;
        }
    }
    CodePointer cip = current.vmIP();
    // If the code was relocated by eviction, rebase the IP onto the current copy.
    if (Code.getCodeManager().getRuntimeBaselineCodeRegion().isInFromSpace(cip.toAddress())) {
        cip = relocateIP(cip);
    }
    int safepointIndex = findSafepointIndex(cip);
    if (safepointIndex < 0) {
        // this is very bad.
        throw FatalError.unexpected("could not find safepoint index");
    }
    int refMapSize = refMapSize();
    if (!csa.isZero()) {
        assert csl != null;
        // the callee contains register state from this frame;
        // use register reference maps in this method to fill in the map for the callee
        Pointer slotPointer = csa;
        // The register map follows the frame map within this safepoint's refMaps entry.
        int byteIndex = (safepointIndex * refMapSize) + frameRefMapSize;
        preparer.logPrepareReferenceMap(this, safepointIndex, slotPointer, "registers");
        // Need to translate from register numbers (as stored in the reg ref maps) to frame slots.
        for (int i = 0; i < regRefMapSize(); i++) {
            int b = refMaps[byteIndex] & 0xff;
            int reg = i * 8;
            // Walk the set bits of this byte; each bit denotes a register that holds a reference.
            while (b != 0) {
                if ((b & 1) != 0) {
                    int offset = csl.offsetOf(reg);
                    if (logStackRootScanning()) {
                        StackReferenceMapPreparer.stackRootScanLogger.logRegisterState(csl.registers[reg]);
                    }
                    preparer.visitReferenceMapBits(callee, slotPointer.plus(offset), 1, 1);
                }
                reg++;
                b = b >>> 1;
            }
            byteIndex++;
        }
    }
    // prepare the map for this stack frame
    Pointer slotPointer = current.fp().plus(frameRefMapOffset);
    preparer.logPrepareReferenceMap(this, safepointIndex, slotPointer, "frame");
    int byteIndex = safepointIndex * refMapSize;
    // Visit the frame map one byte (8 slots) at a time.
    for (int i = 0; i < frameRefMapSize; i++) {
        preparer.visitReferenceMapBits(current, slotPointer, refMaps[byteIndex] & 0xff, 8);
        slotPointer = slotPointer.plusWords(8);
        byteIndex++;
    }
}
/**
 * Computes the stack pointer value with which to enter an exception handler in this method.
 *
 * NOTE(review): the {@code sp} parameter is unused — the catcher SP is derived solely from
 * {@code fp}; confirm whether {@code sp} is kept only for interface symmetry.
 *
 * @param sp the current stack pointer (unused, see note above)
 * @param fp the frame pointer (base of local variables on AMD64)
 * @return the stack pointer for the handler's operand stack
 */
private Pointer adjustSPForHandler(Pointer sp, Pointer fp) {
    if (isAMD64()) {
        Pointer localVariablesBase = fp;
        // The Java operand stack of the T1X method that handles the exception is cleared
        // when unwinding. The T1X generated handler is responsible for loading the
        // exception from VmThreadLocal.EXCEPTION_OBJECT to the operand stack.
        //
        // Compute the offset to the first stack slot of the Java Stack:
        //
        // frame size - (space for locals + saved RBP + space of the first slot itself).
        //
        Pointer catcherSP = localVariablesBase.minus(sizeOfNonParameterLocals());
        // The ref maps for the handler address will expect a valid reference
        // in stack slot 0 so we store a null there.
        // NOTE(review): writeReference is given the loop index 'i' directly as the offset;
        // if STACK_SLOTS_PER_JVMS_SLOT > 1 these would be overlapping byte offsets
        // (0, 1, ...) rather than word-sized strides — verify this is only ever executed
        // with one stack slot per JVMS slot (64-bit), or that the offset should be scaled.
        Pointer slot0 = catcherSP.minus(JVMS_SLOT_SIZE);
        for (int i = 0; i < STACK_SLOTS_PER_JVMS_SLOT; i++) {
            slot0.writeReference(i, Reference.zero());
        }
        return catcherSP;
    } else {
        throw unimplISA();
    }
}
/**
 * Computes the machine code address to which a throw in the given frame is attributed.
 */
static CodePointer throwAddress(StackFrameCursor frame) {
    final boolean adjust = !frame.isTopFrame() && platform().isa.offsetToReturnPC == 0;
    if (adjust) {
        // Adjust the return address so it falls within the call instruction itself.
        return frame.vmIP().minus(1);
    }
    return frame.vmIP();
}
/**
 * Dispatches {@code throwable} to this method's handler (if it has one covering the throw
 * address). On a successful dispatch this method unwinds the stack to the handler and does
 * NOT return; it returns normally only when this frame does not handle the exception.
 *
 * @param current cursor for this T1X method's frame
 * @param callee cursor for the frame called from this one (unused here)
 * @param throwable the exception being dispatched
 */
@Override
public void catchException(StackFrameCursor current, StackFrameCursor callee, Throwable throwable) {
    StackFrameWalker sfw = current.stackFrameWalker();
    CodePointer throwAddress = throwAddress(current);
    CodePointer catchAddress = throwAddressToCatchAddress(throwAddress, throwable);
    if (!catchAddress.isZero()) {
        if (StackFrameWalker.TraceStackWalk) {
            Log.print("StackFrameWalk: Handler position for exception at position ");
            Log.print(current.vmIP().minus(codeStart()).toInt());
            Log.print(" is ");
            Log.println(catchAddress.minus(codeStart()).toInt());
        }
        if (isAMD64()) {
            Pointer localVariablesBase = current.fp();
            Pointer catcherSP = adjustSPForHandler(current.sp(), current.fp());
            // Done with the stack walker
            sfw.reset();
            // Store the exception for the handler
            VmThread.current().storeExceptionForHandler(throwable, this, posFor(catchAddress));
            // Transfers control to the handler; this call never returns.
            Stubs.unwind(catchAddress.toPointer(), catcherSP, localVariablesBase);
            FatalError.unexpected("should not reach here");
        } else {
            unimplISA();
        }
    }
}
/**
 * Determines (hosted/Inspector use only) where the caller's frame pointer currently lives by
 * inspecting the machine instruction at the cursor's IP relative to this method's prologue.
 * The opcode constants below are the AMD64 encodings named by their identifiers
 * (ENTER, LEAVE, POP %rbp, RET, RET imm16).
 *
 * @param lastPrologueInstr address of the last instruction of this method's prologue
 * @return the inferred frame pointer state for the current IP
 */
@PLATFORM(cpu = "amd64")
@HOSTED_ONLY
private FramePointerStateAMD64 computeFramePointerState(StackFrameCursor current, StackFrameWalker stackFrameWalker, CodePointer lastPrologueInstr) {
    // Checkstyle: stop
    final byte ENTER = (byte) 0xC8;
    final byte LEAVE = (byte) 0xC9;
    final byte POP_RBP = (byte) 0x5D;
    final byte RET = (byte) 0xC3;
    final byte RET2 = (byte) 0xC2;
    // Checkstyle: resume
    CodePointer ip = current.vmIP();
    byte byteAtIP = stackFrameWalker.readByte(ip.toPointer(), 0);
    // Before the prologue completes (or around ENTER/RET) %rbp still holds the caller's frame.
    if (ip.toPointer().lessThan(lastPrologueInstr.toPointer()) || byteAtIP == ENTER || byteAtIP == RET || byteAtIP == RET2) {
        return FramePointerStateAMD64.CALLER_FRAME_IN_RBP;
    }
    // At the last prologue instruction (or at LEAVE) the caller frame is at the slot %rbp points to.
    if (ip.equals(lastPrologueInstr) || byteAtIP == LEAVE) {
        return FramePointerStateAMD64.CALLER_FRAME_AT_RBP;
    }
    if (byteAtIP == POP_RBP) {
        return FramePointerStateAMD64.RETURNING_FROM_RUNTIME;
    }
    return FramePointerStateAMD64.IN_RBP;
}
/**
 * Presents this frame to {@code visitor} (hosted/Inspector use only). The frame pointer is
 * refined via {@link #computeFramePointerState} because the Inspector may stop at arbitrary
 * instructions, including mid-prologue/epilogue.
 *
 * @return the result of {@code visitor.visitFrame}, i.e. whether the walk should continue
 */
@Override
@HOSTED_ONLY
public boolean acceptStackFrameVisitor(StackFrameCursor current, StackFrameVisitor visitor) {
    if (isAMD64()) {
        StackFrameWalker sfw = current.stackFrameWalker();
        Pointer localVariablesBase = current.fp();
        CodePointer startOfPrologue;
        // An adapter (if present) precedes the prologue; skip past it.
        AdapterGenerator generator = AdapterGenerator.forCallee(this);
        if (generator != null) {
            startOfPrologue = codeAt(generator.prologueSizeForCallee(classMethodActor));
        } else {
            startOfPrologue = codeStart();
        }
        CodePointer lastPrologueInstruction = startOfPrologue.plus(FramePointerStateAMD64.OFFSET_TO_LAST_PROLOGUE_INSTRUCTION);
        FramePointerStateAMD64 framePointerState = computeFramePointerState(current, sfw, lastPrologueInstruction);
        localVariablesBase = framePointerState.localVariablesBase(current);
        StackFrame stackFrame = new AMD64JVMSFrame(sfw.calleeStackFrame(), current.targetMethod(), current.vmIP().toPointer(), current.sp(), localVariablesBase, localVariablesBase);
        return visitor.visitFrame(stackFrame);
    } else {
        throw unimplISA();
    }
}
/**
 * Advances the stack walker's cursor from this frame to its caller's frame,
 * computing the caller's IP, SP and FP from this frame's layout.
 */
@Override
public void advance(StackFrameCursor current) {
    if (isAMD64()) {
        StackFrameWalker sfw = current.stackFrameWalker();
        // The caller's RIP sits just above the non-parameter locals in this frame.
        int dispToRip = frameSize() - sizeOfNonParameterLocals();
        Pointer returnRIP = current.fp().plus(dispToRip);
        // The caller's FP is saved one word below the return address.
        Pointer callerFP = sfw.readWord(returnRIP, -Word.size()).asPointer();
        if (MaxineVM.isHosted()) {
            // Inspector context only: the IP may be mid-prologue/epilogue, so the
            // simple layout-based computation above can be wrong; recompute from
            // the observed frame-pointer state.
            CodePointer startOfPrologue;
            AdapterGenerator generator = AdapterGenerator.forCallee(this);
            if (generator != null) {
                if (generator.advanceIfInPrologue(current)) {
                    return;
                }
                startOfPrologue = codeAt(generator.prologueSizeForCallee(classMethodActor));
            } else {
                startOfPrologue = codeStart();
            }
            CodePointer lastPrologueInstruction = startOfPrologue.plus(FramePointerStateAMD64.OFFSET_TO_LAST_PROLOGUE_INSTRUCTION);
            FramePointerStateAMD64 framePointerState = computeFramePointerState(current, sfw, lastPrologueInstruction);
            returnRIP = framePointerState.returnIP(current);
            callerFP = framePointerState.callerFP(current);
        }
        Pointer callerIP = sfw.readWord(returnRIP, 0).asPointer();
        Pointer callerSP = returnRIP.plus(Word.size()); // Skip the rip
        // Pop the incoming parameters as well: they belong to this frame, not the caller's.
        int stackAmountInBytes = classMethodActor.numberOfParameterSlots() * JVMSFrameLayout.JVMS_SLOT_SIZE;
        if (stackAmountInBytes != 0) {
            callerSP = callerSP.plus(stackAmountInBytes);
        }
        // Keep safepoints disabled while mutating the walker's cursor; restore
        // the previous state afterwards.
        boolean wasDisabled = SafepointPoll.disable();
        sfw.advance(callerIP, callerSP, callerFP);
        if (!wasDisabled) {
            SafepointPoll.enable();
        }
    } else {
        unimplISA();
    }
}
/**
 * Returns the address of the stack slot holding the caller's return address for {@code frame}.
 */
@Override
public Pointer returnAddressPointer(StackFrameCursor frame) {
    if (!isAMD64()) {
        throw unimplISA();
    }
    // The caller's RIP slot sits just above the non-parameter locals.
    final int ripOffset = frameSize() - sizeOfNonParameterLocals();
    return frame.fp().plus(ripOffset);
}
/**
 * A T1X-compiled method is always a baseline (template-generated) target method.
 */
@Override
public boolean isBaseline() {
    return true;
}
/**
 * Lays out a deoptimized T1X frame for {@code frame} by pushing its slots onto {@code info}
 * in stack order: operand stack, non-parameter locals, template slots, alignment, caller FP,
 * return address, and finally the parameter locals. Each JVMS slot is padded to
 * {@link JVMSFrameLayout#JVMS_SLOT_SIZE}.
 *
 * @param info details of current deoptimization; slots are appended to it
 * @param frame the debug-info frame describing the interpreter state to materialize
 * @param cont continuation updated with the IP, SP and FP at which execution resumes
 * @param exception if non-null, the frame is set up to enter this exception's handler
 * @param reexecute specifies if the instruction at the frame's BCI is to be re-executed
 * @return a {@link CallerContinuation} recording the slot indices of the caller FP and return address
 */
@Override
public Continuation createDeoptimizedFrame(Info info, CiFrame frame, Continuation cont, Throwable exception, boolean reexecute) {
    int bci = frame.bci;
    ClassMethodActor method = classMethodActor;
    assert classMethodActor == frame.method : classMethodActor + " != " + frame.method;
    CodePointer ip = findContinuationIP(info, exception, bci, reexecute);
    // record continuation instruction pointer
    cont.setIP(info, ip.toPointer());
    if (exception == null) {
        // record continuation stack pointer
        cont.setSP(info, CiConstant.forJsr(info.slotsCount()));
        // add operand stack slots (top of stack is pushed first)
        for (int i = frame.numStack - 1; i >= 0; i--) {
            CiConstant value = (CiConstant) frame.getStackValue(i);
            info.addSlot(value, "ostack");
            addSlotPadding(info, "ostack (pad)");
        }
    } else {
        assert frame.numStack == 0 : "operand stack must be clear at exception handler";
        // The ref maps for the handler address will expect a valid reference
        // in stack slot 0 so we store a null there.
        for (int pad = 0; pad < STACK_SLOTS_PER_JVMS_SLOT; pad++) {
            info.addSlot(WordUtil.ZERO, "ostack (pad)");
        }
        // The continuation stack pointer must denote an empty stack
        cont.setSP(info, CiConstant.forJsr(info.slotsCount()));
    }
    int paramLocals = method.numberOfParameterSlots();
    int numLocals = frame.numLocals;
    int synchronizedReceiver = -1;
    if (method.isSynchronized() && !method.isStatic()) {
        // The locked receiver occupies an extra (virtual) local slot past the declared locals.
        assert frame.numLocks > 0;
        synchronizedReceiver = numLocals;
        numLocals++;
    }
    int nonParamLocals = numLocals - paramLocals;
    // add (non-parameter) local slots
    for (int i = numLocals - 1; i >= paramLocals; i--) {
        if (i == synchronizedReceiver) {
            CiConstant value = (CiConstant) frame.getLockValue(0);
            info.addSlot(value, "locked rcvr");
        } else {
            CiConstant value = (CiConstant) frame.getLocalValue(i);
            info.addSlot(value, "local");
        }
        addSlotPadding(info, "local (pad)");
    }
    // record continuation frame pointer
    cont.setFP(info, CiConstant.forJsr(info.slotsCount()));
    // add template slots
    for (int i = 0; i < templateSlots; i++) {
        info.addSlot(WordUtil.ZERO, "template");
    }
    // add alignment slots so the materialized frame matches the target's frame alignment
    int numberOfSlots = 1 + templateSlots; // one extra word for the caller FP
    int unalignedSize = (numberOfSlots + (nonParamLocals * STACK_SLOTS_PER_JVMS_SLOT)) * STACK_SLOT_SIZE;
    int alignedSize = target().alignFrameSize(unalignedSize);
    int alignmentSlots = (alignedSize - unalignedSize) / STACK_SLOT_SIZE;
    for (int i = 0; i < alignmentSlots; i++) {
        info.addSlot(WordUtil.ZERO, "align");
    }
    // add caller FP slot with placeholder value (patched later via callerFPIndex)
    int callerFPIndex = info.slotsCount();
    info.addSlot(WordUtil.ZERO, "callerFP");
    // add caller return address slot with placeholder value
    int returnAddressIndex = info.slotsCount();
    info.addSlot(WordUtil.ZERO, "returnIP");
    // add parameter slots
    for (int i = paramLocals - 1; i >= 0; i--) {
        CiConstant value = (CiConstant) frame.getLocalValue(i);
        info.addSlot(value, "param");
        addSlotPadding(info, "param (pad)");
    }
    // NOTE(review): the caller SP index is passed as -1 — presumably derived by the
    // continuation itself rather than recorded here; confirm against CallerContinuation.
    return new CallerContinuation(callerFPIndex, -1, returnAddressIndex);
}
/**
 * Pads the most recently added JVMS slot with zero-filled stack slots so that it
 * occupies a full {@link JVMSFrameLayout#JVMS_SLOT_SIZE}; the padding occupies the
 * higher slots.
 *
 * @param info details of current deoptimization; padding slots are appended to it
 * @param desc description recorded for each padding slot
 */
private void addSlotPadding(Info info, String desc) {
    final int padSlots = STACK_SLOTS_PER_JVMS_SLOT - 1;
    for (int i = 0; i < padSlots; i++) {
        info.addSlot(WordUtil.ZERO, desc);
    }
}
/**
 * Finds the address at which deoptimized execution will continue in this target method
 * based on a BCI specifying the bytecode instruction that should be resumed.
 *
 * @param info details of current deoptimization
 * @param exception if non-null, then the returned address will be for the handler of this exception
 * @param bci the BCI specified by a debug info {@linkplain CiFrame frame}
 * @param reexecute specifies if the instruction at {@code bci} is to be re-executed
 */
private CodePointer findContinuationIP(Info info, Throwable exception, int bci, boolean reexecute) throws FatalError {
    if (exception != null) {
        // Unwinding to deoptimized frame containing the handler for 'exception'.
        final int throwPos = bciToPos[bci];
        final CodePointer handler = throwAddressToCatchAddress(codeAt(throwPos), exception);
        assert !handler.isZero() : "could not (re)find handler for " + exception + " thrown at " + this + "+" + throwPos;
        return handler;
    }
    final RiMethod callee = classMethodActor.codeAttribute().calleeAt(bci);
    if (reexecute) {
        // Resume at the start of the template emitted for the instruction at 'bci'.
        return codeAt(bciToPos[bci]);
    }
    // Resume at the instruction after the template call made for the invoke at 'bci'.
    return findTemplateCallReturnAddress(info, bci, callee);
}
/**
 * Finds the address of the instruction after a template call.
 *
 * @param info details of current deoptimization
 * @param bci BCI of the bytecode invoke instruction that was translated to a template call
 * @param callee the callee method at {@code bci} in this target method, or {@code null} if none
 * @throws FatalError if {@code callee} is a non-intrinsic method for which no template call is found
 */
protected CodePointer findTemplateCallReturnAddress(Info info, int bci, RiMethod callee) throws FatalError {
    int templateCallReturnPos = -1;
    int curPos = bciToPos[bci];
    if (callee != null) {
        // Must be in a call
        // Find the instruction *following* the template call by scanning the safepoints
        // emitted between this invoke's template and the template of the next bytecode
        byte[] bytecode = classMethodActor.code();
        int opcode = bytecode[bci] & 0xFF;
        assert opcode == INVOKEINTERFACE || opcode == INVOKESPECIAL || opcode == INVOKESTATIC || opcode == INVOKEVIRTUAL;
        final int invokeSize = Bytecodes.lengthOf(opcode);
        int succBCI = bci + invokeSize;
        int succPos = bciToPos[succBCI];
        assert succPos > curPos;
        for (int safepointIndex = 0; safepointIndex < safepoints.size(); ++safepointIndex) {
            int safepointPos = safepoints.posAt(safepointIndex);
            if (curPos <= safepointPos && safepointPos < succPos) {
                if (safepoints.isSetAt(TEMPLATE_CALL, safepointIndex)) {
                    if (isAMD64()) {
                        // On x86 the safepoint position of a call *is* the return position
                        templateCallReturnPos = safepointPos;
                    } else {
                        throw unimplISA();
                    }
                    break;
                }
            }
        }
    }
    if (templateCallReturnPos != -1) {
        return codeAt(templateCallReturnPos);
    } else {
        // Fix: error message previously misspelled "intrinsic" as "intrinisc".
        FatalError.check(callee == null || ((RiResolvedMethod) callee).intrinsic() != null, "could not find template call for non-intrinsic method at " + curPos + " in " + this);
        // Must be a safepoint
        return codeAt(curPos);
    }
}
}
/**
 * Various execution states in a T1X method that can only be observed in
 * the context of the Inspector. Each state knows how to derive the frame's
 * local-variables base, the location of the caller's return address (RIP),
 * and the caller's frame pointer from a {@link StackFrameCursor}.
 */
@HOSTED_ONLY
@PLATFORM(cpu = "amd64")
enum FramePointerStateAMD64 {
    /**
     * RBP holds the frame pointer of the current method activation. caller's RIP is at [RBP + FrameSize], caller's
     * frame pointer is at [RBP + FrameSize -1]
     */
    IN_RBP {
        @Override
        Pointer localVariablesBase(StackFrameCursor current) {
            return current.fp();
        }
        @Override
        Pointer returnIP(StackFrameCursor current) {
            // The caller's RIP slot sits just above the non-parameter locals.
            T1XTargetMethod targetMethod = (T1XTargetMethod) current.targetMethod();
            int dispToRip = targetMethod.frameSize() - targetMethod.sizeOfNonParameterLocals();
            return current.fp().plus(dispToRip);
        }
        @Override
        Pointer callerFP(StackFrameCursor current) {
            // The caller's FP is saved one word below the return address slot.
            return current.stackFrameWalker().readWord(returnIP(current), -Word.size()).asPointer();
        }
    },
    /**
     * RBP holds the frame pointer of the caller, caller's RIP is at [RSP] This state occurs when entering the
     * method or exiting it.
     */
    CALLER_FRAME_IN_RBP {
        @Override
        Pointer localVariablesBase(StackFrameCursor current) {
            // The frame has not been (or is no longer) set up; its base is below RSP.
            int offsetToSaveArea = current.targetMethod().frameSize();
            return current.sp().minus(offsetToSaveArea);
        }
        @Override
        Pointer returnIP(StackFrameCursor current) {
            return current.sp();
        }
        @Override
        Pointer callerFP(StackFrameCursor current) {
            return current.fp();
        }
    },
    /**
     * RBP points at the bottom of the "saving area". Caller's frame pointer is at [RBP], caller's RIP is at [RBP +
     * WordSize].
     */
    CALLER_FRAME_AT_RBP {
        @Override
        Pointer localVariablesBase(StackFrameCursor current) {
            T1XTargetMethod targetMethod = (T1XTargetMethod) current.targetMethod();
            // Step back from the saving area over the non-parameter locals (plus one word).
            int dispToFrameStart = targetMethod.frameSize() - (targetMethod.sizeOfNonParameterLocals() + Word.size());
            return current.fp().minus(dispToFrameStart);
        }
        @Override
        Pointer returnIP(StackFrameCursor current) {
            return current.fp().plus(Word.size());
        }
        @Override
        Pointer callerFP(StackFrameCursor current) {
            return current.stackFrameWalker().readWord(current.fp(), 0).asPointer();
        }
    },
    /**
     * Returning from a runtime call (or actually in a runtime call). RBP may have been clobbered by the runtime.
     * The frame pointer for the current activation record is 'RSP + stack slot size'.
     */
    RETURNING_FROM_RUNTIME {
        @Override
        Pointer localVariablesBase(StackFrameCursor current) {
            // NOTE(review): the constant's javadoc says 'RSP + stack slot size', but this
            // reads the word stored AT [RSP] — presumably the saved frame pointer pushed
            // before the runtime call; confirm against the runtime-call stub.
            return current.stackFrameWalker().readWord(current.sp(), 0).asPointer();
        }
        @Override
        Pointer returnIP(StackFrameCursor current) {
            T1XTargetMethod targetMethod = (T1XTargetMethod) current.targetMethod();
            int dispToRip = targetMethod.frameSize() - targetMethod.sizeOfNonParameterLocals();
            return localVariablesBase(current).plus(dispToRip);
        }
        @Override
        Pointer callerFP(StackFrameCursor current) {
            return current.stackFrameWalker().readWord(returnIP(current), -Word.size()).asPointer();
        }
    };
    // Base address of the frame's local variables for the current state.
    abstract Pointer localVariablesBase(StackFrameCursor current);
    // Address of the slot holding the caller's return address for the current state.
    abstract Pointer returnIP(StackFrameCursor current);
    // The caller's frame pointer for the current state.
    abstract Pointer callerFP(StackFrameCursor current);
    /**
     * Offset to the last instruction of the prologue from the JIT entry point. The prologue comprises two instructions,
     * the first one of which is enter (fixed size, 4 bytes long).
     */
    public static final int OFFSET_TO_LAST_PROLOGUE_INSTRUCTION = 4;
}