// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include 'src/wasm/wasm-linkage.h'

namespace runtime {
extern runtime WasmGenericJSToWasmObject(
    Context, WasmInstanceObject|Undefined, JSAny, Smi): JSAny;
extern runtime WasmGenericWasmToJSObject(Context, Object): JSAny;
extern runtime WasmCompileWrapper(NoContext, WasmExportedFunctionData): JSAny;
extern runtime WasmAllocateSuspender(Context): JSAny;
}  // namespace runtime

namespace wasm {
extern builtin JSToWasmWrapperAsm(RawPtr<intptr>, WasmInstanceObject, JSAny):
    JSAny;
extern builtin WasmReturnPromiseOnSuspendAsm(
    RawPtr<intptr>, WasmInstanceObject, JSAny): JSAny;

extern macro UniqueIntPtrConstant(constexpr intptr): intptr;

const kWasmExportedFunctionDataSignatureOffset:
    constexpr int32 generates 'WasmExportedFunctionData::kSigOffset';

const kWasmReturnCountOffset:
    constexpr intptr generates 'wasm::FunctionSig::kReturnCountOffset';

const kWasmParameterCountOffset: constexpr intptr
    generates 'wasm::FunctionSig::kParameterCountOffset';

const kWasmSigTypesOffset:
    constexpr intptr generates 'wasm::FunctionSig::kRepsOffset';

// This constant should only be loaded as a `UniqueIntPtrConstant` to avoid
// problems with PGO.
// `- 1` because of the instance parameter.
const kNumGPRegisterParameters:
    constexpr intptr generates 'arraysize(wasm::kGpParamRegisters) - 1';

// This constant should only be loaded as a `UniqueIntPtrConstant` to avoid
// problems with PGO.
const kNumFPRegisterParameters:
    constexpr intptr generates 'arraysize(wasm::kFpParamRegisters)';

const kNumGPRegisterReturns:
    constexpr intptr generates 'arraysize(wasm::kGpReturnRegisters)';

const kNumFPRegisterReturns:
    constexpr intptr generates 'arraysize(wasm::kFpReturnRegisters)';

const kWasmI32Type:
    constexpr int32 generates 'wasm::kWasmI32.raw_bit_field()';
const kWasmI64Type:
    constexpr int32 generates 'wasm::kWasmI64.raw_bit_field()';
const kWasmF32Type:
    constexpr int32 generates 'wasm::kWasmF32.raw_bit_field()';
const kWasmF64Type:
    constexpr int32 generates 'wasm::kWasmF64.raw_bit_field()';

// `...` marks these extern enums as non-exhaustive: the C++ enums contain
// more enumerators than are listed here.
extern enum ValueKind extends int32 constexpr 'wasm::ValueKind' {
  kRef,
  kRefNull,
  ...
}

extern enum HeapType extends int32
    constexpr 'wasm::HeapType::Representation' {
  kExtern,
  kNoExtern,
  kString,
  kEq,
  kI31,
  kStruct,
  kArray,
  kAny,
  kNone,
  kNoFunc,
  ...
}

const kWrapperBufferReturnCount: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount';
const kWrapperBufferRefReturnCount: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferRefReturnCount';
const kWrapperBufferSigRepresentationArray: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferSigRepresentationArray'
    ;
const kWrapperBufferStackReturnBufferSize: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferStackReturnBufferSize'
    ;
const kWrapperBufferCallTarget: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferCallTarget';
const kWrapperBufferParamStart: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferParamStart';
const kWrapperBufferParamEnd: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferParamEnd';
const kWrapperBufferStackReturnBufferStart: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferStackReturnBufferStart'
    ;
const kWrapperBufferFPReturnRegister1: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferFPReturnRegister1'
    ;
const kWrapperBufferFPReturnRegister2: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferFPReturnRegister2'
    ;
const kWrapperBufferGPReturnRegister1: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferGPReturnRegister1'
    ;
const kWrapperBufferGPReturnRegister2: constexpr intptr
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferGPReturnRegister2'
    ;
const kWrapperBufferSize: constexpr int32
    generates 'JSToWasmWrapperFrameConstants::kWrapperBufferSize';

const kValueTypeKindBits: constexpr int32
    generates 'wasm::ValueType::kKindBits';
const kValueTypeKindBitsMask: constexpr int32
    generates 'wasm::kWasmValueKindBitsMask';
const kValueTypeHeapTypeMask: constexpr int32
    generates 'wasm::kWasmHeapTypeBitsMask';

// Generic bitcast: the unspecialized version relies on `From` being
// implicitly convertible to `To`; specializations like the float32 -> uint32
// one below perform the actual reinterpretations.
macro Bitcast<To: type, From: type>(i: From): To {
  return i;
}

extern macro BitcastFloat32ToInt32(float32): uint32;

Bitcast<uint32, float32>(v: float32): uint32 {
  return BitcastFloat32ToInt32(v);
}

// Reinterprets a reference to an `intptr` slot as a reference to `To`. This
// performs no checks; the caller must ensure the slot actually holds a `To`.
macro RefCast<To: type>(i: &intptr): &To {
  return torque_internal::unsafe::NewReference<To>(i.object, i.offset);
}

macro TruncateBigIntToI64(context: Context, input: JSAny): intptr {
  // This is only safe to use on 64-bit platforms.
  dcheck(Is64());
  const bigint = ToBigInt(context, input);

  if (bigint::ReadBigIntLength(bigint) == 0) {
    return 0;
  }

  const digit = bigint::LoadBigIntDigit(bigint, 0);
  if (bigint::ReadBigIntSign(bigint) == bigint::kPositiveSign) {
    // Note that even though the bigint is positive according to its sign, the
    // result of `Signed(digit)` can be negative if the most significant bit is
    // set. This is intentional and follows the specification of `ToBigInt64()`.
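    // For example, an input of 2**63 has the top bit of its single 64-bit
    // digit set, so the result is -(2**63): the value is interpreted modulo
    // 2**64 as a signed 64-bit integer.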
    return Signed(digit);
  }
  return 0 - Signed(digit);
}

// The two 32-bit halves of a 64-bit integer; used to pass i64 values on
// 32-bit platforms.
@export
struct Int64AsInt32Pair {
  low: uintptr;
  high: uintptr;
}

// This is only safe to use on 32-bit platforms.
extern macro BigIntToRawBytes(BigInt): Int64AsInt32Pair;

extern macro PopAndReturn(intptr, JSAny): never;

// The ReturnSlotAllocator calculates the size of the space needed on the stack
// for return values.
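// Stack slots are counted in pointer-size units. On 32-bit platforms, space
// is allocated in 64-bit chunks to preserve alignment, so two 32-bit values
// can share one chunk.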
struct ReturnSlotAllocator {
  macro AllocStack(): void {
    if constexpr (Is64()) {
      this.stackSlots++;
    } else {
      if (this.hasSmallSlot) {
        this.hasSmallSlot = false;
        this.smallSlotLast = false;
      } else {
        this.stackSlots += 2;
        this.hasSmallSlot = true;
        this.smallSlotLast = true;
      }
    }
    return;
  }

  macro AllocGP(): void {
    if (this.remainingGPRegs > 0) {
      this.remainingGPRegs--;
      return;
    }
    this.AllocStack();
  }

  macro AllocFP32(): void {
    if (this.remainingFPRegs > 0) {
      this.remainingFPRegs--;
      return;
    }
    this.AllocStack();
  }

  macro AllocFP64(): void {
    if (this.remainingFPRegs > 0) {
      this.remainingFPRegs--;
      return;
    }
    if constexpr (Is64()) {
      this.stackSlots++;
    } else {
      this.stackSlots += 2;
      this.smallSlotLast = false;
    }
  }

  macro GetSize(): intptr {
    if (this.smallSlotLast) {
      return this.stackSlots - 1;
    } else {
      return this.stackSlots;
    }
  }

  remainingGPRegs: intptr;
  remainingFPRegs: intptr;
  // Even on 32-bit platforms we always allocate 64-bit stack space at a time
  // to preserve alignment. If we allocate a 64-bit slot for a 32-bit type,
  // then we remember via `hasSmallSlot` that the second half of the slot is
  // still free and can be used for the next 32-bit type.
  hasSmallSlot: bool;
  // If the free half-slot (the hole) is in the middle of the allocated stack
  // space, then it is part of the overall stack space size. However, if the
  // hole is at the end, we have to subtract it from the overall stack space
  // size. This flag tracks whether the hole is in the middle (false) or at
  // the end (true).
  smallSlotLast: bool;
  stackSlots: intptr;
}

macro NewReturnSlotAllocator(): ReturnSlotAllocator {
  let result: ReturnSlotAllocator;
  result.remainingGPRegs = kNumGPRegisterReturns;
  result.remainingFPRegs = kNumFPRegisterReturns;
  result.stackSlots = 0;
  result.hasSmallSlot = false;
  result.smallSlotLast = false;
  return result;
}

struct LocationAllocator {
  macro GetStackSlot(): &intptr {
    if constexpr (Is64()) {
      const result = torque_internal::unsafe::NewReference<intptr>(
          this.object, this.nextStack);
      this.nextStack += torque_internal::SizeOf<intptr>();
      return result;
    } else {
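      // 32-bit platforms: allocate 64-bit chunks to preserve alignment, and
      // hand out a previously reserved half-chunk (`smallSlot`) first.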
      if (this.smallSlot != 0) {
        const result = torque_internal::unsafe::NewReference<intptr>(
            this.object, this.smallSlot);
        this.smallSlot = 0;
        this.smallSlotLast = false;
        return result;
      }
      const result = torque_internal::unsafe::NewReference<intptr>(
          this.object, this.nextStack);
      this.smallSlot = this.nextStack + torque_internal::SizeOf<intptr>();
      this.nextStack = this.smallSlot + torque_internal::SizeOf<intptr>();
      this.smallSlotLast = true;
      return result;
    }
  }

  macro GetGPSlot(): &intptr {
    if (this.remainingGPRegs-- > 0) {
      const result = torque_internal::unsafe::NewReference<intptr>(
          this.object, this.nextGPReg);
      this.nextGPReg += torque_internal::SizeOf<intptr>();
      return result;
    }
    return this.GetStackSlot();
  }

  macro GetFP32Slot(): &intptr {
    if (this.remainingFPRegs-- > 0) {
      const result = torque_internal::unsafe::NewReference<intptr>(
          this.object, this.nextFPReg);
      this.nextFPReg += torque_internal::SizeOf<float64>();
      return result;
    }
    return this.GetStackSlot();
  }

  macro GetFP64Slot(): &intptr {
    if (this.remainingFPRegs-- > 0) {
      const result = torque_internal::unsafe::NewReference<intptr>(
          this.object, this.nextFPReg);
      this.nextFPReg += torque_internal::SizeOf<float64>();
      return result;
    }
    if constexpr (Is64()) {
      return this.GetStackSlot();
    } else {
      const result = torque_internal::unsafe::NewReference<intptr>(
          this.object, this.nextStack);
      this.nextStack = this.nextStack + 2 * torque_internal::SizeOf<intptr>();
      this.smallSlotLast = false;
      return result;
    }
  }

  // For references we start a new section on the stack; no old slots are
  // filled.
  macro StartRefs(): void {
    if (!this.smallSlotLast) {
      this.smallSlot = 0;
    }
  }

  macro GetStackEnd(): RawPtr {
    let offset = this.nextStack;
    if (this.smallSlotLast) {
      offset -= torque_internal::SizeOf<intptr>();
    }
    return torque_internal::unsafe::GCUnsafeReferenceToRawPtr(
        this.object, offset);
  }

  macro GetAlignedStackEnd(alignment: intptr): RawPtr {
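    // Like GetStackEnd, but rounds the stack-area size up to `alignment`.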
    let offset = this.nextStack;
    if (this.smallSlotLast) {
      offset -= torque_internal::SizeOf<intptr>();
    }
    const stackSize = offset - this.stackStart;
    if (stackSize % alignment != 0) {
      offset += alignment - (stackSize % alignment);
    }
    return torque_internal::unsafe::GCUnsafeReferenceToRawPtr(
        this.object, offset);
  }

  object: HeapObject|TaggedZeroPattern;
  remainingGPRegs: intptr;
  remainingFPRegs: intptr;
  nextGPReg: intptr;
  nextFPReg: intptr;
  nextStack: intptr;
  stackStart: intptr;
  // Even on 32-bit platforms we always allocate 64-bit stack space at a time to
  // preserve alignment. If we allocate a 64-bit slot for a 32-bit type, then we
  // remember the second half of the 64-bit slot as `smallSlot` so that it can
  // be used for the next 32-bit type.
  smallSlot: intptr;
  // If the {smallSlot} is in the middle of the allocated stack space, then it
  // is part of the overall stack space size. However, if the hole is at the
  // end, we have to subtract it from the overall stack space size. This flag
  // tracks whether the hole is in the middle (false) or at the end (true).
  smallSlotLast: bool;
}

macro LocationAllocatorForParams(paramBuffer: &intptr): LocationAllocator {
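  // The parameter buffer consists of three consecutive segments:
  // [GP register params | FP register params (8-byte aligned) | stack params].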
  let result: LocationAllocator;
  result.object = paramBuffer.object;
  result.remainingGPRegs = UniqueIntPtrConstant(kNumGPRegisterParameters);
  result.remainingFPRegs = UniqueIntPtrConstant(kNumFPRegisterParameters);
  result.nextGPReg = paramBuffer.offset;
  result.nextFPReg = result.remainingGPRegs * torque_internal::SizeOf<intptr>();
  if constexpr (!Is64()) {
    // Add padding to provide 8-byte alignment for float64 values.
    result.nextFPReg += (result.nextFPReg & torque_internal::SizeOf<intptr>());
  }
  dcheck(result.nextFPReg % 8 == 0);
  result.nextFPReg += paramBuffer.offset;
  result.nextStack = result.nextFPReg +
      result.remainingFPRegs * torque_internal::SizeOf<float64>();
  result.stackStart = result.nextStack;
  result.smallSlot = 0;
  result.smallSlotLast = false;
  return result;
}

macro LocationAllocatorForReturns(
    gpRegs: RawPtr, fpRegs: RawPtr, stack: RawPtr): LocationAllocator {
  let result: LocationAllocator;
  result.object = kZeroBitPattern;
  result.remainingGPRegs = kNumGPRegisterReturns;
  result.remainingFPRegs = kNumFPRegisterReturns;
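  // `object` is the zero bit pattern, so slot references are effectively raw
  // pointers; kHeapObjectTag is added to each offset because reference
  // accesses subtract the tag again.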
  result.nextGPReg = Convert<intptr>(gpRegs) + kHeapObjectTag;
  result.nextFPReg = Convert<intptr>(fpRegs) + kHeapObjectTag;
  result.nextStack = Convert<intptr>(stack) + kHeapObjectTag;
  result.stackStart = result.nextStack;
  result.smallSlot = 0;
  result.smallSlotLast = false;
  return result;
}

macro JSToWasmObject(
    context: NativeContext, instanceOrUndefined: WasmInstanceObject|Undefined,
    targetType: int32, value: JSAny): Object {
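  // Converts a JS value to the wasm representation expected by `targetType`.
  // Extern refs pass through (with a null check for non-nullable refs),
  // strings are checked inline, and all other cases go to the runtime.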
  const heapType = (targetType >> kValueTypeKindBits) & kValueTypeHeapTypeMask;
  const kind = targetType & kValueTypeKindBitsMask;
  if (heapType == HeapType::kExtern || heapType == HeapType::kNoExtern) {
    if (kind == ValueKind::kRef && value == Null) {
      ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError);
    }
    return value;
  }
  if (heapType == HeapType::kString) {
    if (TaggedIsSmi(value)) {
      ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError);
    }
    if (IsString(UnsafeCast<HeapObject>(value))) {
      return value;
    }
    if (value == Null) {
      if (kind == ValueKind::kRef) {
        ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError);
      } else {
        return kWasmNull;
      }
    }

    ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError);
  }
  return runtime::WasmGenericJSToWasmObject(
      context, instanceOrUndefined, value, Convert<Smi>(targetType));
}

macro JSToWasmWrapperHelper(
    context: NativeContext, _receiver: JSAny, target: JSFunction,
    arguments: Arguments, switchStack: constexpr bool): never {
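  // Overview: (1) load the signature, (2) for multi-return pre-allocate the
  // result JSArray, (3) convert all parameters into a linkage buffer,
  // primitives first and references in a second pass, (4) call the wasm code
  // through the Asm builtin, (5) pop the frame and return.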
  const functionData = UnsafeCast<WasmExportedFunctionData>(
      target.shared_function_info.function_data);

  // Trigger a wrapper tier-up once this function has been called often enough.
  if constexpr (!switchStack) {
    functionData.wrapper_budget = functionData.wrapper_budget - 1;
    if (functionData.wrapper_budget == 0) {
      runtime::WasmCompileWrapper(kNoContext, functionData);
    }
  }

  const sig = functionData.sig_ptr;

  const paramCount = *GetRefAt<int32>(sig, kWasmParameterCountOffset);

  const returnCount = *GetRefAt<int32>(sig, kWasmReturnCountOffset);

  const reps = *GetRefAt<RawPtr>(sig, kWasmSigTypesOffset);

  const sigTypes = torque_internal::unsafe::NewOffHeapConstSlice(
      %RawDownCast<RawPtr<int32>>(reps),
      Convert<intptr>(paramCount + returnCount));

  // If the return count is greater than 1, the return values are returned as
  // a JSArray. After returning from the wasm call, the return values are
  // stored in an area of the stack the GC does not know about. To avoid a GC
  // while references are still stored in that area, we allocate the result
  // JSArray now, before the call to wasm.
  let resultArray: JSAny = Undefined;
  let returnSize: intptr = 0;
  let hasRefReturns: bool = false;
  if (returnCount > 1) {
    resultArray = WasmAllocateJSArray(Convert<Smi>(returnCount));

    // We have to calculate the size of the stack area where the wasm function
    // will store the return values for multi-return.
    const returnTypes =
        Subslice(sigTypes, Convert<intptr>(0), Convert<intptr>(returnCount))
        otherwise unreachable;
    let retIt = returnTypes.Iterator();
    let allocator = NewReturnSlotAllocator();

    while (!retIt.Empty()) {
      const retType = retIt.NextNotEmpty();
      if (retType == kWasmI32Type) {
        allocator.AllocGP();
      } else if (retType == kWasmI64Type) {
        allocator.AllocGP();
        if constexpr (!Is64()) {
          // On 32-bit platforms I64 values are stored as two I32 values.
          allocator.AllocGP();
        }
      } else if (retType == kWasmF32Type) {
        allocator.AllocFP32();
      } else if (retType == kWasmF64Type) {
        allocator.AllocFP64();
      } else {
        // Also record whether there are any reference return values; this
        // lets us skip work later when we process the return values.
        hasRefReturns = true;
        allocator.AllocGP();
      }
    }
    returnSize = allocator.GetSize();
  }

  const paramTypes = Subslice(
      sigTypes, Convert<intptr>(returnCount), Convert<intptr>(paramCount))
      otherwise unreachable;

  let paramBuffer: &intptr;

  // 10 here is an arbitrary number. The analysis of signatures of exported
  // functions of big modules showed that most signatures have a low number of
  // I32 parameters. We picked a cutoff point where for most signatures the
  // pre-allocated stack slots are sufficient without making these stack slots
  // overly big.
  if (paramCount <= 10) {
    // Performance optimization: we pre-allocate a stack area with 18
    // 8-byte slots and use it whenever it is sufficient for all parameters.
    // If the stack area is too small, we allocate a byte array below. The
    // area is big enough for 10 parameters, which can require up to 18 * 8
    // bytes because some segments of the area are reserved for register
    // parameters: e.g. if no FP parameters are passed in registers, all 8 FP
    // register slots remain empty.
    const stackSlots = %RawDownCast<RawPtr<intptr>>(
        StackSlotPtr(144, torque_internal::SizeOf<float64>()));
    paramBuffer = torque_internal::unsafe::NewOffHeapReference(stackSlots);
  } else {
    // We have to estimate the size of the byte array such that it can store
    // all converted parameters. The size is the sum of sizes of the segments
    // for the gp registers, fp registers, and stack slots. The sizes of
    // the register segments are fixed, but for the size of the stack segment
    // we have to guess the number of parameters on the stack. On ia32 it can
    // happen that only a single parameter fits completely into a register, and
    // all other parameters end up at least partially on the stack (e.g. for a
    // signature with only I64 parameters). To make the calculation simpler, we
    // just assume that all parameters are on the stack.
    const kSlotSize: intptr = torque_internal::SizeOf<float64>();
    const bufferSize = UniqueIntPtrConstant(kNumGPRegisterParameters) *
            Convert<intptr>(torque_internal::SizeOf<intptr>()) +
        UniqueIntPtrConstant(kNumFPRegisterParameters) * kSlotSize +
        Convert<intptr>(paramCount) * kSlotSize;
    const slice = &AllocateByteArray(Convert<uintptr>(bufferSize)).bytes;
    paramBuffer = torque_internal::unsafe::NewReference<intptr>(
        slice.object, slice.offset);
  }

  let locationAllocator = LocationAllocatorForParams(paramBuffer);
  let hasRefParam: bool = false;

  let paramTypeIndex: int32 = 0;
  // For stack switching, paramTypeIndex and paramIndex diverge because the
  // suspender is not passed to the wrapper as a parameter.
  if constexpr (switchStack) {
    paramTypeIndex++;
    hasRefParam = true;
  }
  for (let paramIndex: int32 = 0; paramTypeIndex < paramCount; paramIndex++) {
    const param = arguments[Convert<intptr>(paramIndex)];
    const paramType = *paramTypes.UncheckedAtIndex(
        Convert<intptr>(paramTypeIndex++));
    if (paramType == kWasmI32Type) {
      let toRef = locationAllocator.GetGPSlot();
      typeswitch (param) {
        case (smiParam: Smi): {
          *toRef = Convert<intptr>(Unsigned(SmiToInt32(smiParam)));
        }
        case (heapParam: JSAnyNotSmi): {
          *toRef =
              Convert<intptr>(Unsigned(WasmTaggedNonSmiToInt32(heapParam)));
        }
      }
    } else if (paramType == kWasmF32Type) {
      let toRef = locationAllocator.GetFP32Slot();
      *toRef = Convert<intptr>(Bitcast<uint32>(WasmTaggedToFloat32(param)));
    } else if (paramType == kWasmF64Type) {
      let toRef = locationAllocator.GetFP64Slot();
      *RefCast<float64>(toRef) = ChangeTaggedToFloat64(param);
    } else if (paramType == kWasmI64Type) {
      if constexpr (Is64()) {
        let toRef = locationAllocator.GetGPSlot();
        const v = TruncateBigIntToI64(context, param);
        *toRef = v;
      } else {
        let toLowRef = locationAllocator.GetGPSlot();
        let toHighRef = locationAllocator.GetGPSlot();
        const bigIntVal = ToBigInt(context, param);
        const pair = BigIntToRawBytes(bigIntVal);
        *toLowRef = Signed(pair.low);
        *toHighRef = Signed(pair.high);
      }
    } else {
      // The byte array where we store converted parameters is not GC-safe, so
      // we can only copy references into it once no GC can happen anymore.
      // Converting a primitive type can execute arbitrary JavaScript code and
      // thereby trigger a GC, so references get copied into the array only
      // after all parameters of primitive types have been processed. Until
      // then we write the converted parameter back to the stack.
      hasRefParam = true;
      arguments[Convert<intptr>(paramIndex)] =
          JSToWasmObject(context, functionData.instance, paramType, param);
    }
  }
  if (hasRefParam) {
    // Iterate over all parameters again and handle all those with ref types.
    let k: int32 = 0;
    // For stack switching, k and paramIndex diverge because the suspender is
    // not passed to the wrapper as a parameter.
    let paramIndex: int32 = 0;
    locationAllocator.StartRefs();

    if constexpr (switchStack) {
      const suspender = runtime::WasmAllocateSuspender(context);
      const toRef = locationAllocator.GetGPSlot();
      *toRef = BitcastTaggedToWord(suspender);
      // First param is suspender, so skip it in the signature loop.
      k++;
    }

    // We are not using a `for` loop here because Torque does not support
    // `continue` in `for` loops.
    while (k < paramCount) {
      const paramType = *paramTypes.UncheckedAtIndex(Convert<intptr>(k));
      const paramKind = paramType & kValueTypeKindBitsMask;
      if (paramKind != ValueKind::kRef && paramKind != ValueKind::kRefNull) {
        k++;
        paramIndex++;
        continue;
      }
      const param = arguments[Convert<intptr>(paramIndex++)];
      let toRef = locationAllocator.GetGPSlot();
      *toRef = BitcastTaggedToWord(param);
      k++;
    }
  }
  const paramStart = paramBuffer.GCUnsafeRawPtr();
  const paramEnd = locationAllocator.GetStackEnd();

  const internal: WasmInternalFunction = functionData.internal;
  const callTarget = internal.call_target_ptr;
  const instance: WasmInstanceObject = functionData.instance;

  // We construct a state that will be passed to `JSToWasmWrapperAsm`
  // and `JSToWasmHandleReturns`. There are too many parameters to pass
  // everything through registers. The stack area also contains slots for
  // values that get passed from `JSToWasmWrapperAsm` and
  // `WasmReturnPromiseOnSuspendAsm` to `JSToWasmHandleReturns`.
  const wrapperBuffer = %RawDownCast<RawPtr<intptr>>(
      StackSlotPtr(kWrapperBufferSize, torque_internal::SizeOf<intptr>()));

  *GetRefAt<int32>(wrapperBuffer, kWrapperBufferReturnCount) = returnCount;
  *GetRefAt<bool>(wrapperBuffer, kWrapperBufferRefReturnCount) = hasRefReturns;
  *GetRefAt<RawPtr>(wrapperBuffer, kWrapperBufferSigRepresentationArray) = reps;
  *GetRefAt<intptr>(wrapperBuffer, kWrapperBufferStackReturnBufferSize) =
      returnSize;
  *GetRefAt<RawPtr>(wrapperBuffer, kWrapperBufferCallTarget) = callTarget;
  *GetRefAt<RawPtr<intptr>>(wrapperBuffer, kWrapperBufferParamStart) =
      paramStart;
  *GetRefAt<RawPtr>(wrapperBuffer, kWrapperBufferParamEnd) = paramEnd;

  // Both `instance` and `resultArray` get passed separately as parameters to
  // make them GC-safe. They get passed over the stack so that they get scanned
  // by the GC as part of the outgoing parameters of this Torque builtin.
  let result: JSAny;
  if constexpr (switchStack) {
    result =
        WasmReturnPromiseOnSuspendAsm(wrapperBuffer, instance, resultArray);
  } else {
    result = JSToWasmWrapperAsm(wrapperBuffer, instance, resultArray);
  }

  // The normal return sequence of Torque-generated JavaScript builtins does
  // not consider that the caller may have pushed additional "undefined"
  // parameters on the stack, and therefore generates no code to pop them.
  // Here we calculate the actual number of parameters on the stack: the
  // number of parameters provided by the caller (`arguments.length`), or the
  // number of declared parameters (`SharedFunctionInfo::length`) if not
  // enough actual parameters were provided.
  let popCount = arguments.length;
  const declaredArgCount =
      Convert<intptr>(Convert<int32>(target.shared_function_info.length));
  if (declaredArgCount > popCount) {
    popCount = declaredArgCount;
  }
  // Also pop the receiver.
  PopAndReturn(popCount + 1, result);
}

transitioning javascript builtin JSToWasmWrapper(
    js-implicit context: NativeContext, receiver: JSAny, target: JSFunction)(
    ...arguments): JSAny {
  JSToWasmWrapperHelper(context, receiver, target, arguments, false);
}

transitioning javascript builtin WasmReturnPromiseOnSuspend(
    js-implicit context: NativeContext, receiver: JSAny, target: JSFunction)(
    ...arguments): JSAny {
  JSToWasmWrapperHelper(context, receiver, target, arguments, true);
}

macro WasmToJSObject(context: NativeContext, value: Object, retType: int32):
    JSAny {
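  // Converts a wasm value to a JS value. Most reference types are valid JS
  // values as-is; wasm null maps to JS null for nullable types; the remaining
  // cases go through the generic runtime conversion.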
  const paramKind = retType & kValueTypeKindBitsMask;
  const heapType = (retType >> kValueTypeKindBits) & kValueTypeHeapTypeMask;
  if (paramKind == ValueKind::kRef) {
    if (heapType == HeapType::kEq || heapType == HeapType::kI31 ||
        heapType == HeapType::kStruct || heapType == HeapType::kArray ||
        heapType == HeapType::kAny || heapType == HeapType::kExtern ||
        heapType == HeapType::kString || heapType == HeapType::kNone ||
        heapType == HeapType::kNoFunc || heapType == HeapType::kNoExtern) {
      return UnsafeCast<JSAny>(value);
    }
    // TODO(ahaas): This is overly pessimistic: all module-defined struct and
    // array types can be passed to JS as-is as well; and for function types we
    // could at least support the fast path where the WasmExternalFunction has
    // already been created.
    return runtime::WasmGenericWasmToJSObject(context, value);
  } else {
    dcheck(paramKind == ValueKind::kRefNull);
    if (heapType == HeapType::kExtern || heapType == HeapType::kNoExtern) {
      return UnsafeCast<JSAny>(value);
    }
    if (value == kWasmNull) {
      return Null;
    }
    if (heapType == HeapType::kEq || heapType == HeapType::kStruct ||
        heapType == HeapType::kArray || heapType == HeapType::kString ||
        heapType == HeapType::kI31 || heapType == HeapType::kAny) {
      return UnsafeCast<JSAny>(value);
    }
    // TODO(ahaas): This is overly pessimistic: all module-defined struct and
    // array types can be passed to JS as-is as well; and for function types we
    // could at least support the fast path where the WasmExternalFunction has
    // already been created.
    return runtime::WasmGenericWasmToJSObject(context, value);
  }
}

builtin JSToWasmHandleReturns(
    instance: WasmInstanceObject, resultArray: JSArray,
    wrapperBuffer: RawPtr<intptr>): JSAny {
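  // Processes the return value(s) of a wasm call: zero returns yield
  // Undefined, a single return is converted directly from the register
  // values, and multiple returns are written into the pre-allocated JSArray,
  // references first so that they become GC-safe before any allocation.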
  const returnCount = *GetRefAt<int32>(
      wrapperBuffer, kWrapperBufferReturnCount);
  if (returnCount == 0) {
    return Undefined;
  }
  if (returnCount == 1) {
    const reps = *GetRefAt<RawPtr>(
        wrapperBuffer, kWrapperBufferSigRepresentationArray);
    const retType = *GetRefAt<int32>(reps, 0);
    if (retType == kWasmI32Type) {
      const ret = *GetRefAt<int32>(
          wrapperBuffer, kWrapperBufferGPReturnRegister1);
      const result = Convert<Number>(ret);
      return result;
    } else if (retType == kWasmF32Type) {
      const resultRef =
          GetRefAt<float32>(wrapperBuffer, kWrapperBufferFPReturnRegister1);
      return Convert<Number>(*resultRef);
    } else if (retType == kWasmF64Type) {
      const resultRef =
          GetRefAt<float64>(wrapperBuffer, kWrapperBufferFPReturnRegister1);
      return Convert<Number>(*resultRef);
    } else if (retType == kWasmI64Type) {
      if constexpr (Is64()) {
        const ret = *GetRefAt<intptr>(
            wrapperBuffer, kWrapperBufferGPReturnRegister1);
        return I64ToBigInt(ret);
      } else {
        const lowWord = *GetRefAt<intptr>(
            wrapperBuffer, kWrapperBufferGPReturnRegister1);
        const highWord = *GetRefAt<intptr>(
            wrapperBuffer, kWrapperBufferGPReturnRegister2);
        return I32PairToBigInt(lowWord, highWord);
      }
    } else {
      const ptr = %RawDownCast<RawPtr<uintptr>>(
          wrapperBuffer + kWrapperBufferGPReturnRegister1);
      const rawRef = *GetRefAt<uintptr>(ptr, 0);
      const value = BitcastWordToTagged(rawRef);
      return WasmToJSObject(LoadContextFromInstance(instance), value, retType);
    }
  }

  // Multi-return: write the return values into the pre-allocated JSArray.
  const fixedArray: FixedArray = UnsafeCast<FixedArray>(resultArray.elements);
  const returnBuffer = *GetRefAt<RawPtr>(
      wrapperBuffer, kWrapperBufferStackReturnBufferStart);
  let locationAllocator = LocationAllocatorForReturns(
      wrapperBuffer + kWrapperBufferGPReturnRegister1,
      wrapperBuffer + kWrapperBufferFPReturnRegister1, returnBuffer);

  const reps = *GetRefAt<RawPtr>(
      wrapperBuffer, kWrapperBufferSigRepresentationArray);

  const retTypes = torque_internal::unsafe::NewOffHeapConstSlice(
      %RawDownCast<RawPtr<int32>>(reps), Convert<intptr>(returnCount));

  const hasRefReturns = *GetRefAt<bool>(
      wrapperBuffer, kWrapperBufferRefReturnCount);

  if (hasRefReturns) {
    // We first process all references and copy them into the result array to
    // put them into a location that is known to the GC. Processing the
    // references does not trigger a GC, but allocating HeapNumbers and
    // BigInts for the primitive types may.

    for (let k: intptr = 0; k < Convert<intptr>(returnCount); k++) {
      const retType = *retTypes.UncheckedAtIndex(Convert<intptr>(k));
      if (retType == kWasmI32Type) {
        locationAllocator.GetGPSlot();
      } else if (retType == kWasmF32Type) {
        locationAllocator.GetFP32Slot();
      } else if (retType == kWasmI64Type) {
        locationAllocator.GetGPSlot();
        if constexpr (!Is64()) {
          locationAllocator.GetGPSlot();
        }
      } else if (retType == kWasmF64Type) {
        locationAllocator.GetFP64Slot();
      } else {
        let value: Object;
        const slot = locationAllocator.GetGPSlot();
        const rawRef = *slot;
        value = BitcastWordToTagged(rawRef);
        // Store the wasm object in the JSArray to make it GC-safe. The
        // conversion to a JS value happens later, in the second loop.
        fixedArray.objects[k] = value;
      }
    }
  }

  locationAllocator = LocationAllocatorForReturns(
      wrapperBuffer + kWrapperBufferGPReturnRegister1,
      wrapperBuffer + kWrapperBufferFPReturnRegister1, returnBuffer);

  for (let k: intptr = 0; k < Convert<intptr>(returnCount); k++) {
    const retType = *retTypes.UncheckedAtIndex(Convert<intptr>(k));
    if (retType == kWasmI32Type) {
      const slot = locationAllocator.GetGPSlot();
      const val = *RefCast<int32>(slot);
      fixedArray.objects[k] = Convert<Number>(val);
    } else if (retType == kWasmF32Type) {
      const slot = locationAllocator.GetFP32Slot();
      const val = *RefCast<float32>(slot);
      fixedArray.objects[k] = Convert<Number>(val);
    } else if (retType == kWasmI64Type) {
      if constexpr (Is64()) {
        const slot = locationAllocator.GetGPSlot();
        const val = *slot;
        fixedArray.objects[k] = I64ToBigInt(val);
      } else {
        const lowWordSlot = locationAllocator.GetGPSlot();
        const highWordSlot = locationAllocator.GetGPSlot();
        const lowWord = *lowWordSlot;
        const highWord = *highWordSlot;
        fixedArray.objects[k] = I32PairToBigInt(lowWord, highWord);
      }
    } else if (retType == kWasmF64Type) {
      const slot = locationAllocator.GetFP64Slot();
      const val = *RefCast<float64>(slot);
      fixedArray.objects[k] = Convert<Number>(val);
    } else {
      locationAllocator.GetGPSlot();
      const value = fixedArray.objects[k];
      fixedArray.objects[k] =
          WasmToJSObject(LoadContextFromInstance(instance), value, retType);
    }
  }

  return resultArray;
}
}  // namespace wasm
