chore: cherry-pick 9797576 from v8 (#43386)

* chore: cherry-pick 9797576 from v8 (#43376)

* chore: update patches

---------

Co-authored-by: Shelley Vohr <[email protected]>
Keeley Hammond committed 8 months ago
commit 054679ee99
2 changed files with 105 additions and 0 deletions
  1. patches/v8/.patches (+1 -0)
  2. patches/v8/spill_all_loop_inputs_before_entering_loop.patch (+104 -0)

+ 1 - 0
patches/v8/.patches

@@ -2,3 +2,4 @@ chore_allow_customizing_microtask_policy_per_context.patch
 deps_add_v8_object_setinternalfieldfornodecore.patch
 revert_heap_add_checks_position_info.patch
 revert_api_cleanup_remove_setaccessor_and_setnativedataproperty.patch
+spill_all_loop_inputs_before_entering_loop.patch

+ 104 - 0
patches/v8/spill_all_loop_inputs_before_entering_loop.patch

@@ -0,0 +1,104 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Clemens Backes <[email protected]>
+Date: Tue, 20 Aug 2024 12:25:40 +0200
+Subject: Spill all loop inputs before entering loop
+
+This avoids having to load the value back into a register if it was
+spilled inside of the loop.
+
[email protected]
+
+Fixed: chromium:360700873
+Change-Id: I24f5deacebc893293e8a3c007e9f070c7fa0ccd2
+Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/5797073
+Reviewed-by: Jakob Kummerow <[email protected]>
+Commit-Queue: Clemens Backes <[email protected]>
+Cr-Commit-Position: refs/heads/main@{#95711}
+
+diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc
+index 36f5dade90e251b57c42f24242fec2a0c7fbcdb9..e2dda9b6d684cdf951cc5a23f05fc1b008830fa9 100644
+--- a/src/wasm/baseline/liftoff-assembler.cc
++++ b/src/wasm/baseline/liftoff-assembler.cc
+@@ -445,29 +445,10 @@ void LiftoffAssembler::DropExceptionValueAtOffset(int offset) {
+   cache_state_.stack_state.pop_back();
+ }
+ 
+-void LiftoffAssembler::PrepareLoopArgs(int num) {
+-  for (int i = 0; i < num; ++i) {
+-    VarState& slot = cache_state_.stack_state.end()[-1 - i];
+-    if (slot.is_stack()) continue;
+-    RegClass rc = reg_class_for(slot.kind());
+-    if (slot.is_reg()) {
+-      if (cache_state_.get_use_count(slot.reg()) > 1) {
+-        // If the register is used more than once, we cannot use it for the
+-        // merge. Move it to an unused register instead.
+-        LiftoffRegList pinned;
+-        pinned.set(slot.reg());
+-        LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
+-        Move(dst_reg, slot.reg(), slot.kind());
+-        cache_state_.dec_used(slot.reg());
+-        cache_state_.inc_used(dst_reg);
+-        slot.MakeRegister(dst_reg);
+-      }
+-      continue;
+-    }
+-    LiftoffRegister reg = GetUnusedRegister(rc, {});
+-    LoadConstant(reg, slot.constant());
+-    slot.MakeRegister(reg);
+-    cache_state_.inc_used(reg);
++void LiftoffAssembler::SpillLoopArgs(int num) {
++  for (VarState& slot :
++       base::VectorOf(cache_state_.stack_state.end() - num, num)) {
++    Spill(&slot);
+   }
+ }
+ 
+@@ -685,14 +666,14 @@ void LiftoffAssembler::Spill(VarState* slot) {
+ }
+ 
+ void LiftoffAssembler::SpillLocals() {
+-  for (uint32_t i = 0; i < num_locals_; ++i) {
+-    Spill(&cache_state_.stack_state[i]);
++  for (VarState& local_slot :
++       base::VectorOf(cache_state_.stack_state.data(), num_locals_)) {
++    Spill(&local_slot);
+   }
+ }
+ 
+ void LiftoffAssembler::SpillAllRegisters() {
+-  for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
+-    auto& slot = cache_state_.stack_state[i];
++  for (VarState& slot : cache_state_.stack_state) {
+     if (!slot.is_reg()) continue;
+     Spill(slot.offset(), slot.reg(), slot.kind());
+     slot.MakeStack();
+diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h
+index fec9efb7d32819d39a836fb38b71ae6233a12d72..3261b582139c0652e2faff455f1c8a580f57c382 100644
+--- a/src/wasm/baseline/liftoff-assembler.h
++++ b/src/wasm/baseline/liftoff-assembler.h
+@@ -460,9 +460,9 @@ class LiftoffAssembler : public MacroAssembler {
+   // the bottom of the stack.
+   void DropExceptionValueAtOffset(int offset);
+ 
+-  // Ensure that the loop inputs are either in a register or spilled to the
+-  // stack, so that we can merge different values on the back-edge.
+-  void PrepareLoopArgs(int num);
++  // Spill all loop inputs to the stack to free registers and to ensure that we
++  // can merge different values on the back-edge.
++  void SpillLoopArgs(int num);
+ 
+   V8_INLINE static int NextSpillOffset(ValueKind kind, int top_spill_offset);
+   V8_INLINE int NextSpillOffset(ValueKind kind);
+diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc
+index 29a4cdce9bac51d1ad0fa5e5cb7a39e14a8b7eae..c613568dc476418e3c40ec1a37690b3263f9473a 100644
+--- a/src/wasm/baseline/liftoff-compiler.cc
++++ b/src/wasm/baseline/liftoff-compiler.cc
+@@ -1390,7 +1390,7 @@ class LiftoffCompiler {
+     // pre-analysis of the function.
+     __ SpillLocals();
+ 
+-    __ PrepareLoopArgs(loop->start_merge.arity);
++    __ SpillLoopArgs(loop->start_merge.arity);
+ 
+     // Loop labels bind at the beginning of the block.
+     __ bind(loop->label.get());
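
The new SpillLoopArgs is noticeably simpler than the PrepareLoopArgs it replaces: instead of only relocating multiply-used registers and materialising constants, it unconditionally spills the top `num` stack values (the loop inputs) before the loop label is bound, so the back-edge merge never has to reload values that were spilled inside the loop. Below is a minimal standalone sketch of that behaviour; the VarState, Spill, and SpillLoopArgs names mirror the patch, but the types are simplified stand-ins rather than the real Liftoff classes, and std::span (C++20) stands in for V8's base::VectorOf.

// Simplified model of the patched SpillLoopArgs: every loop input is
// unconditionally moved to the stack, whatever its current location.
#include <cstdio>
#include <span>
#include <vector>

enum class Location { kRegister, kStack, kConstant };

struct VarState {
  Location loc = Location::kRegister;
};

// Force a single slot onto the stack. In Liftoff this emits a store (for a
// register) or materialises the constant; here we only track the location.
void Spill(VarState* slot) {
  if (slot->loc == Location::kStack) return;
  slot->loc = Location::kStack;
}

// Counterpart of LiftoffAssembler::SpillLoopArgs(int num): spill the top
// `num` values of the abstract value stack, i.e. the loop inputs.
void SpillLoopArgs(std::vector<VarState>& stack_state, int num) {
  std::span<VarState> inputs(stack_state.data() + stack_state.size() - num,
                             static_cast<std::size_t>(num));
  for (VarState& slot : inputs) Spill(&slot);
}

int main() {
  std::vector<VarState> stack_state(4);  // pretend four values are live
  SpillLoopArgs(stack_state, 2);         // loop with arity 2
  for (std::size_t i = 0; i < stack_state.size(); ++i) {
    std::printf("slot %zu: %s\n", i,
                stack_state[i].loc == Location::kStack ? "stack" : "register");
  }
  return 0;
}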