Browse Source

chore: cherry-pick 9797576 from v8 (#43387)

* chore: cherry-pick 9797576 from v8 (#43376)

* chore: update patches

---------

Co-authored-by: Shelley Vohr <[email protected]>
Keeley Hammond 8 months ago
parent
commit
b74089153a
2 changed files with 105 additions and 0 deletions:
  1. patches/v8/.patches (+1 −0)
  2. patches/v8/spill_all_loop_inputs_before_entering_loop.patch (+104 −0)

patches/v8/.patches (+1 −0)

@@ -8,3 +8,4 @@ cherry-pick-70d2fe6b7c47.patch
 cherry-pick-901377bb2f3b.patch
 cherry-pick-bb28367eed73.patch
 cherry-pick-bc545b15a0ee.patch
+spill_all_loop_inputs_before_entering_loop.patch

patches/v8/spill_all_loop_inputs_before_entering_loop.patch (+104 −0)

@@ -0,0 +1,104 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Clemens Backes <[email protected]>
+Date: Tue, 20 Aug 2024 12:25:40 +0200
+Subject: Spill all loop inputs before entering loop
+
+This avoids having to load the value back into a register if it was
+spilled inside of the loop.
+
[email protected]
+
+Fixed: chromium:360700873
+Change-Id: I24f5deacebc893293e8a3c007e9f070c7fa0ccd2
+Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/5797073
+Reviewed-by: Jakob Kummerow <[email protected]>
+Commit-Queue: Clemens Backes <[email protected]>
+Cr-Commit-Position: refs/heads/main@{#95711}
+
+diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc
+index e1ca7bebdc8408e21715dd0fc9861a474b989bcc..adaa8f9586b76bdb6902dce7ecc64b32dc3f5253 100644
+--- a/src/wasm/baseline/liftoff-assembler.cc
++++ b/src/wasm/baseline/liftoff-assembler.cc
+@@ -441,29 +441,10 @@ void LiftoffAssembler::DropExceptionValueAtOffset(int offset) {
+   cache_state_.stack_state.pop_back();
+ }
+ 
+-void LiftoffAssembler::PrepareLoopArgs(int num) {
+-  for (int i = 0; i < num; ++i) {
+-    VarState& slot = cache_state_.stack_state.end()[-1 - i];
+-    if (slot.is_stack()) continue;
+-    RegClass rc = reg_class_for(slot.kind());
+-    if (slot.is_reg()) {
+-      if (cache_state_.get_use_count(slot.reg()) > 1) {
+-        // If the register is used more than once, we cannot use it for the
+-        // merge. Move it to an unused register instead.
+-        LiftoffRegList pinned;
+-        pinned.set(slot.reg());
+-        LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
+-        Move(dst_reg, slot.reg(), slot.kind());
+-        cache_state_.dec_used(slot.reg());
+-        cache_state_.inc_used(dst_reg);
+-        slot.MakeRegister(dst_reg);
+-      }
+-      continue;
+-    }
+-    LiftoffRegister reg = GetUnusedRegister(rc, {});
+-    LoadConstant(reg, slot.constant());
+-    slot.MakeRegister(reg);
+-    cache_state_.inc_used(reg);
++void LiftoffAssembler::SpillLoopArgs(int num) {
++  for (VarState& slot :
++       base::VectorOf(cache_state_.stack_state.end() - num, num)) {
++    Spill(&slot);
+   }
+ }
+ 
+@@ -684,14 +665,14 @@ void LiftoffAssembler::Spill(VarState* slot) {
+ }
+ 
+ void LiftoffAssembler::SpillLocals() {
+-  for (uint32_t i = 0; i < num_locals_; ++i) {
+-    Spill(&cache_state_.stack_state[i]);
++  for (VarState& local_slot :
++       base::VectorOf(cache_state_.stack_state.data(), num_locals_)) {
++    Spill(&local_slot);
+   }
+ }
+ 
+ void LiftoffAssembler::SpillAllRegisters() {
+-  for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
+-    auto& slot = cache_state_.stack_state[i];
++  for (VarState& slot : cache_state_.stack_state) {
+     if (!slot.is_reg()) continue;
+     Spill(slot.offset(), slot.reg(), slot.kind());
+     slot.MakeStack();
+diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h
+index 7cb2f5003735ef826559c247938275f996f61439..5d4ebd7fa30898ec857a92a2f31f18e1ea76905c 100644
+--- a/src/wasm/baseline/liftoff-assembler.h
++++ b/src/wasm/baseline/liftoff-assembler.h
+@@ -542,9 +542,9 @@ class LiftoffAssembler : public MacroAssembler {
+   // the bottom of the stack.
+   void DropExceptionValueAtOffset(int offset);
+ 
+-  // Ensure that the loop inputs are either in a register or spilled to the
+-  // stack, so that we can merge different values on the back-edge.
+-  void PrepareLoopArgs(int num);
++  // Spill all loop inputs to the stack to free registers and to ensure that we
++  // can merge different values on the back-edge.
++  void SpillLoopArgs(int num);
+ 
+   V8_INLINE static int NextSpillOffset(ValueKind kind, int top_spill_offset);
+   V8_INLINE int NextSpillOffset(ValueKind kind);
+diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc
+index c185d7273629fda3ddf5d756395f2a367cfa5864..c7b9ca92ecd7369ca3a758d1f69a9516063a44aa 100644
+--- a/src/wasm/baseline/liftoff-compiler.cc
++++ b/src/wasm/baseline/liftoff-compiler.cc
+@@ -1284,7 +1284,7 @@ class LiftoffCompiler {
+     // pre-analysis of the function.
+     __ SpillLocals();
+ 
+-    __ PrepareLoopArgs(loop->start_merge.arity);
++    __ SpillLoopArgs(loop->start_merge.arity);
+ 
+     // Loop labels bind at the beginning of the block.
+     __ bind(loop->label.get());