diff --git a/src/AcDream.App/Rendering/Wb/WbDrawDispatcher.cs b/src/AcDream.App/Rendering/Wb/WbDrawDispatcher.cs
index e1d4cbd..50b24fe 100644
--- a/src/AcDream.App/Rendering/Wb/WbDrawDispatcher.cs
+++ b/src/AcDream.App/Rendering/Wb/WbDrawDispatcher.cs
@@ -397,10 +397,42 @@ public sealed unsafe class WbDrawDispatcher : IDisposable
uint? populateEntityId = null;
uint populateLandblockId = 0;
+ // Tier 1 cache (#53) — fast-path one-shot tracker. The cache stores a
+ // FLAT list of batches across all MeshRefs of an entity, so a single
+ // ApplyCacheHit call already drew every batch. _walkScratch yields
+ // one tuple per (entity, MeshRefIndex), so without this guard a
+ // 3-MeshRef static entity on a frame-2 cache hit would call
+ // ApplyCacheHit 3 times — appending all 6 batches × 3 = 18 instances
+ // to _groups instead of 6. Result: severe Z-fighting + 3× perf hit
+ // on every multi-part static entity (buildings, statues, multi-MeshRef
+ // NPCs). The fast path must fire only on the FIRST tuple of each
+ // entity; subsequent tuples skip via this tracker.
+ uint? lastHitEntityId = null;
+
foreach (var (entity, partIdx, landblockId) in _walkScratch)
{
if (diag) _entitiesSeen++;
+ // Skip subsequent tuples of an entity that already cache-hit on
+ // its first tuple. ApplyCacheHit drew the full flat batch list;
+ // re-firing here would N-multiply the instance count. Diag
+ // _entitiesDrawn is bumped here to preserve per-tuple parity with
+ // the previous counting semantics.
+ if (lastHitEntityId == entity.Id)
+ {
+ if (diag) _entitiesDrawn++;
+ continue;
+ }
+
+ // Reset the hit tracker on entity change so the next entity's
+ // first tuple re-checks the cache. (When this iteration is the
+ // FIRST tuple of a new entity after a cache-hit entity, we must
+ // not retain the previous entity's id.)
+ if (lastHitEntityId.HasValue && lastHitEntityId.Value != entity.Id)
+ {
+ lastHitEntityId = null;
+ }
+
// Flush-on-entity-change: if the previous entity accumulated any
// batches AND this iteration is for a different entity, populate
// its cache entry now and reset the scratch buffer.
@@ -424,6 +456,11 @@ public sealed unsafe class WbDrawDispatcher : IDisposable
// hit, this iteration also finishes flushing any pending
// populate state from a previous entity. Animated entities never
// enter this branch — the !isAnimated guard makes that explicit.
+ //
+ // Fires ONCE per entity: the first tuple reaches here, runs
+ // ApplyCacheHit, sets lastHitEntityId, and continues. Subsequent
+ // tuples of the same entity short-circuit at the top of the loop
+ // body via the lastHitEntityId == entity.Id check above.
if (!isAnimated && _cache.TryGet(entity.Id, out var cachedEntry))
{
ApplyCacheHit(cachedEntry!, entityWorld, AppendInstanceToGroup);
@@ -440,6 +477,7 @@ public sealed unsafe class WbDrawDispatcher : IDisposable
}
if (diag) _entitiesDrawn++;
+ lastHitEntityId = entity.Id;
continue;
}
@@ -869,6 +907,10 @@ public sealed unsafe class WbDrawDispatcher : IDisposable
/// no next-iteration to trigger the flush-on-entity-change path,
/// so commit its accumulated batches here. No-op when no populate is
/// pending (the last entity was animated, or the scratch is empty).
+ ///
+ /// End-of-loop only — does NOT reset the caller's tracker locals
+ /// (intentional, since they go out of scope immediately after).
+ ///
/// </summary>
internal static void FinalFlushPopulate(
uint? populateEntityId,
diff --git a/tests/AcDream.Core.Tests/Rendering/Wb/WbDrawDispatcherBucketingTests.cs b/tests/AcDream.Core.Tests/Rendering/Wb/WbDrawDispatcherBucketingTests.cs
index 28c6ff8..06e814b 100644
--- a/tests/AcDream.Core.Tests/Rendering/Wb/WbDrawDispatcherBucketingTests.cs
+++ b/tests/AcDream.Core.Tests/Rendering/Wb/WbDrawDispatcherBucketingTests.cs
@@ -606,4 +606,83 @@ public sealed class WbDrawDispatcherBucketingTests
// After flush, scratch is cleared so the next entity starts fresh.
Assert.Empty(scratch);
}
+
+ [Fact]
+ public void ApplyCacheHit_PerTupleAmplification_DoesNotOccur()
+ {
+ // Regression test for the bug fixed at the commit landing alongside
+ // this test: Task 10's first attempt called ApplyCacheHit per-(entity,
+ // MeshRefIndex) tuple in Draw's foreach, but cachedEntry.Batches is
+ // flat across all MeshRefs of the entity. For a 3-MeshRef building on
+ // frame 2: 3 tuples × 6 cached batches per call = 18 instances drawn
+ // instead of 6. Severe Z-fighting and 3× perf hit on every multi-part
+ // static entity (buildings, statues, multi-MeshRef NPCs).
+ //
+ // This is the symmetric mirror of the Task 9 bug fixed at 00fa8ae —
+ // both came from spec §5.2 describing the foreach as per-entity when
+ // _walkScratch is per-tuple.
+ //
+ // The fix: track lastHitEntityId; the cache-hit fast path fires only
+ // on the FIRST tuple of each entity. Subsequent tuples of the same
+ // entity skip the iteration body via continue.
+ //
+ // This test simulates the inner-loop logic by directly invoking
+ // ApplyCacheHit + AppendInstanceToGroup the way Draw would, with N
+ // tuples for the same entity, asserting that groups[key].Count equals
+ // the cached batch count (6), NOT N × cached batch count (18).
+
+ // Set up a synthetic cache entry with 6 batches (representing 3
+ // MeshRefs × 2 batches each).
+ const int CachedBatchCount = 6;
+ var cache = new EntityClassificationCache();
+ var batches = new CachedBatch[CachedBatchCount];
+ for (int i = 0; i < CachedBatchCount; i++)
+ {
+ batches[i] = MakeCachedBatch(
+ ibo: 1u, firstIndex: (uint)i, indexCount: 6, texHandle: (ulong)(0x100 + i));
+ }
+ cache.Populate(entityId: 100, landblockHint: 0xA9B40000u, batches);
+
+ // Simulate Draw's per-entity loop: 3 tuples for the same entity.
+ // Track which entity has already cache-hit (mirrors the production
+ // lastHitEntityId pattern).
+ var groups = new Dictionary<GroupKey, List<Matrix4x4>>();
+ uint? lastHitEntityId = null;
+ var entityWorld = Matrix4x4.Identity; // simplest case for assertion clarity
+ const uint EntityId = 100;
+ const int MeshRefCount = 3;
+
+ void AppendInstance(GroupKey k, Matrix4x4 m)
+ {
+ if (!groups.TryGetValue(k, out var list))
+ {
+ list = new List<Matrix4x4>();
+ groups[k] = list;
+ }
+ list.Add(m);
+ }
+
+ for (int partIdx = 0; partIdx < MeshRefCount; partIdx++)
+ {
+ // Skip subsequent tuples of an entity that cache-hit (the fix).
+ if (lastHitEntityId == EntityId) continue;
+
+ if (cache.TryGet(EntityId, out var entry))
+ {
+ Assert.NotNull(entry);
+ WbDrawDispatcher.ApplyCacheHit(entry!, entityWorld, AppendInstance);
+ lastHitEntityId = EntityId;
+ }
+ }
+
+ // Assertion: each group's matrix count equals the cached batches matching
+ // that key, NOT (cached batches × MeshRef count). Here each batch has a
+ // unique key, so each group has exactly 1 matrix.
+ int totalMatrices = 0;
+ foreach (var (_, matrices) in groups) totalMatrices += matrices.Count;
+ Assert.Equal(CachedBatchCount, totalMatrices); // 6, NOT 18
+
+ // Sanity: 6 distinct keys (one per cached batch since FirstIndex differs).
+ Assert.Equal(CachedBatchCount, groups.Count);
+ }
}