From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Aikar <aikar@aikar.co>
Date: Fri, 29 May 2020 23:32:14 -0400
Subject: [PATCH] Improve Chunk Status Transition Speed

When a chunk that has already been generated is loaded from disk, the
server still has to promote the chunk through the status system to reach
its current desired status level.

This results in every single status transition hopping from the main thread
to the world gen threads, only to discover it has no work it actually
needs to do... and then it returns back to main.

This back and forth costs a lot of time and can really delay chunk loads
when the server is under heavy load, due to there being a lot of time in
between chunk load processing, and it also ties up the chunk threads,
keeping them from doing actual generation and light work.

Additionally, the whole task system uses a lot of CPU on the server threads anyway.

So by optimizing status transitions for statuses that are already complete,
we can run them to the desired level while on the main thread (where it has
to happen anyway) instead of ever jumping to a world gen thread.
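
The heart of that optimization is a cheap check: a status transition is a
guaranteed no-op when the chunk we already hold in memory was saved at, or
past, the status we are about to move to. Below is a standalone toy sketch
of that idea; the Status enum, class and method signatures here are
illustrative stand-ins, while the real check is the
ChunkHolder#canAdvanceStatus() added in the diff further down:

    // Toy model of the no-op-transition check; the real code compares
    // net.minecraft ChunkStatus values via isOrAfter()/getNextStatus().
    public class StatusCheckSketch {
        enum Status { EMPTY, STRUCTURE_STARTS, NOISE, SURFACE, FEATURES, LIGHT, FULL }

        static boolean canAdvanceStatus(Status holderStatus, Status loadedChunkStatus) {
            if (loadedChunkStatus == null) {
                return false; // nothing in memory yet: real generation/IO work may be needed
            }
            // Status the holder wants next; a null holder status means "start from EMPTY".
            int next = holderStatus == null ? 0 : Math.min(holderStatus.ordinal() + 1, Status.FULL.ordinal());
            // A chunk already generated to (or past) that status needs no generation work.
            return loadedChunkStatus.ordinal() >= next;
        }

        public static void main(String[] args) {
            // A fully generated chunk loaded from disk can be promoted inline on main:
            System.out.println(canAdvanceStatus(Status.SURFACE, Status.FULL)); // true
            // A chunk that still needs generation work must go to the worldgen pool:
            System.out.println(canAdvanceStatus(Status.NOISE, Status.NOISE));  // false
        }
    }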

This improves chunk loading efficiency, reducing the path down to the
following scenario:

1) MAIN: chunk requested, load request sent to the ChunkTaskManager / IO queue
2) IO: once its position in the queue comes up, read the data off disk and schedule it to a chunk task thread
3) CHUNK: once the IO has loaded and its position in the queue comes up, deserialize the chunk data, process data conversions, and submit to the main queue
4) MAIN: on the next chunk task process point (mid tick or end of tick), load the chunk data into the world (POI, main thread tasks)
5) MAIN: process status transitions all the way up to LIGHT; LIGHT schedules its threaded task
6) SERVER: the light task registers light enablement for the chunk and does any lighting that needs to be done
7) MAIN: the task returns to main, which finishes processing up to FULL/TICKING status

Previously, this path would have hopped over to SERVER around 12+ extra times.
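
On the scheduling side, the change amounts to picking an executor per
transition (the ChunkMap hunk below). A minimal sketch of that routing,
using plain java.util.concurrent stand-ins rather than the real server
types (TransitionRoutingSketch and both executor parameters are
hypothetical names):

    import java.util.concurrent.Executor;

    // Sketch of the routing added in the ChunkMap hunk: guaranteed no-op
    // transitions stay on the main(-invoking) executor, everything else
    // still goes through the worldgen mailbox.
    public class TransitionRoutingSketch {
        static Executor pickExecutor(boolean canAdvanceStatus,
                                     Executor mainInvokingExecutor,
                                     Executor worldgenMailbox) {
            if (canAdvanceStatus) {
                // Chunk already satisfies the next status: the "transition" is just
                // bookkeeping, so skip the round trip through the worldgen pool.
                return mainInvokingExecutor;
            }
            // Real generation/lighting work: keep using the worldgen thread pool.
            return worldgenMailbox;
        }
    }

Here mainInvokingExecutor is assumed to behave like Paper's executor of the
same name, running the task immediately when the caller is already the main
thread and queueing it for main otherwise, which is what lets
already-generated chunks be promoted without ever leaving main.
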
diff --git a/src/main/java/net/minecraft/server/level/ChunkHolder.java b/src/main/java/net/minecraft/server/level/ChunkHolder.java
index 6ba7e2713452c4c6f48a1a825ef27b500140aa16..cd4328bd606d778ebb45f36af8cf23d88edef881 100644
--- a/src/main/java/net/minecraft/server/level/ChunkHolder.java
+++ b/src/main/java/net/minecraft/server/level/ChunkHolder.java
@@ -95,6 +95,13 @@ public class ChunkHolder {
// Paper end - optimise anyPlayerCloseEnoughForSpawning
long lastAutoSaveTime; // Paper - incremental autosave
long inactiveTimeStart; // Paper - incremental autosave
+    // Paper start - optimize chunk status progression without jumping through thread pool
+    public boolean canAdvanceStatus() {
+        ChunkStatus status = getChunkHolderStatus();
+        ChunkAccess chunk = getAvailableChunkNow();
+        return chunk != null && (status == null || chunk.getStatus().isOrAfter(getNextStatus(status)));
+    }
+    // Paper end
public ChunkHolder(ChunkPos pos, int level, LevelHeightAccessor world, LevelLightEngine lightingProvider, ChunkHolder.LevelChangeListener levelUpdateListener, ChunkHolder.PlayerProvider playersWatchingChunkProvider) {
this.futures = new AtomicReferenceArray(ChunkHolder.CHUNK_STATUSES.size());
diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java
index 17af44ebe972158a4fa7b0cb5ea67406ec7bc5ac..9d9312cb8e9681428f7ab1b1e6eb803fc558d651 100644
--- a/src/main/java/net/minecraft/server/level/ChunkMap.java
+++ b/src/main/java/net/minecraft/server/level/ChunkMap.java
@@ -691,7 +691,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
return either.mapLeft((list) -> {
return (LevelChunk) list.get(list.size() / 2);
});
- }, this.mainThreadExecutor);
+ }, this.mainInvokingExecutor); // Paper
}
@Nullable
@@ -1101,6 +1101,12 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
return "chunkGenerate " + requiredStatus.getName();
});
Executor executor = (runnable) -> {
+            // Paper start - optimize chunk status progression without jumping through thread pool
+            if (holder.canAdvanceStatus()) {
+                this.mainInvokingExecutor.execute(runnable);
+                return;
+            }
+            // Paper end
this.worldgenMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable));
};