Diffstat (limited to 'paper-server/patches/features/0024-Attempt-to-recalculate-regionfile-header-if-it-is-co.patch')
-rw-r--r-- paper-server/patches/features/0024-Attempt-to-recalculate-regionfile-header-if-it-is-co.patch | 732
1 file changed, 732 insertions, 0 deletions
diff --git a/paper-server/patches/features/0024-Attempt-to-recalculate-regionfile-header-if-it-is-co.patch b/paper-server/patches/features/0024-Attempt-to-recalculate-regionfile-header-if-it-is-co.patch
new file mode 100644
index 0000000000..e35d6a31a9
--- /dev/null
+++ b/paper-server/patches/features/0024-Attempt-to-recalculate-regionfile-header-if-it-is-co.patch
@@ -0,0 +1,732 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Spottedleaf <[email protected]>
+Date: Sun, 2 Feb 2020 02:25:10 -0800
+Subject: [PATCH] Attempt to recalculate regionfile header if it is corrupt
+
+Instead of trying to relocate the chunk, which never seems to be the
+correct choice (we end up duplicating or swapping chunks), we instead
+drop the current regionfile header and recalculate it - hoping that at
+least then we don't swap chunks, and maybe recover them all.
+
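+For reference, each of the 1024 entries in a regionfile's offset header
+packs a chunk's starting sector and its length in sectors; the
+recalculation below rebuilds those entries from whatever chunk data can
+still be parsed out of the file. A minimal sketch of the encoding
+(matching the expressions used in recalculateHeader):
+
+    int location = (chunkX & 31) | ((chunkZ & 31) << 5); // index into the 1024-entry header
+    int entry = sectorOffset << 8 | (sectorLength > 255 ? 255 : sectorLength); // 255 marks forge-style oversized data
+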
+diff --git a/net/minecraft/world/level/chunk/storage/RegionBitmap.java b/net/minecraft/world/level/chunk/storage/RegionBitmap.java
+index 64a718c98f799c62a5bb28e1e8e5f66cc96c915d..666f2e967c99f78422c83fb20e1a3bf3efa7845e 100644
+--- a/net/minecraft/world/level/chunk/storage/RegionBitmap.java
++++ b/net/minecraft/world/level/chunk/storage/RegionBitmap.java
+@@ -9,6 +9,27 @@ import java.util.BitSet;
+ public class RegionBitmap {
+ private final BitSet used = new BitSet();
+
++ // Paper start - Attempt to recalculate regionfile header if it is corrupt
++ public final void copyFrom(RegionBitmap other) {
++ BitSet thisBitset = this.used;
++ BitSet otherBitset = other.used;
++
++ for (int i = 0; i < Math.max(thisBitset.size(), otherBitset.size()); ++i) {
++ thisBitset.set(i, otherBitset.get(i));
++ }
++ }
++
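++ // Marks sectors [from, from + length) as used only if none of them are already allocated; returns false (allocating nothing) on overlap.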
++ public final boolean tryAllocate(int from, int length) {
++ BitSet bitset = this.used;
++ int firstSet = bitset.nextSetBit(from);
++ if (firstSet > 0 && firstSet < (from + length)) {
++ return false;
++ }
++ bitset.set(from, from + length);
++ return true;
++ }
++ // Paper end - Attempt to recalculate regionfile header if it is corrupt
++
+ public void force(int sectorOffset, int sectorCount) {
+ this.used.set(sectorOffset, sectorOffset + sectorCount);
+ }
+diff --git a/net/minecraft/world/level/chunk/storage/RegionFile.java b/net/minecraft/world/level/chunk/storage/RegionFile.java
+index ff4fc280409f680f3879a495e37cf1925b1a38f1..a4621c96fd456c5cdd1b6847931806e677b26b30 100644
+--- a/net/minecraft/world/level/chunk/storage/RegionFile.java
++++ b/net/minecraft/world/level/chunk/storage/RegionFile.java
+@@ -46,6 +46,355 @@ public class RegionFile implements AutoCloseable, ca.spottedleaf.moonrise.patche
+ @VisibleForTesting
+ protected final RegionBitmap usedSectors = new RegionBitmap();
+
++ // Paper start - Attempt to recalculate regionfile header if it is corrupt
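++ // Computes the number of 4096-byte sectors needed to hold the given byte count, i.e. ceil(bytes / 4096), without branching.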
++ private static long roundToSectors(long bytes) {
++ long sectors = bytes >>> 12; // 4096 = 2^12
++ long remainingBytes = bytes & 4095;
++ long sign = -remainingBytes; // sign bit of 'sign' is 1 iff remainingBytes is nonzero
++ return sectors + (sign >>> 63); // round up by one sector when there are leftover bytes
++ }
++
++ private static final net.minecraft.nbt.CompoundTag OVERSIZED_COMPOUND = new net.minecraft.nbt.CompoundTag();
++
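++ // Attempts to parse the chunk payload starting at the given sector; returns null if it cannot be read or parsed, or OVERSIZED_COMPOUND for forge-style external (oversized) chunks.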
++ private @Nullable net.minecraft.nbt.CompoundTag attemptRead(long sector, int chunkDataLength, long fileLength) throws IOException {
++ try {
++ if (chunkDataLength < 0) {
++ return null;
++ }
++
++ long offset = sector * 4096L + 4L; // offset for chunk data
++
++ if ((offset + chunkDataLength) > fileLength) {
++ return null;
++ }
++
++ ByteBuffer chunkData = ByteBuffer.allocate(chunkDataLength);
++ if (chunkDataLength != this.file.read(chunkData, offset)) {
++ return null;
++ }
++
++ ((java.nio.Buffer)chunkData).flip();
++
++ byte compressionType = chunkData.get();
++ if (compressionType < 0) { // compressionType & 128 != 0
++ // oversized chunk
++ return OVERSIZED_COMPOUND;
++ }
++
++ RegionFileVersion compression = RegionFileVersion.fromId(compressionType);
++ if (compression == null) {
++ return null;
++ }
++
++ InputStream input = compression.wrap(new ByteArrayInputStream(chunkData.array(), chunkData.position(), chunkDataLength - chunkData.position()));
++
++ return net.minecraft.nbt.NbtIo.read(new DataInputStream(input));
++ } catch (Exception ex) {
++ return null;
++ }
++ }
++
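++ // Reads the 4-byte big-endian length field at the start of the given sector; returns -1 if it cannot be read.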
++ private int getLength(long sector) throws IOException {
++ ByteBuffer length = ByteBuffer.allocate(4);
++ if (4 != this.file.read(length, sector * 4096L)) {
++ return -1;
++ }
++
++ return length.getInt(0);
++ }
++
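++ // Copies this regionfile to a randomly named .backup sibling so the original bytes are preserved before the header is rewritten.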
++ private void backupRegionFile() {
++ Path backup = this.path.getParent().resolve(this.path.getFileName() + "." + new java.util.Random().nextLong() + ".backup");
++ this.backupRegionFile(backup);
++ }
++
++ private void backupRegionFile(Path to) {
++ try {
++ this.file.force(true);
++ LOGGER.warn("Backing up regionfile \"" + this.path.toAbsolutePath() + "\" to " + to.toAbsolutePath());
++ java.nio.file.Files.copy(this.path, to, java.nio.file.StandardCopyOption.COPY_ATTRIBUTES);
++ LOGGER.warn("Backed up the regionfile to " + to.toAbsolutePath());
++ } catch (IOException ex) {
++ LOGGER.error("Failed to backup to " + to.toAbsolutePath(), ex);
++ }
++ }
++
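++ // A regionfile covers a 32x32 chunk area; two chunks belong to the same file iff their region coordinates (chunk coordinate >> 5) match.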
++ private static boolean inSameRegionfile(ChunkPos first, ChunkPos second) {
++ return (first.x & ~31) == (second.x & ~31) && (first.z & ~31) == (second.z & ~31);
++ }
++
++ // note: only call for CHUNK regionfiles
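++ // Scans every data sector for parseable chunk data, prefers the copy with the newest LastUpdate timestamp, then rebuilds the offset header and sector allocations from what was found.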
++ boolean recalculateHeader() throws IOException {
++ if (!this.canRecalcHeader) {
++ return false;
++ }
++ ChunkPos ourLowerLeftPosition = RegionFileStorage.getRegionFileCoordinates(this.path);
++ if (ourLowerLeftPosition == null) {
++ LOGGER.error("Unable to get chunk location of regionfile " + this.path.toAbsolutePath() + ", cannot recover header");
++ return false;
++ }
++ synchronized (this) {
++ LOGGER.warn("Corrupt regionfile header detected! Attempting to re-calculate header offsets for regionfile " + this.path.toAbsolutePath(), new Throwable());
++
++ // try to backup file so maybe it could be sent to us for further investigation
++
++ this.backupRegionFile();
++ net.minecraft.nbt.CompoundTag[] compounds = new net.minecraft.nbt.CompoundTag[32 * 32]; // only in the regionfile (i.e. exclude mojang/aikar oversized data)
++ int[] rawLengths = new int[32 * 32]; // length of chunk data including 4 byte length field, bytes
++ int[] sectorOffsets = new int[32 * 32]; // in sectors
++ boolean[] hasAikarOversized = new boolean[32 * 32];
++
++ long fileLength = this.file.size();
++ long totalSectors = roundToSectors(fileLength);
++
++ // search the regionfile from start to finish for the most up-to-date chunk data
++
++ for (long i = 2, maxSector = Math.min((long)(Integer.MAX_VALUE >>> 8), totalSectors); i < maxSector; ++i) { // first two sectors are header, skip
++ int chunkDataLength = this.getLength(i);
++ net.minecraft.nbt.CompoundTag compound = this.attemptRead(i, chunkDataLength, fileLength);
++ if (compound == null || compound == OVERSIZED_COMPOUND) {
++ continue;
++ }
++
++ ChunkPos chunkPos = SerializableChunkData.getChunkCoordinate(compound);
++ if (!inSameRegionfile(ourLowerLeftPosition, chunkPos)) {
++ LOGGER.error("Ignoring absolute chunk " + chunkPos + " in regionfile as it is not contained in the bounds of the regionfile '" + this.path.toAbsolutePath() + "'. It should be in regionfile (" + (chunkPos.x >> 5) + "," + (chunkPos.z >> 5) + ")");
++ continue;
++ }
++ int location = (chunkPos.x & 31) | ((chunkPos.z & 31) << 5);
++
++ net.minecraft.nbt.CompoundTag otherCompound = compounds[location];
++
++ if (otherCompound != null && SerializableChunkData.getLastWorldSaveTime(otherCompound) > SerializableChunkData.getLastWorldSaveTime(compound)) {
++ continue; // don't overwrite newer data.
++ }
++
++ // aikar oversized?
++ Path aikarOversizedFile = this.getOversizedFile(chunkPos.x, chunkPos.z);
++ boolean isAikarOversized = false;
++ if (Files.exists(aikarOversizedFile)) {
++ try {
++ net.minecraft.nbt.CompoundTag aikarOversizedCompound = this.getOversizedData(chunkPos.x, chunkPos.z);
++ if (SerializableChunkData.getLastWorldSaveTime(compound) == SerializableChunkData.getLastWorldSaveTime(aikarOversizedCompound)) {
++ // best we got for an id. hope it's good enough
++ isAikarOversized = true;
++ }
++ } catch (Exception ex) {
++ LOGGER.error("Failed to read aikar oversized data for absolute chunk (" + chunkPos.x + "," + chunkPos.z + ") in regionfile " + this.path.toAbsolutePath() + ", oversized data for this chunk will be lost", ex);
++ // fall through, if we can't read aikar oversized we can't risk corrupting chunk data
++ }
++ }
++
++ hasAikarOversized[location] = isAikarOversized;
++ compounds[location] = compound;
++ rawLengths[location] = chunkDataLength + 4;
++ sectorOffsets[location] = (int)i;
++
++ int chunkSectorLength = (int)roundToSectors(rawLengths[location]);
++ i += chunkSectorLength;
++ --i; // gets incremented next iteration
++ }
++
++ // forge style oversized data is already handled by the local search; for aikar data we just
++ // hope we got it right, as aikar data has no identifiers we could use to try and find its
++ // corresponding local data compound
++
++ java.nio.file.Path containingFolder = this.externalFileDir;
++ Path[] regionFiles = Files.list(containingFolder).toArray(Path[]::new);
++ boolean[] oversized = new boolean[32 * 32];
++ RegionFileVersion[] oversizedCompressionTypes = new RegionFileVersion[32 * 32];
++
++ if (regionFiles != null) {
++ int lowerXBound = ourLowerLeftPosition.x; // inclusive
++ int lowerZBound = ourLowerLeftPosition.z; // inclusive
++ int upperXBound = lowerXBound + 32 - 1; // inclusive
++ int upperZBound = lowerZBound + 32 - 1; // inclusive
++
++ // read mojang oversized data
++ for (Path regionFile : regionFiles) {
++ ChunkPos oversizedCoords = getOversizedChunkPair(regionFile);
++ if (oversizedCoords == null) {
++ continue;
++ }
++
++ if ((oversizedCoords.x < lowerXBound || oversizedCoords.x > upperXBound) || (oversizedCoords.z < lowerZBound || oversizedCoords.z > upperZBound)) {
++ continue; // not in our regionfile
++ }
++
++ // ensure oversized data is valid & is newer than data in the regionfile
++
++ int location = (oversizedCoords.x & 31) | ((oversizedCoords.z & 31) << 5);
++
++ byte[] chunkData;
++ try {
++ chunkData = Files.readAllBytes(regionFile);
++ } catch (Exception ex) {
++ LOGGER.error("Failed to read oversized chunk data in file " + regionFile.toAbsolutePath() + ", data will be lost", ex);
++ continue;
++ }
++
++ net.minecraft.nbt.CompoundTag compound = null;
++
++ // We do not know the compression type, as it's stored in the regionfile. So we need to try all of them
++ RegionFileVersion compression = null;
++ for (RegionFileVersion compressionType : RegionFileVersion.VERSIONS.values()) {
++ try {
++ DataInputStream in = new DataInputStream(compressionType.wrap(new ByteArrayInputStream(chunkData))); // typical java
++ compound = net.minecraft.nbt.NbtIo.read((java.io.DataInput)in);
++ compression = compressionType;
++ break; // reaches here iff readNBT does not throw
++ } catch (Exception ex) {
++ continue;
++ }
++ }
++
++ if (compound == null) {
++ LOGGER.error("Failed to read oversized chunk data in file " + regionFile.toAbsolutePath() + ", it's corrupt. Its data will be lost");
++ continue;
++ }
++
++ if (!SerializableChunkData.getChunkCoordinate(compound).equals(oversizedCoords)) {
++ LOGGER.error("Can't use oversized chunk stored in " + regionFile.toAbsolutePath() + ", got absolute chunkpos: " + SerializableChunkData.getChunkCoordinate(compound) + ", expected " + oversizedCoords);
++ continue;
++ }
++
++ if (compounds[location] == null || SerializableChunkData.getLastWorldSaveTime(compound) > SerializableChunkData.getLastWorldSaveTime(compounds[location])) {
++ oversized[location] = true;
++ oversizedCompressionTypes[location] = compression;
++ }
++ }
++ }
++
++ // now we need to calculate a new offset header
++
++ int[] calculatedOffsets = new int[32 * 32];
++ RegionBitmap newSectorAllocations = new RegionBitmap();
++ newSectorAllocations.force(0, 2); // make space for header
++
++ // allocate sectors for normal chunks
++
++ for (int chunkX = 0; chunkX < 32; ++chunkX) {
++ for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
++ int location = chunkX | (chunkZ << 5);
++
++ if (oversized[location]) {
++ continue;
++ }
++
++ int rawLength = rawLengths[location]; // bytes
++ int sectorOffset = sectorOffsets[location]; // sectors
++ int sectorLength = (int)roundToSectors(rawLength);
++
++ if (newSectorAllocations.tryAllocate(sectorOffset, sectorLength)) {
++ calculatedOffsets[location] = sectorOffset << 8 | (sectorLength > 255 ? 255 : sectorLength); // support forge style oversized
++ } else {
++ LOGGER.error("Failed to allocate space for local chunk (overlapping data??) at (" + chunkX + "," + chunkZ + ") in regionfile " + this.path.toAbsolutePath() + ", chunk will be regenerated");
++ }
++ }
++ }
++
++ // allocate sectors for oversized chunks
++
++ for (int chunkX = 0; chunkX < 32; ++chunkX) {
++ for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
++ int location = chunkX | (chunkZ << 5);
++
++ if (!oversized[location]) {
++ continue;
++ }
++
++ int sectorOffset = newSectorAllocations.allocate(1);
++ int sectorLength = 1;
++
++ try {
++ this.file.write(this.createExternalStub(oversizedCompressionTypes[location]), sectorOffset * 4096);
++ // only allocate in the new offsets if the write succeeds
++ calculatedOffsets[location] = sectorOffset << 8 | (sectorLength > 255 ? 255 : sectorLength); // support forge style oversized
++ } catch (IOException ex) {
++ newSectorAllocations.free(sectorOffset, sectorLength);
++ LOGGER.error("Failed to write new oversized chunk data holder, local chunk at (" + chunkX + "," + chunkZ + ") in regionfile " + this.path.toAbsolutePath() + " will be regenerated");
++ }
++ }
++ }
++
++ // rewrite aikar oversized data
++
++ this.oversizedCount = 0;
++ for (int chunkX = 0; chunkX < 32; ++chunkX) {
++ for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
++ int location = chunkX | (chunkZ << 5);
++ int isAikarOversized = hasAikarOversized[location] ? 1 : 0;
++
++ this.oversizedCount += isAikarOversized;
++ this.oversized[location] = (byte)isAikarOversized;
++ }
++ }
++
++ if (this.oversizedCount > 0) {
++ try {
++ this.writeOversizedMeta();
++ } catch (Exception ex) {
++ LOGGER.error("Failed to write aikar oversized chunk meta, all aikar style oversized chunk data will be lost for regionfile " + this.path.toAbsolutePath(), ex);
++ Files.deleteIfExists(this.getOversizedMetaFile());
++ }
++ } else {
++ Files.deleteIfExists(this.getOversizedMetaFile());
++ }
++
++ this.usedSectors.copyFrom(newSectorAllocations);
++
++ // before we overwrite the old sectors, print a summary of the chunks that got changed.
++
++ LOGGER.info("Starting summary of changes for regionfile " + this.path.toAbsolutePath());
++
++ for (int chunkX = 0; chunkX < 32; ++chunkX) {
++ for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
++ int location = chunkX | (chunkZ << 5);
++
++ int oldOffset = this.offsets.get(location);
++ int newOffset = calculatedOffsets[location];
++
++ if (oldOffset == newOffset) {
++ continue;
++ }
++
++ this.offsets.put(location, newOffset); // overwrite incorrect offset
++
++ if (oldOffset == 0) {
++ // found lost data
++ LOGGER.info("Found missing data for local chunk (" + chunkX + "," + chunkZ + ") in regionfile " + this.path.toAbsolutePath());
++ } else if (newOffset == 0) {
++ LOGGER.warn("Data for local chunk (" + chunkX + "," + chunkZ + ") could not be recovered in regionfile " + this.path.toAbsolutePath() + ", it will be regenerated");
++ } else {
++ LOGGER.info("Local chunk (" + chunkX + "," + chunkZ + ") changed to point to newer data or correct chunk in regionfile " + this.path.toAbsolutePath());
++ }
++ }
++ }
++
++ LOGGER.info("End of change summary for regionfile " + this.path.toAbsolutePath());
++
++ // simply destroy the timestamp header, it's not used
++
++ for (int i = 0; i < 32 * 32; ++i) {
++ this.timestamps.put(i, calculatedOffsets[i] != 0 ? RegionFile.getTimestamp() : 0); // write a valid timestamp for valid chunks, I do not want to find out whatever dumb program actually checks this
++ }
++
++ // write new header
++ try {
++ this.flush();
++ this.file.force(true); // try to ensure it goes through...
++ LOGGER.info("Successfully wrote new header to disk for regionfile " + this.path.toAbsolutePath());
++ } catch (IOException ex) {
++ LOGGER.error("Failed to write new header to disk for regionfile " + this.path.toAbsolutePath(), ex);
++ }
++ }
++
++ return true;
++ }
++
++ final boolean canRecalcHeader; // final forces compile fail on new constructor
++ // Paper end - Attempt to recalculate regionfile header if it is corrupt
++
+ // Paper start - rewrite chunk system
+ @Override
+ public final ca.spottedleaf.moonrise.patches.chunk_system.io.MoonriseRegionFileIO.RegionDataController.WriteData moonrise$startWrite(final net.minecraft.nbt.CompoundTag data, final ChunkPos pos) throws IOException {
+@@ -74,6 +423,7 @@ public class RegionFile implements AutoCloseable, ca.spottedleaf.moonrise.patche
+ throw new IllegalArgumentException("Expected directory, got " + externalFileDir.toAbsolutePath());
+ } else {
+ this.externalFileDir = externalFileDir;
++ this.canRecalcHeader = RegionFileStorage.isChunkDataFolder(this.externalFileDir); // Paper - add can recalc flag
+ this.offsets = this.header.asIntBuffer();
+ this.offsets.limit(1024);
+ this.header.position(4096);
+@@ -94,11 +444,13 @@ public class RegionFile implements AutoCloseable, ca.spottedleaf.moonrise.patche
+
+ long size = Files.size(path);
+
+- for (int i1 = 0; i1 < 1024; i1++) {
++ boolean needsHeaderRecalc = false; // Paper - recalculate header on header corruption
++ boolean hasBackedUp = false; // Paper - recalculate header on header corruption
++ for (int i1 = 0; i1 < 1024; i1++) { final int headerLocation = i1; // Paper - we expect this to be the header location
+ int i2 = this.offsets.get(i1);
+ if (i2 != 0) {
+- int sectorNumber = getSectorNumber(i2);
+- int numSectors = getNumSectors(i2);
++ final int sectorNumber = getSectorNumber(i2); // Paper - we expect this to be offset in file in sectors
++ int numSectors = getNumSectors(i2); // Paper - diff on change, we expect this to be sector length of region - watch out for reassignments
+ // Spigot start
+ if (numSectors == 255) {
+ // We're maxed out, so we need to read the proper length from the section
+@@ -109,18 +461,62 @@ public class RegionFile implements AutoCloseable, ca.spottedleaf.moonrise.patche
+ // Spigot end
+ if (sectorNumber < 2) {
+ LOGGER.warn("Region file {} has invalid sector at index: {}; sector {} overlaps with header", path, i1, sectorNumber);
+- this.offsets.put(i1, 0);
++ //this.offsets.put(i1, 0); // Paper - we catch this, but need it in the header for the summary change
+ } else if (numSectors == 0) {
+ LOGGER.warn("Region file {} has an invalid sector at index: {}; size has to be > 0", path, i1);
+- this.offsets.put(i1, 0);
++ //this.offsets.put(i1, 0); // Paper - we catch this, but need it in the header for the summary change
+ } else if (sectorNumber * 4096L > size) {
+ LOGGER.warn("Region file {} has an invalid sector at index: {}; sector {} is out of bounds", path, i1, sectorNumber);
+- this.offsets.put(i1, 0);
++ //this.offsets.put(i1, 0); // Paper - we catch this, but need it in the header for the summary change
+ } else {
+- this.usedSectors.force(sectorNumber, numSectors);
++ //this.usedSectors.force(sectorNumber, numSectors); // Paper - move this down so we can check if it fails to allocate
++ }
++ // Paper start - recalculate header on header corruption
++ if (sectorNumber < 2 || numSectors <= 0 || ((long)sectorNumber * 4096L) > size) {
++ if (canRecalcHeader) {
++ LOGGER.error("Detected invalid header for regionfile " + this.path.toAbsolutePath() + "! Recalculating header...");
++ needsHeaderRecalc = true;
++ break;
++ } else {
++ // location = chunkX | (chunkZ << 5);
++ LOGGER.error("Detected invalid header for regionfile " + this.path.toAbsolutePath() +
++ "! Cannot recalculate, removing local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") from header");
++ if (!hasBackedUp) {
++ hasBackedUp = true;
++ this.backupRegionFile();
++ }
++ this.timestamps.put(headerLocation, 0); // be consistent, delete the timestamp too
++ this.offsets.put(headerLocation, 0); // delete the entry from header
++ continue;
++ }
++ }
++ boolean failedToAllocate = !this.usedSectors.tryAllocate(sectorNumber, numSectors);
++ if (failedToAllocate) {
++ LOGGER.error("Overlapping allocation by local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") in regionfile " + this.path.toAbsolutePath());
+ }
++ if (failedToAllocate & !canRecalcHeader) {
++ // location = chunkX | (chunkZ << 5);
++ LOGGER.error("Detected invalid header for regionfile " + this.path.toAbsolutePath() +
++ "! Cannot recalculate, removing local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") from header");
++ if (!hasBackedUp) {
++ hasBackedUp = true;
++ this.backupRegionFile();
++ }
++ this.timestamps.put(headerLocation, 0); // be consistent, delete the timestamp too
++ this.offsets.put(headerLocation, 0); // delete the entry from header
++ continue;
++ }
++ needsHeaderRecalc |= failedToAllocate;
++ // Paper end - recalculate header on header corruption
+ }
+ }
++ // Paper start - recalculate header on header corruption
++ // we move the recalc here so comparison to old header is correct when logging to console
++ if (needsHeaderRecalc) { // true if header gave us overlapping allocations or had other issues
++ LOGGER.error("Recalculating regionfile " + this.path.toAbsolutePath() + ", header gave erroneous offsets & locations");
++ this.recalculateHeader();
++ }
++ // Paper end - recalculate header on header corruption
+ }
+ }
+ }
+@@ -130,10 +526,35 @@ public class RegionFile implements AutoCloseable, ca.spottedleaf.moonrise.patche
+ }
+
+ private Path getExternalChunkPath(ChunkPos chunkPos) {
+- String string = "c." + chunkPos.x + "." + chunkPos.z + ".mcc";
++ String string = "c." + chunkPos.x + "." + chunkPos.z + ".mcc"; // Paper - diff on change
+ return this.externalFileDir.resolve(string);
+ }
+
++ // Paper start
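++ // Parses a mojang oversized chunk file name of the form c.<x>.<z>.mcc and returns its chunk position; null if the name does not match.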
++ private static @Nullable ChunkPos getOversizedChunkPair(Path file) {
++ String fileName = file.getFileName().toString();
++
++ if (!fileName.startsWith("c.") || !fileName.endsWith(".mcc")) {
++ return null;
++ }
++
++ String[] split = fileName.split("\\.");
++
++ if (split.length != 4) {
++ return null;
++ }
++
++ try {
++ int x = Integer.parseInt(split[1]);
++ int z = Integer.parseInt(split[2]);
++
++ return new ChunkPos(x, z);
++ } catch (NumberFormatException ex) {
++ return null;
++ }
++ }
++ // Paper end
++
+ @Nullable
+ public synchronized DataInputStream getChunkDataInputStream(ChunkPos chunkPos) throws IOException {
+ int offset = this.getOffset(chunkPos);
+@@ -155,30 +576,67 @@ public class RegionFile implements AutoCloseable, ca.spottedleaf.moonrise.patche
+ byteBuffer.flip();
+ if (byteBuffer.remaining() < 5) {
+ LOGGER.error("Chunk {} header is truncated: expected {} but read {}", chunkPos, i, byteBuffer.remaining());
++ // Paper start - recalculate header on regionfile corruption
++ if (this.canRecalcHeader && this.recalculateHeader()) {
++ return this.getChunkDataInputStream(chunkPos);
++ }
++ // Paper end - recalculate header on regionfile corruption
+ return null;
+ } else {
+ int _int = byteBuffer.getInt();
+ byte b = byteBuffer.get();
+ if (_int == 0) {
+ LOGGER.warn("Chunk {} is allocated, but stream is missing", chunkPos);
++ // Paper start - recalculate header on regionfile corruption
++ if (this.canRecalcHeader && this.recalculateHeader()) {
++ return this.getChunkDataInputStream(chunkPos);
++ }
++ // Paper end - recalculate header on regionfile corruption
+ return null;
+ } else {
+ int i1 = _int - 1;
+ if (isExternalStreamChunk(b)) {
+ if (i1 != 0) {
+ LOGGER.warn("Chunk has both internal and external streams");
++ // Paper start - recalculate header on regionfile corruption
++ if (this.canRecalcHeader && this.recalculateHeader()) {
++ return this.getChunkDataInputStream(chunkPos);
++ }
++ // Paper end - recalculate header on regionfile corruption
+ }
+
+- return this.createExternalChunkInputStream(chunkPos, getExternalChunkVersion(b));
++ // Paper start - recalculate header on regionfile corruption
++ final DataInputStream ret = this.createExternalChunkInputStream(chunkPos, getExternalChunkVersion(b));
++ if (ret == null && this.canRecalcHeader && this.recalculateHeader()) {
++ return this.getChunkDataInputStream(chunkPos);
++ }
++ return ret;
++ // Paper end - recalculate header on regionfile corruption
+ } else if (i1 > byteBuffer.remaining()) {
+ LOGGER.error("Chunk {} stream is truncated: expected {} but read {}", chunkPos, i1, byteBuffer.remaining());
++ // Paper start - recalculate header on regionfile corruption
++ if (this.canRecalcHeader && this.recalculateHeader()) {
++ return this.getChunkDataInputStream(chunkPos);
++ }
++ // Paper end - recalculate header on regionfile corruption
+ return null;
+ } else if (i1 < 0) {
+ LOGGER.error("Declared size {} of chunk {} is negative", _int, chunkPos);
++ // Paper start - recalculate header on regionfile corruption
++ if (this.canRecalcHeader && this.recalculateHeader()) {
++ return this.getChunkDataInputStream(chunkPos);
++ }
++ // Paper end - recalculate header on regionfile corruption
+ return null;
+ } else {
+ JvmProfiler.INSTANCE.onRegionFileRead(this.info, chunkPos, this.version, i1);
+- return this.createChunkInputStream(chunkPos, b, createStream(byteBuffer, i1));
++ // Paper start - recalculate header on regionfile corruption
++ final DataInputStream ret = this.createChunkInputStream(chunkPos, b, createStream(byteBuffer, i1));
++ if (ret == null && this.canRecalcHeader && this.recalculateHeader()) {
++ return this.getChunkDataInputStream(chunkPos);
++ }
++ return ret;
++ // Paper end - recalculate header on regionfile corruption
+ }
+ }
+ }
+@@ -361,9 +819,14 @@ public class RegionFile implements AutoCloseable, ca.spottedleaf.moonrise.patche
+ }
+
+ private ByteBuffer createExternalStub() {
++ // Paper start - add version param
++ return this.createExternalStub(this.version);
++ }
++ private ByteBuffer createExternalStub(RegionFileVersion version) {
++ // Paper end - add version param
+ ByteBuffer byteBuffer = ByteBuffer.allocate(5);
+ byteBuffer.putInt(1);
+- byteBuffer.put((byte)(this.version.getId() | 128));
++ byteBuffer.put((byte)(version.getId() | 128));
+ byteBuffer.flip();
+ return byteBuffer;
+ }
+diff --git a/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
+index d263f78fa610ce6f6fb5a0f5e064e3d8335c2199..dad7f94b611cf0fc68b1a3878c458233f6bb6d61 100644
+--- a/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
++++ b/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
+@@ -23,6 +23,36 @@ public class RegionFileStorage implements AutoCloseable, ca.spottedleaf.moonrise
+ private final Path folder;
+ private final boolean sync;
+
++ // Paper start - recalculate region file headers
++ private final boolean isChunkData;
++
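++ // Header recalculation is only attempted for chunk data ("region") folders; see the CHUNK regionfile note on RegionFile#recalculateHeader.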
++ public static boolean isChunkDataFolder(Path path) {
++ return path.toFile().getName().equalsIgnoreCase("region");
++ }
++
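++ // Parses a regionfile name of the form r.<x>.<z>.mca and returns the chunk position of the region's lower-left corner (region coordinates << 5); null if the name does not match.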
++ @Nullable
++ public static ChunkPos getRegionFileCoordinates(Path file) {
++ String fileName = file.getFileName().toString();
++ if (!fileName.startsWith("r.") || !fileName.endsWith(".mca")) {
++ return null;
++ }
++
++ String[] split = fileName.split("\\.");
++
++ if (split.length != 4) {
++ return null;
++ }
++
++ try {
++ int x = Integer.parseInt(split[1]);
++ int z = Integer.parseInt(split[2]);
++
++ return new ChunkPos(x << 5, z << 5);
++ } catch (NumberFormatException ex) {
++ return null;
++ }
++ }
++ // Paper end - recalculate region file headers
+ // Paper start - rewrite chunk system
+ private static final int REGION_SHIFT = 5;
+ private static final int MAX_NON_EXISTING_CACHE = 1024 * 4;
+@@ -216,6 +246,7 @@ public class RegionFileStorage implements AutoCloseable, ca.spottedleaf.moonrise
+ this.folder = folder;
+ this.sync = sync;
+ this.info = info;
++ this.isChunkData = isChunkDataFolder(this.folder); // Paper - recalculate region file headers
+ }
+
+ @org.jetbrains.annotations.Contract("_, false -> !null") @Nullable private RegionFile getRegionFile(ChunkPos chunkPos, boolean existingOnly) throws IOException { // CraftBukkit
+@@ -309,6 +340,19 @@ public class RegionFileStorage implements AutoCloseable, ca.spottedleaf.moonrise
+ }
+
+ var4 = NbtIo.read(chunkDataInputStream);
++ // Paper start - recover from corrupt regionfile header
++ if (this.isChunkData) {
++ ChunkPos headerChunkPos = SerializableChunkData.getChunkCoordinate(var4);
++ if (!headerChunkPos.equals(chunkPos)) {
++ net.minecraft.server.MinecraftServer.LOGGER.error("Attempting to read chunk data at " + chunkPos + " but got chunk data for " + headerChunkPos + " instead! Attempting regionfile recalculation for regionfile " + regionFile.getPath().toAbsolutePath());
++ if (regionFile.recalculateHeader()) {
++ return this.read(chunkPos);
++ }
++ net.minecraft.server.MinecraftServer.LOGGER.error("Can't recalculate regionfile header, regenerating chunk " + chunkPos + " for " + regionFile.getPath().toAbsolutePath());
++ return null;
++ }
++ }
++ // Paper end - recover from corrupt regionfile header
+ }
+
+ return var4;
+diff --git a/net/minecraft/world/level/chunk/storage/RegionFileVersion.java b/net/minecraft/world/level/chunk/storage/RegionFileVersion.java
+index 0c739ca5b01ac0ec35a11fd01c5fc65de97c2852..de7deee4b79c969a7797bd57b657a16404c15303 100644
+--- a/net/minecraft/world/level/chunk/storage/RegionFileVersion.java
++++ b/net/minecraft/world/level/chunk/storage/RegionFileVersion.java
+@@ -21,7 +21,7 @@ import org.slf4j.Logger;
+
+ public class RegionFileVersion {
+ private static final Logger LOGGER = LogUtils.getLogger();
+- private static final Int2ObjectMap<RegionFileVersion> VERSIONS = new Int2ObjectOpenHashMap<>();
++ public static final Int2ObjectMap<RegionFileVersion> VERSIONS = new Int2ObjectOpenHashMap<>(); // Paper - private -> public
+ private static final Object2ObjectMap<String, RegionFileVersion> VERSIONS_BY_NAME = new Object2ObjectOpenHashMap<>();
+ public static final RegionFileVersion VERSION_GZIP = register(
+ new RegionFileVersion(
+diff --git a/net/minecraft/world/level/chunk/storage/SerializableChunkData.java b/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
+index 70a9972252576e039ac126f6057a6ed66b80cdfc..d783c3580ea274a0a9cb07449eb8037bc5a04d76 100644
+--- a/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
++++ b/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
+@@ -120,6 +120,18 @@ public record SerializableChunkData(
+ }
+ }
+ // Paper end - guard against serializing mismatching coordinates
++ // Paper start - Attempt to recalculate regionfile header if it is corrupt
++ // TODO: Check on update
++ public static long getLastWorldSaveTime(final CompoundTag chunkData) {
++ final int dataVersion = ChunkStorage.getVersion(chunkData);
++ if (dataVersion < 2842) { // Level tag is removed after this version
++ final CompoundTag levelData = chunkData.getCompound("Level");
++ return levelData.getLong("LastUpdate");
++ } else {
++ return chunkData.getLong("LastUpdate");
++ }
++ }
++ // Paper end - Attempt to recalculate regionfile header if it is corrupt
+
+ // Paper start - Do not let the server load chunks from newer versions
+ private static final int CURRENT_DATA_VERSION = net.minecraft.SharedConstants.getCurrentVersion().getDataVersion().getVersion();
+@@ -604,7 +616,7 @@ public record SerializableChunkData(
+ compoundTag.putInt("xPos", this.chunkPos.x);
+ compoundTag.putInt("yPos", this.minSectionY);
+ compoundTag.putInt("zPos", this.chunkPos.z);
+- compoundTag.putLong("LastUpdate", this.lastUpdateTime);
++ compoundTag.putLong("LastUpdate", this.lastUpdateTime); // Paper - Diff on change
+ compoundTag.putLong("InhabitedTime", this.inhabitedTime);
+ compoundTag.putString("Status", BuiltInRegistries.CHUNK_STATUS.getKey(this.chunkStatus).toString());
+ if (this.blendingData != null) {