From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Sun, 2 Feb 2020 02:25:10 -0800
Subject: [PATCH] Attempt to recalculate regionfile header if it is corrupt

Instead of trying to relocate the chunk, which never seems to be
the correct choice and ends up duplicating or swapping chunks, we
drop the current regionfile header and recalculate it - hoping
that at least then we don't swap chunks, and maybe recover them
all.
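
For reference, here is a rough, illustrative sketch (not part of the
diff below; the helper names are made up) of the header math the
recovery path relies on: each 4-byte header entry packs a sector
offset and a sector count, and chunk payloads are rounded up to whole
4096-byte sectors. recalculateHeader() rebuilds these entries from
whatever chunk data it finds while scanning the file, preferring the
newest data by LastUpdate.

    // Illustrative only - hypothetical helper, not code from this patch.
    final class RegionHeaderMath {
        // ceil(bytes / 4096): a chunk's payload (including its 4-byte length
        // prefix) always occupies whole 4096-byte sectors on disk.
        static long roundToSectors(long bytes) {
            return (bytes + 4095L) >>> 12;
        }

        // A header entry stores the sector offset in the upper 24 bits and the
        // sector count in the low 8 bits; counts above 255 are clamped, which
        // is how forge-style oversized chunks are marked.
        static int packHeaderEntry(int sectorOffset, int sectorCount) {
            return (sectorOffset << 8) | Math.min(sectorCount, 255);
        }
    }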

diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
index ace99d55c8343fa1907545f47a03f069844b801d..26431a814f6472689484dcc7cd8183fe1676e17e 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
@@ -72,6 +72,18 @@ import net.minecraft.world.ticks.ProtoChunkTicks;
 import org.slf4j.Logger;
 
 public class ChunkSerializer {
+    // Paper start - Attempt to recalculate regionfile header if it is corrupt
+    // TODO: Check on update
+    public static long getLastWorldSaveTime(CompoundTag chunkData) {
+        final int dataVersion = ChunkStorage.getVersion(chunkData);
+        if (dataVersion < 2842) { // Level tag is removed after this version
+            final CompoundTag levelData = chunkData.getCompound("Level");
+            return levelData.getLong("LastUpdate");
+        } else {
+            return chunkData.getLong("LastUpdate");
+        }
+    }
+    // Paper end - Attempt to recalculate regionfile header if it is corrupt
 
     public static final Codec<PalettedContainer<BlockState>> BLOCK_STATE_CODEC = PalettedContainer.codecRW(Block.BLOCK_STATE_REGISTRY, BlockState.CODEC, PalettedContainer.Strategy.SECTION_STATES, Blocks.AIR.defaultBlockState(), null); // Paper - Anti-Xray - Add preset block states
     private static final Logger LOGGER = LogUtils.getLogger();
@@ -450,7 +462,7 @@ public class ChunkSerializer {
         nbttagcompound.putInt("xPos", chunkcoordintpair.x);
         nbttagcompound.putInt("yPos", chunk.getMinSection());
         nbttagcompound.putInt("zPos", chunkcoordintpair.z);
-        nbttagcompound.putLong("LastUpdate", asyncsavedata != null ? asyncsavedata.worldTime : world.getGameTime()); // Paper - async chunk unloading
+        nbttagcompound.putLong("LastUpdate", asyncsavedata != null ? asyncsavedata.worldTime : world.getGameTime()); // Paper - async chunk unloading // Paper - diff on change
         nbttagcompound.putLong("InhabitedTime", chunk.getInhabitedTime());
         nbttagcompound.putString("Status", BuiltInRegistries.CHUNK_STATUS.getKey(chunk.getStatus()).toString());
         BlendingData blendingdata = chunk.getBlendingData();
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
index 554dede2ad0e45d3ee4ccc5510b7644f2e9e4250..7801fac96d728f951989fca36f6a4890a0638c36 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
@@ -41,7 +41,7 @@ public class ChunkStorage implements AutoCloseable {
 
     public ChunkStorage(RegionStorageInfo storageKey, Path directory, DataFixer dataFixer, boolean dsync) {
         this.fixerUpper = dataFixer;
-        this.regionFileCache = new RegionFileStorage(storageKey, directory, dsync); // Paper - rewrite chunk system; async chunk IO
+        this.regionFileCache = new RegionFileStorage(storageKey, directory, dsync, true); // Paper - rewrite chunk system; async chunk IO & Attempt to recalculate regionfile header if it is corrupt
     }
 
     public boolean isOldChunkAround(ChunkPos chunkPos, int checkRadius) {
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionBitmap.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionBitmap.java
index a23dc2f8f4475de1ee35bf18a7a8a53233ccac12..226af44fd469053451a0403a95ffb446face9530 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionBitmap.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionBitmap.java
@@ -9,6 +9,27 @@ import java.util.BitSet;
 public class RegionBitmap {
     private final BitSet used = new BitSet();
 
+    // Paper start - Attempt to recalculate regionfile header if it is corrupt
+    public final void copyFrom(RegionBitmap other) {
+        BitSet thisBitset = this.used;
+        BitSet otherBitset = other.used;
+
+        for (int i = 0; i < Math.max(thisBitset.size(), otherBitset.size()); ++i) {
+            thisBitset.set(i, otherBitset.get(i));
+        }
+    }
+
+    public final boolean tryAllocate(int from, int length) {
+        BitSet bitset = this.used;
+        int firstSet = bitset.nextSetBit(from);
+        if (firstSet > 0 && firstSet < (from + length)) {
+            return false;
+        }
+        bitset.set(from, from + length);
+        return true;
+    }
+    // Paper end - Attempt to recalculate regionfile header if it is corrupt
+
     public void force(int start, int size) {
         this.used.set(start, start + size);
     }
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
index cf43daa019b239464401784938d01af83f9da47c..1362a47943cf1a51a185a15094b1f74c94bf40ef 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
@@ -52,6 +52,354 @@ public class RegionFile implements AutoCloseable {
     @VisibleForTesting
     protected final RegionBitmap usedSectors;
     public final java.util.concurrent.locks.ReentrantLock fileLock = new java.util.concurrent.locks.ReentrantLock(); // Paper
+    // Paper start - Attempt to recalculate regionfile header if it is corrupt
+    private static long roundToSectors(long bytes) {
+        long sectors = bytes >>> 12; // 4096 = 2^12
+        long remainingBytes = bytes & 4095;
+        long sign = -remainingBytes; // sign is 1 if nonzero
+        return sectors + (sign >>> 63);
+    }
+
+    private static final CompoundTag OVERSIZED_COMPOUND = new CompoundTag();
+
+    private CompoundTag attemptRead(long sector, int chunkDataLength, long fileLength) throws IOException {
+        try {
+            if (chunkDataLength < 0) {
+                return null;
+            }
+
+            long offset = sector * 4096L + 4L; // offset for chunk data
+
+            if ((offset + chunkDataLength) > fileLength) {
+                return null;
+            }
+
+            ByteBuffer chunkData = ByteBuffer.allocate(chunkDataLength);
+            if (chunkDataLength != this.file.read(chunkData, offset)) {
+                return null;
+            }
+
+            ((java.nio.Buffer)chunkData).flip();
+
+            byte compressionType = chunkData.get();
+            if (compressionType < 0) { // compressionType & 128 != 0
+                // oversized chunk
+                return OVERSIZED_COMPOUND;
+            }
+
+            RegionFileVersion compression = RegionFileVersion.fromId(compressionType);
+            if (compression == null) {
+                return null;
+            }
+
+            InputStream input = compression.wrap(new ByteArrayInputStream(chunkData.array(), chunkData.position(), chunkDataLength - chunkData.position()));
+
+            return NbtIo.read(new DataInputStream(input));
+        } catch (Exception ex) {
+            return null;
+        }
+    }
+
+    private int getLength(long sector) throws IOException {
+        ByteBuffer length = ByteBuffer.allocate(4);
+        if (4 != this.file.read(length, sector * 4096L)) {
+            return -1;
+        }
+
+        return length.getInt(0);
+    }
+
+    private void backupRegionFile() {
+        Path backup = this.path.getParent().resolve(this.path.getFileName() + "." + new java.util.Random().nextLong() + ".backup");
+        this.backupRegionFile(backup);
+    }
+
+    private void backupRegionFile(Path to) {
+        try {
+            this.file.force(true);
+            LOGGER.warn("Backing up regionfile \"" + this.path.toAbsolutePath() + "\" to " + to.toAbsolutePath());
+            java.nio.file.Files.copy(this.path, to, java.nio.file.StandardCopyOption.COPY_ATTRIBUTES);
+            LOGGER.warn("Backed up the regionfile to " + to.toAbsolutePath());
+        } catch (IOException ex) {
+            LOGGER.error("Failed to backup to " + to.toAbsolutePath(), ex);
+        }
+    }
+
+    private static boolean inSameRegionfile(ChunkPos first, ChunkPos second) {
+        return (first.x & ~31) == (second.x & ~31) && (first.z & ~31) == (second.z & ~31);
+    }
+
+    // note: only call for CHUNK regionfiles
+    boolean recalculateHeader() throws IOException {
+        if (!this.canRecalcHeader) {
+            return false;
+        }
+        ChunkPos ourLowerLeftPosition = RegionFileStorage.getRegionFileCoordinates(this.path);
+        if (ourLowerLeftPosition == null) {
+            LOGGER.error("Unable to get chunk location of regionfile " + this.path.toAbsolutePath() + ", cannot recover header");
+            return false;
+        }
+        synchronized (this) {
+            LOGGER.warn("Corrupt regionfile header detected! Attempting to re-calculate header offsets for regionfile " + this.path.toAbsolutePath(), new Throwable());
+
+            // try to backup file so maybe it could be sent to us for further investigation
+
+            this.backupRegionFile();
+            CompoundTag[] compounds = new CompoundTag[32 * 32]; // only in the regionfile (i.e exclude mojang/aikar oversized data)
+            int[] rawLengths = new int[32 * 32]; // length of chunk data including 4 byte length field, bytes
+            int[] sectorOffsets = new int[32 * 32]; // in sectors
+            boolean[] hasAikarOversized = new boolean[32 * 32];
+
+            long fileLength = this.file.size();
+            long totalSectors = roundToSectors(fileLength);
+
+            // search the regionfile from start to finish for the most up-to-date chunk data
+
+            for (long i = 2, maxSector = Math.min((long)(Integer.MAX_VALUE >>> 8), totalSectors); i < maxSector; ++i) { // first two sectors are header, skip
+                int chunkDataLength = this.getLength(i);
+                CompoundTag compound = this.attemptRead(i, chunkDataLength, fileLength);
+                if (compound == null || compound == OVERSIZED_COMPOUND) {
+                    continue;
+                }
+
+                ChunkPos chunkPos = ChunkSerializer.getChunkCoordinate(compound);
+                if (!inSameRegionfile(ourLowerLeftPosition, chunkPos)) {
+                    LOGGER.error("Ignoring absolute chunk " + chunkPos + " in regionfile as it is not contained in the bounds of the regionfile '" + this.path.toAbsolutePath() + "'. It should be in regionfile (" + (chunkPos.x >> 5) + "," + (chunkPos.z >> 5) + ")");
+                    continue;
+                }
+                int location = (chunkPos.x & 31) | ((chunkPos.z & 31) << 5);
+
+                CompoundTag otherCompound = compounds[location];
+
+                if (otherCompound != null && ChunkSerializer.getLastWorldSaveTime(otherCompound) > ChunkSerializer.getLastWorldSaveTime(compound)) {
+                    continue; // don't overwrite newer data.
+                }
+
+                // aikar oversized?
+                Path aikarOversizedFile = this.getOversizedFile(chunkPos.x, chunkPos.z);
+                boolean isAikarOversized = false;
+                if (Files.exists(aikarOversizedFile)) {
+                    try {
+                        CompoundTag aikarOversizedCompound = this.getOversizedData(chunkPos.x, chunkPos.z);
+                        if (ChunkSerializer.getLastWorldSaveTime(compound) == ChunkSerializer.getLastWorldSaveTime(aikarOversizedCompound)) {
+                            // best we got for an id. hope it's good enough
+                            isAikarOversized = true;
+                        }
+                    } catch (Exception ex) {
+                        LOGGER.error("Failed to read aikar oversized data for absolute chunk (" + chunkPos.x + "," + chunkPos.z + ") in regionfile " + this.path.toAbsolutePath() + ", oversized data for this chunk will be lost", ex);
+                        // fall through, if we can't read aikar oversized we can't risk corrupting chunk data
+                    }
+                }
+
+                hasAikarOversized[location] = isAikarOversized;
+                compounds[location] = compound;
+                rawLengths[location] = chunkDataLength + 4;
+                sectorOffsets[location] = (int)i;
+
+                int chunkSectorLength = (int)roundToSectors(rawLengths[location]);
+                i += chunkSectorLength;
+                --i; // gets incremented next iteration
+            }
+
+            // forge style oversized data is already handled by the local search, and aikar data we just hope
+            // we get it right as aikar data has no identifiers we could use to try and find its corresponding
+            // local data compound
+
+            java.nio.file.Path containingFolder = this.externalFileDir;
+            Path[] regionFiles = Files.list(containingFolder).toArray(Path[]::new);
+            boolean[] oversized = new boolean[32 * 32];
+            RegionFileVersion[] oversizedCompressionTypes = new RegionFileVersion[32 * 32];
+
+            if (regionFiles != null) {
+                int lowerXBound = ourLowerLeftPosition.x; // inclusive
+                int lowerZBound = ourLowerLeftPosition.z; // inclusive
+                int upperXBound = lowerXBound + 32 - 1; // inclusive
+                int upperZBound = lowerZBound + 32 - 1; // inclusive
+
+                // read mojang oversized data
+                for (Path regionFile : regionFiles) {
+                    ChunkPos oversizedCoords = getOversizedChunkPair(regionFile);
+                    if (oversizedCoords == null) {
+                        continue;
+                    }
+
+                    if ((oversizedCoords.x < lowerXBound || oversizedCoords.x > upperXBound) || (oversizedCoords.z < lowerZBound || oversizedCoords.z > upperZBound)) {
+                        continue; // not in our regionfile
+                    }
+
+                    // ensure oversized data is valid & is newer than data in the regionfile
+
+                    int location = (oversizedCoords.x & 31) | ((oversizedCoords.z & 31) << 5);
+
+                    byte[] chunkData;
+                    try {
+                        chunkData = Files.readAllBytes(regionFile);
+                    } catch (Exception ex) {
+                        LOGGER.error("Failed to read oversized chunk data in file " + regionFile.toAbsolutePath() + ", data will be lost", ex);
+                        continue;
+                    }
+
+                    CompoundTag compound = null;
+
+                    // We do not know the compression type, as it's stored in the regionfile. So we need to try all of them
+                    RegionFileVersion compression = null;
+                    for (RegionFileVersion compressionType : RegionFileVersion.VERSIONS.values()) {
+                        try {
+                            DataInputStream in = new DataInputStream(compressionType.wrap(new ByteArrayInputStream(chunkData))); // typical java
+                            compound = NbtIo.read((java.io.DataInput)in);
+                            compression = compressionType;
+                            break; // reaches here iff readNBT does not throw
+                        } catch (Exception ex) {
+                            continue;
+                        }
+                    }
+
+                    if (compound == null) {
+                        LOGGER.error("Failed to read oversized chunk data in file " + regionFile.toAbsolutePath() + ", it's corrupt. Its data will be lost");
+                        continue;
+                    }
+
+                    if (!ChunkSerializer.getChunkCoordinate(compound).equals(oversizedCoords)) {
+                        LOGGER.error("Can't use oversized chunk stored in " + regionFile.toAbsolutePath() + ", got absolute chunkpos: " + ChunkSerializer.getChunkCoordinate(compound) + ", expected " + oversizedCoords);
+                        continue;
+                    }
+
+                    if (compounds[location] == null || ChunkSerializer.getLastWorldSaveTime(compound) > ChunkSerializer.getLastWorldSaveTime(compounds[location])) {
+                        oversized[location] = true;
+                        oversizedCompressionTypes[location] = compression;
+                    }
+                }
+            }
+
+            // now we need to calculate a new offset header
+
+            int[] calculatedOffsets = new int[32 * 32];
+            RegionBitmap newSectorAllocations = new RegionBitmap();
+            newSectorAllocations.force(0, 2); // make space for header
+
+            // allocate sectors for normal chunks
+
+            for (int chunkX = 0; chunkX < 32; ++chunkX) {
+                for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
+                    int location = chunkX | (chunkZ << 5);
+
+                    if (oversized[location]) {
+                        continue;
+                    }
+
+                    int rawLength = rawLengths[location]; // bytes
+                    int sectorOffset = sectorOffsets[location]; // sectors
+                    int sectorLength = (int)roundToSectors(rawLength);
+
+                    if (newSectorAllocations.tryAllocate(sectorOffset, sectorLength)) {
+                        calculatedOffsets[location] = sectorOffset << 8 | (sectorLength > 255 ? 255 : sectorLength); // support forge style oversized
+                    } else {
+                        LOGGER.error("Failed to allocate space for local chunk (overlapping data??) at (" + chunkX + "," + chunkZ + ") in regionfile " + this.path.toAbsolutePath() + ", chunk will be regenerated");
+                    }
+                }
+            }
+
+            // allocate sectors for oversized chunks
+
+            for (int chunkX = 0; chunkX < 32; ++chunkX) {
+                for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
+                    int location = chunkX | (chunkZ << 5);
+
+                    if (!oversized[location]) {
+                        continue;
+                    }
+
+                    int sectorOffset = newSectorAllocations.allocate(1);
+                    int sectorLength = 1;
+
+                    try {
+                        this.file.write(this.createExternalStub(oversizedCompressionTypes[location]), sectorOffset * 4096);
+                        // only allocate in the new offsets if the write succeeds
+                        calculatedOffsets[location] = sectorOffset << 8 | (sectorLength > 255 ? 255 : sectorLength); // support forge style oversized
+                    } catch (IOException ex) {
+                        newSectorAllocations.free(sectorOffset, sectorLength);
+                        LOGGER.error("Failed to write new oversized chunk data holder, local chunk at (" + chunkX + "," + chunkZ + ") in regionfile " + this.path.toAbsolutePath() + " will be regenerated");
+                    }
+                }
+            }
+
+            // rewrite aikar oversized data
+
+            this.oversizedCount = 0;
+            for (int chunkX = 0; chunkX < 32; ++chunkX) {
+                for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
+                    int location = chunkX | (chunkZ << 5);
+                    int isAikarOversized = hasAikarOversized[location] ? 1 : 0;
+
+                    this.oversizedCount += isAikarOversized;
+                    this.oversized[location] = (byte)isAikarOversized;
+                }
+            }
+
+            if (this.oversizedCount > 0) {
+                try {
+                    this.writeOversizedMeta();
+                } catch (Exception ex) {
+                    LOGGER.error("Failed to write aikar oversized chunk meta, all aikar style oversized chunk data will be lost for regionfile " + this.path.toAbsolutePath(), ex);
+                    Files.deleteIfExists(this.getOversizedMetaFile());
+                }
+            } else {
+                Files.deleteIfExists(this.getOversizedMetaFile());
+            }
+
+            this.usedSectors.copyFrom(newSectorAllocations);
+
+            // before we overwrite the old sectors, print a summary of the chunks that got changed.
+
+            LOGGER.info("Starting summary of changes for regionfile " + this.path.toAbsolutePath());
+
+            for (int chunkX = 0; chunkX < 32; ++chunkX) {
+                for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
+                    int location = chunkX | (chunkZ << 5);
+
+                    int oldOffset = this.offsets.get(location);
+                    int newOffset = calculatedOffsets[location];
+
+                    if (oldOffset == newOffset) {
+                        continue;
+                    }
+
+                    this.offsets.put(location, newOffset); // overwrite incorrect offset
+
+                    if (oldOffset == 0) {
+                        // found lost data
+                        LOGGER.info("Found missing data for local chunk (" + chunkX + "," + chunkZ + ") in regionfile " + this.path.toAbsolutePath());
+                    } else if (newOffset == 0) {
+                        LOGGER.warn("Data for local chunk (" + chunkX + "," + chunkZ + ") could not be recovered in regionfile " + this.path.toAbsolutePath() + ", it will be regenerated");
+                    } else {
+                        LOGGER.info("Local chunk (" + chunkX + "," + chunkZ + ") changed to point to newer data or correct chunk in regionfile " + this.path.toAbsolutePath());
+                    }
+                }
+            }
+
+            LOGGER.info("End of change summary for regionfile " + this.path.toAbsolutePath());
+
+            // simply destroy the timestamp header, it's not used
+
+            for (int i = 0; i < 32 * 32; ++i) {
+                this.timestamps.put(i, calculatedOffsets[i] != 0 ? (int)System.currentTimeMillis() : 0); // write a valid timestamp for valid chunks, I do not want to find out whatever dumb program actually checks this
+            }
+
+            // write new header
+            try {
+                this.flush();
+                this.file.force(true); // try to ensure it goes through...
+                LOGGER.info("Successfully wrote new header to disk for regionfile " + this.path.toAbsolutePath());
+            } catch (IOException ex) {
+                LOGGER.error("Failed to write new header to disk for regionfile " + this.path.toAbsolutePath(), ex);
+            }
+        }
+
+        return true;
+    }
+
+    final boolean canRecalcHeader; // final forces compile fail on new constructor
+    // Paper end - Attempt to recalculate regionfile header if it is corrupt
     // Paper start - Cache chunk status
     private final net.minecraft.world.level.chunk.status.ChunkStatus[] statuses = new net.minecraft.world.level.chunk.status.ChunkStatus[32 * 32];
 
@@ -79,8 +427,18 @@ public class RegionFile implements AutoCloseable {
     public RegionFile(RegionStorageInfo storageKey, Path directory, Path path, boolean dsync) throws IOException {
         this(storageKey, directory, path, RegionFileVersion.getCompressionFormat(), dsync); // Paper - Configurable region compression format
     }
+    // Paper start - add can recalc flag
+    public RegionFile(RegionStorageInfo storageKey, Path directory, Path path, boolean dsync, boolean canRecalcHeader) throws IOException {
+        this(storageKey, directory, path, RegionFileVersion.getCompressionFormat(), dsync, canRecalcHeader);
+    }
 
     public RegionFile(RegionStorageInfo storageKey, Path path, Path directory, RegionFileVersion compressionFormat, boolean dsync) throws IOException {
+        this(storageKey, path, directory, compressionFormat, dsync, true);
+    }
+
+    public RegionFile(RegionStorageInfo storageKey, Path path, Path directory, RegionFileVersion compressionFormat, boolean dsync, boolean canRecalcHeader) throws IOException {
+        this.canRecalcHeader = canRecalcHeader;
+        // Paper end - add can recalc flag
         this.header = ByteBuffer.allocateDirect(8192);
         this.usedSectors = new RegionBitmap();
         this.info = storageKey;
@@ -110,14 +468,16 @@ public class RegionFile implements AutoCloseable {
                     RegionFile.LOGGER.warn("Region file {} has truncated header: {}", path, i);
                 }
 
-                long j = Files.size(path);
+                final long j = Files.size(path); final long regionFileSize = j; // Paper - recalculate header on header corruption
 
-                for (int k = 0; k < 1024; ++k) {
-                    int l = this.offsets.get(k);
+                boolean needsHeaderRecalc = false; // Paper - recalculate header on header corruption
+                boolean hasBackedUp = false; // Paper - recalculate header on header corruption
+                for (int k = 0; k < 1024; ++k) { final int headerLocation = k; // Paper - we expect this to be the header location
+                    final int l = this.offsets.get(k);
 
                     if (l != 0) {
-                        int i1 = RegionFile.getSectorNumber(l);
-                        int j1 = RegionFile.getNumSectors(l);
+                        final int i1 = RegionFile.getSectorNumber(l); final int offset = i1; // Paper - we expect this to be offset in file in sectors
+                        int j1 = RegionFile.getNumSectors(l); final int sectorLength; // Paper - diff on change, we expect this to be sector length of region - watch out for reassignments
                         // Spigot start
                         if (j1 == 255) {
                             // We're maxed out, so we need to read the proper length from the section
@@ -126,21 +486,66 @@ public class RegionFile implements AutoCloseable {
                             j1 = (realLen.getInt(0) + 4) / 4096 + 1;
                         }
                         // Spigot end
+                        sectorLength = j1; // Paper - diff on change, we expect this to be sector length of region
 
                         if (i1 < 2) {
                             RegionFile.LOGGER.warn("Region file {} has invalid sector at index: {}; sector {} overlaps with header", new Object[]{path, k, i1});
-                            this.offsets.put(k, 0);
+                            //this.offsets.put(k, 0); // Paper - we catch this, but need it in the header for the summary change
                         } else if (j1 == 0) {
                             RegionFile.LOGGER.warn("Region file {} has an invalid sector at index: {}; size has to be > 0", path, k);
-                            this.offsets.put(k, 0);
+                            //this.offsets.put(k, 0); // Paper - we catch this, but need it in the header for the summary change
                         } else if ((long) i1 * 4096L > j) {
                             RegionFile.LOGGER.warn("Region file {} has an invalid sector at index: {}; sector {} is out of bounds", new Object[]{path, k, i1});
-                            this.offsets.put(k, 0);
+                            //this.offsets.put(k, 0); // Paper - we catch this, but need it in the header for the summary change
                         } else {
-                            this.usedSectors.force(i1, j1);
+                            //this.usedSectors.force(i1, j1); // Paper - move this down so we can check if it fails to allocate
+                        }
+                        // Paper start - recalculate header on header corruption
+                        if (offset < 2 || sectorLength <= 0 || ((long)offset * 4096L) > regionFileSize) {
+                            if (canRecalcHeader) {
+                                LOGGER.error("Detected invalid header for regionfile " + this.path.toAbsolutePath() + "! Recalculating header...");
+                                needsHeaderRecalc = true;
+                                break;
+                            } else {
+                                // location = chunkX | (chunkZ << 5);
+                                LOGGER.error("Detected invalid header for regionfile " + this.path.toAbsolutePath() +
+                                        "! Cannot recalculate, removing local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") from header");
+                                if (!hasBackedUp) {
+                                    hasBackedUp = true;
+                                    this.backupRegionFile();
+                                }
+                                this.timestamps.put(headerLocation, 0); // be consistent, delete the timestamp too
+                                this.offsets.put(headerLocation, 0); // delete the entry from header
+                                continue;
+                            }
+                        }
+                        boolean failedToAllocate = !this.usedSectors.tryAllocate(offset, sectorLength);
+                        if (failedToAllocate) {
+                            LOGGER.error("Overlapping allocation by local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") in regionfile " + this.path.toAbsolutePath());
                         }
+                        if (failedToAllocate & !canRecalcHeader) {
+                            // location = chunkX | (chunkZ << 5);
+                            LOGGER.error("Detected invalid header for regionfile " + this.path.toAbsolutePath() +
+                                    "! Cannot recalculate, removing local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") from header");
+                            if (!hasBackedUp) {
+                                hasBackedUp = true;
+                                this.backupRegionFile();
+                            }
+                            this.timestamps.put(headerLocation, 0); // be consistent, delete the timestamp too
+                            this.offsets.put(headerLocation, 0); // delete the entry from header
+                            continue;
+                        }
+                        needsHeaderRecalc |= failedToAllocate;
+                        // Paper end - recalculate header on header corruption
                     }
                 }
+                // Paper start - recalculate header on header corruption
+                // we move the recalc here so comparison to old header is correct when logging to console
+                if (needsHeaderRecalc) { // true if header gave us overlapping allocations or had other issues
+                    LOGGER.error("Recalculating regionfile " + this.path.toAbsolutePath() + ", header gave erroneous offsets & locations");
+                    this.recalculateHeader();
+                }
+                // Paper end
             }
 
         }
@@ -151,11 +556,36 @@ public class RegionFile implements AutoCloseable {
     }
 
     private Path getExternalChunkPath(ChunkPos chunkPos) {
-        String s = "c." + chunkPos.x + "." + chunkPos.z + ".mcc";
+        String s = "c." + chunkPos.x + "." + chunkPos.z + ".mcc"; // Paper - diff on change
 
         return this.externalFileDir.resolve(s);
     }
 
+    // Paper start
+    private static ChunkPos getOversizedChunkPair(Path file) {
+        String fileName = file.getFileName().toString();
+
+        if (!fileName.startsWith("c.") || !fileName.endsWith(".mcc")) {
+            return null;
+        }
+
+        String[] split = fileName.split("\\.");
+
+        if (split.length != 4) {
+            return null;
+        }
+
+        try {
+            int x = Integer.parseInt(split[1]);
+            int z = Integer.parseInt(split[2]);
+
+            return new ChunkPos(x, z);
+        } catch (NumberFormatException ex) {
+            return null;
+        }
+    }
+    // Paper end
+
     @Nullable
     public synchronized DataInputStream getChunkDataInputStream(ChunkPos pos) throws IOException {
         int i = this.getOffset(pos);
@@ -179,6 +609,11 @@ public class RegionFile implements AutoCloseable {
             ((java.nio.Buffer) bytebuffer).flip(); // CraftBukkit - decompile error
             if (bytebuffer.remaining() < 5) {
                 RegionFile.LOGGER.error("Chunk {} header is truncated: expected {} but read {}", new Object[]{pos, l, bytebuffer.remaining()});
+                // Paper start - recalculate header on regionfile corruption
+                if (this.canRecalcHeader && this.recalculateHeader()) {
+                    return this.getChunkDataInputStream(pos);
+                }
+                // Paper end - recalculate header on regionfile corruption
                 return null;
             } else {
                 int i1 = bytebuffer.getInt();
@@ -186,6 +621,11 @@ public class RegionFile implements AutoCloseable {
 
                 if (i1 == 0) {
                     RegionFile.LOGGER.warn("Chunk {} is allocated, but stream is missing", pos);
+                    // Paper start - recalculate header on regionfile corruption
+                    if (this.canRecalcHeader && this.recalculateHeader()) {
+                        return this.getChunkDataInputStream(pos);
+                    }
+                    // Paper end - recalculate header on regionfile corruption
                     return null;
                 } else {
                     int j1 = i1 - 1;
@@ -193,18 +633,45 @@ public class RegionFile implements AutoCloseable {
                     if (RegionFile.isExternalStreamChunk(b0)) {
                         if (j1 != 0) {
                             RegionFile.LOGGER.warn("Chunk has both internal and external streams");
+                            // Paper start - recalculate header on regionfile corruption
+                            if (this.canRecalcHeader && this.recalculateHeader()) {
+                                return this.getChunkDataInputStream(pos);
+                            }
+                            // Paper end - recalculate header on regionfile corruption
                         }
 
-                        return this.createExternalChunkInputStream(pos, RegionFile.getExternalChunkVersion(b0));
+                        // Paper start - recalculate header on regionfile corruption
+                        final DataInputStream ret = this.createExternalChunkInputStream(pos, RegionFile.getExternalChunkVersion(b0));
+                        if (ret == null && this.canRecalcHeader && this.recalculateHeader()) {
+                            return this.getChunkDataInputStream(pos);
+                        }
+                        return ret;
+                        // Paper end - recalculate header on regionfile corruption
                     } else if (j1 > bytebuffer.remaining()) {
                         RegionFile.LOGGER.error("Chunk {} stream is truncated: expected {} but read {}", new Object[]{pos, j1, bytebuffer.remaining()});
+                        // Paper start - recalculate header on regionfile corruption
+                        if (this.canRecalcHeader && this.recalculateHeader()) {
+                            return this.getChunkDataInputStream(pos);
+                        }
+                        // Paper end - recalculate header on regionfile corruption
                         return null;
                     } else if (j1 < 0) {
                         RegionFile.LOGGER.error("Declared size {} of chunk {} is negative", i1, pos);
+                        // Paper start - recalculate header on regionfile corruption
+                        if (this.canRecalcHeader && this.recalculateHeader()) {
+                            return this.getChunkDataInputStream(pos);
+                        }
+                        // Paper end - recalculate header on regionfile corruption
                         return null;
                     } else {
                         JvmProfiler.INSTANCE.onRegionFileRead(this.info, pos, this.version, j1);
-                        return this.createChunkInputStream(pos, b0, RegionFile.createStream(bytebuffer, j1));
+                        // Paper start - recalculate header on regionfile corruption
+                        final DataInputStream ret = this.createChunkInputStream(pos, b0, RegionFile.createStream(bytebuffer, j1));
+                        if (ret == null && this.canRecalcHeader && this.recalculateHeader()) {
+                            return this.getChunkDataInputStream(pos);
+                        }
+                        return ret;
+                        // Paper end - recalculate header on regionfile corruption
                     }
                 }
             }
@@ -390,10 +857,15 @@ public class RegionFile implements AutoCloseable {
     }
 
     private ByteBuffer createExternalStub() {
+        // Paper start - add version param
+        return this.createExternalStub(this.version);
+    }
+    private ByteBuffer createExternalStub(RegionFileVersion version) {
+        // Paper end - add version param
         ByteBuffer bytebuffer = ByteBuffer.allocate(5);
 
         bytebuffer.putInt(1);
-        bytebuffer.put((byte) (this.version.getId() | 128));
+        bytebuffer.put((byte) (version.getId() | 128)); // Paper - replace with version param
         ((java.nio.Buffer) bytebuffer).flip(); // CraftBukkit - decompile error
         return bytebuffer;
     }
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
index c33640859aab837c85f3e860fe2241a0e78bb09a..1090b7e36e3c1c105bc36135b82751c651f237d4 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
@@ -25,6 +25,7 @@ public class RegionFileStorage implements AutoCloseable {
     private final RegionStorageInfo info;
     private final Path folder;
     private final boolean sync;
+    private final boolean isChunkData; // Paper
 
     // Paper start - cache regionfile does not exist state
     static final int MAX_NON_EXISTING_CACHE = 1024 * 64;
@@ -56,11 +57,42 @@ public class RegionFileStorage implements AutoCloseable {
     // Paper end - cache regionfile does not exist state
 
     protected RegionFileStorage(RegionStorageInfo storageKey, Path directory, boolean dsync) { // Paper - protected constructor
+        // Paper start - add isChunkData param
+        this(storageKey, directory, dsync, false);
+    }
+    RegionFileStorage(RegionStorageInfo storageKey, Path directory, boolean dsync, boolean isChunkData) {
+        this.isChunkData = isChunkData;
+        // Paper end - add isChunkData param
         this.folder = directory;
         this.sync = dsync;
         this.info = storageKey;
     }
 
+    // Paper start
+    @Nullable
+    public static ChunkPos getRegionFileCoordinates(Path file) {
+        String fileName = file.getFileName().toString();
+        if (!fileName.startsWith("r.") || !fileName.endsWith(".mca")) {
+            return null;
+        }
+
+        String[] split = fileName.split("\\.");
+
+        if (split.length != 4) {
+            return null;
+        }
+
+        try {
+            int x = Integer.parseInt(split[1]);
+            int z = Integer.parseInt(split[2]);
+
+            return new ChunkPos(x << 5, z << 5);
+        } catch (NumberFormatException ex) {
+            return null;
+        }
+    }
+    // Paper end
+
     // Paper start
     public synchronized RegionFile getRegionFileIfLoaded(ChunkPos chunkcoordintpair) {
         return this.regionCache.getAndMoveToFirst(ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()));
@@ -101,7 +133,7 @@ public class RegionFileStorage implements AutoCloseable {
             // Paper - only create directory if not existing only - moved down
             Path path = this.folder;
             int j = chunkcoordintpair.getRegionX();
-            Path path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca");
+            Path path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca"); // Paper - diff on change
             if (existingOnly && !java.nio.file.Files.exists(path1)) { // Paper start - cache regionfile does not exist state
                 this.markNonExisting(regionPos);
                 return null; // CraftBukkit
@@ -110,7 +142,7 @@ public class RegionFileStorage implements AutoCloseable {
             }
             // Paper end - cache regionfile does not exist state
             FileUtil.createDirectoriesSafe(this.folder); // Paper - only create directory if not existing only - moved from above
-            RegionFile regionfile1 = new RegionFile(this.info, path1, this.folder, this.sync);
+            RegionFile regionfile1 = new RegionFile(this.info, path1, this.folder, this.sync, this.isChunkData); // Paper - allow for chunk regionfiles to regen header
 
             this.regionCache.putAndMoveToFirst(i, regionfile1);
             // Paper start
@@ -167,6 +199,13 @@ public class RegionFileStorage implements AutoCloseable {
         if (regionfile == null) {
             return null;
         }
+        // Paper start - Add regionfile parameter
+        return this.read(pos, regionfile);
+    }
+    public CompoundTag read(ChunkPos pos, RegionFile regionfile) throws IOException {
+        // We add the regionfile parameter to avoid the potential deadlock (on fileLock) if we went back to obtain a regionfile
+        // if we decide to re-read
+        // Paper end
         // CraftBukkit end
         try { // Paper
         DataInputStream datainputstream = regionfile.getChunkDataInputStream(pos);
@@ -183,6 +222,20 @@ public class RegionFileStorage implements AutoCloseable {
             try {
                 if (datainputstream != null) {
                     nbttagcompound = NbtIo.read((DataInput) datainputstream);
+                    // Paper start - recover from corrupt regionfile header
+                    if (this.isChunkData) {
+                        ChunkPos chunkPos = ChunkSerializer.getChunkCoordinate(nbttagcompound);
+                        if (!chunkPos.equals(pos)) {
+                            net.minecraft.server.MinecraftServer.LOGGER.error("Attempting to read chunk data at " + pos + " but got chunk data for " + chunkPos + " instead! Attempting regionfile recalculation for regionfile " + regionfile.getPath().toAbsolutePath());
+                            if (regionfile.recalculateHeader()) {
+                                regionfile.fileLock.lock(); // otherwise we will unlock twice and only lock once.
+                                return this.read(pos, regionfile);
+                            }
+                            net.minecraft.server.MinecraftServer.LOGGER.error("Can't recalculate regionfile header, regenerating chunk " + pos + " for " + regionfile.getPath().toAbsolutePath());
+                            return null;
+                        }
+                    }
+                    // Paper end - recover from corrupt regionfile header
                     break label43;
                 }
 
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileVersion.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileVersion.java
index ef68b57ef1d8d7cb317c417569dd23a777fba4ad..f4a39f49b354c560d614483db1cd3dfc154e94b4 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileVersion.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileVersion.java
@@ -21,7 +21,7 @@ import org.slf4j.Logger;
 
 public class RegionFileVersion {
     private static final Logger LOGGER = LogUtils.getLogger();
-    private static final Int2ObjectMap<RegionFileVersion> VERSIONS = new Int2ObjectOpenHashMap<>();
+    public static final Int2ObjectMap<RegionFileVersion> VERSIONS = new Int2ObjectOpenHashMap<>(); // Paper - private -> public
     private static final Object2ObjectMap<String, RegionFileVersion> VERSIONS_BY_NAME = new Object2ObjectOpenHashMap<>();
     public static final RegionFileVersion VERSION_GZIP = register(
         new RegionFileVersion(