From 64d835362993ef8a7615257bac178c05c139c555 Mon Sep 17 00:00:00 2001 From: CalDescent Date: Thu, 2 Feb 2023 15:54:03 +0100 Subject: [PATCH 01/16] Added V2 support in the block archive, and added feature to rebuild a V1 block archive using V2 block serialization. Should drastically reduce the archive size once rebuilt. --- .../qortal/api/resource/BlocksResource.java | 12 +- src/main/java/org/qortal/block/Block.java | 4 + .../org/qortal/controller/Controller.java | 21 +++- .../repository/BlockArchiveRebuilder.java | 119 ++++++++++++++++++ .../network/message/CachedBlockV2Message.java | 43 +++++++ .../qortal/repository/BlockArchiveReader.java | 66 +++++++--- .../qortal/repository/BlockArchiveWriter.java | 109 ++++++++++++++-- .../transform/block/BlockTransformer.java | 28 +++-- 8 files changed, 361 insertions(+), 41 deletions(-) create mode 100644 src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java create mode 100644 src/main/java/org/qortal/network/message/CachedBlockV2Message.java diff --git a/src/main/java/org/qortal/api/resource/BlocksResource.java b/src/main/java/org/qortal/api/resource/BlocksResource.java index 15541802..20207c70 100644 --- a/src/main/java/org/qortal/api/resource/BlocksResource.java +++ b/src/main/java/org/qortal/api/resource/BlocksResource.java @@ -48,6 +48,7 @@ import org.qortal.repository.RepositoryManager; import org.qortal.transform.TransformationException; import org.qortal.transform.block.BlockTransformer; import org.qortal.utils.Base58; +import org.qortal.utils.Triple; @Path("/blocks") @Tag(name = "Blocks") @@ -165,10 +166,13 @@ public class BlocksResource { } // Not found, so try the block archive - byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository); - if (bytes != null) { - if (version != 1) { - throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Archived blocks require version 1"); + Triple serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository); + if (serializedBlock != null) { + byte[] bytes = serializedBlock.getA(); + Integer serializationVersion = serializedBlock.getB(); + if (version != serializationVersion) { + // TODO: we could quite easily reserialize the block with the requested version + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Block is not stored using requested serialization version."); } return Base58.encode(bytes); } diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index 3f306b93..540f8cf7 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -657,6 +657,10 @@ public class Block { return this.atStates; } + public byte[] getAtStatesHash() { + return this.atStatesHash; + } + /** * Return expanded info on block's online accounts. *
<p>
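For reference, fetchSerializedBlockBytesForHeight() and fetchSerializedBlockBytesForSignature() now return the serialization version and archive height alongside the raw bytes, as a Triple<byte[], Integer, Integer> (matching the "new Triple<>(blockBytes, version, height)" construction in BlockArchiveReader below). A minimal, illustrative sketch of a call site, assuming that signature and the usual getA()/getB()/getC() accessors on Triple:

    Triple<byte[], Integer, Integer> serializedBlock =
            BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
    if (serializedBlock != null) {
        byte[] bytes = serializedBlock.getA();                  // raw serialized block bytes
        Integer serializationVersion = serializedBlock.getB();  // 1 or 2
        Integer archiveHeight = serializedBlock.getC();         // height the block was archived at
        // ... branch on serializationVersion, as BlocksResource (above) and Controller (below) do
    }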
diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index e9e1fcc2..ed1d2d07 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -1379,9 +1379,24 @@ public class Controller extends Thread { // If we have no block data, we should check the archive in case it's there if (blockData == null) { if (Settings.getInstance().isArchiveEnabled()) { - byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository); - if (bytes != null) { - CachedBlockMessage blockMessage = new CachedBlockMessage(bytes); + Triple serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository); + if (serializedBlock != null) { + byte[] bytes = serializedBlock.getA(); + Integer serializationVersion = serializedBlock.getB(); + + Message blockMessage; + switch (serializationVersion) { + case 1: + blockMessage = new CachedBlockMessage(bytes); + break; + + case 2: + blockMessage = new CachedBlockV2Message(bytes); + break; + + default: + return; + } blockMessage.setId(message.getId()); // This call also causes the other needed data to be pulled in from repository diff --git a/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java b/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java new file mode 100644 index 00000000..74201251 --- /dev/null +++ b/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java @@ -0,0 +1,119 @@ +package org.qortal.controller.repository; + +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; +import org.qortal.controller.Synchronizer; +import org.qortal.repository.*; +import org.qortal.settings.Settings; +import org.qortal.transform.TransformationException; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; + + +public class BlockArchiveRebuilder { + + private static final Logger LOGGER = LogManager.getLogger(BlockArchiveRebuilder.class); + + private final int serializationVersion; + + public BlockArchiveRebuilder(int serializationVersion) { + this.serializationVersion = serializationVersion; + } + + public void start() throws DataException, IOException { + if (!Settings.getInstance().isArchiveEnabled() || Settings.getInstance().isLite()) { + return; + } + + // New archive path is in a different location from original archive path, to avoid conflicts. + // It will be moved later, once the process is complete. 
+ final Path newArchivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive-rebuild"); + final Path originalArchivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive"); + + // Delete archive-rebuild if it exists from a previous attempt + FileUtils.deleteDirectory(newArchivePath.toFile()); + + try (final Repository repository = RepositoryManager.getRepository()) { + int startHeight = 1; // We need to rebuild the entire archive + + LOGGER.info("Rebuilding block archive from height {}...", startHeight); + + while (!Controller.isStopping()) { + repository.discardChanges(); + + Thread.sleep(1000L); + + // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages + if (Synchronizer.getInstance().isSynchronizing()) { + continue; + } + + // Rebuild archive + try { + final int maximumArchiveHeight = BlockArchiveReader.getInstance().getHeightOfLastArchivedBlock(); + if (startHeight >= maximumArchiveHeight) { + // We've finished. + // Delete existing archive and move the newly built one into its place + FileUtils.deleteDirectory(originalArchivePath.toFile()); + FileUtils.moveDirectory(newArchivePath.toFile(), originalArchivePath.toFile()); + LOGGER.info("Block archive successfully rebuilt"); + return; + } + + BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, serializationVersion, newArchivePath, repository); + + // Set data source to BLOCK_ARCHIVE as we are rebuilding + writer.setDataSource(BlockArchiveWriter.BlockArchiveDataSource.BLOCK_ARCHIVE); + + // We can't enforce the 100MB file size target, as the final file needs to contain all blocks + // that exist in the current archive. Otherwise, the final blocks in the archive will be lost. + writer.setShouldEnforceFileSizeTarget(false); + + // We want to log the rebuild progress + writer.setShouldLogProgress(true); + + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + switch (result) { + case OK: + // Increment block archive height + startHeight += writer.getWrittenCount(); + repository.saveChanges(); + break; + + case STOPPING: + return; + + // We've reached the limit of the blocks we can archive + // Sleep for a while to allow more to become available + case NOT_ENOUGH_BLOCKS: + // This shouldn't happen, as we're not enforcing minimum file sizes + repository.discardChanges(); + throw new DataException("Unable to rebuild archive due to unexpected NOT_ENOUGH_BLOCKS response."); + + case BLOCK_NOT_FOUND: + // We tried to archive a block that didn't exist. This is a major failure and likely means + // that a bootstrap or re-sync is needed. Try again every minute until then. + LOGGER.info("Error: block not found when rebuilding archive. 
If this error persists, " + + "a bootstrap or re-sync may be needed."); + repository.discardChanges(); + throw new DataException("Unable to rebuild archive because a block is missing."); + } + + } catch (IOException | TransformationException e) { + LOGGER.info("Caught exception when rebuilding block archive", e); + } + + } + } catch (InterruptedException e) { + // Do nothing + } finally { + // Delete archive-rebuild if it still exists, as that means something went wrong + FileUtils.deleteDirectory(newArchivePath.toFile()); + } + } + +} diff --git a/src/main/java/org/qortal/network/message/CachedBlockV2Message.java b/src/main/java/org/qortal/network/message/CachedBlockV2Message.java new file mode 100644 index 00000000..c981293d --- /dev/null +++ b/src/main/java/org/qortal/network/message/CachedBlockV2Message.java @@ -0,0 +1,43 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Ints; +import org.qortal.block.Block; +import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +// This is an OUTGOING-only Message which more readily lends itself to being cached +public class CachedBlockV2Message extends Message implements Cloneable { + + public CachedBlockV2Message(Block block) throws TransformationException { + super(MessageType.BLOCK_V2); + + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + try { + bytes.write(Ints.toByteArray(block.getBlockData().getHeight())); + + bytes.write(BlockTransformer.toBytes(block)); + } catch (IOException e) { + throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream"); + } + + this.dataBytes = bytes.toByteArray(); + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + public CachedBlockV2Message(byte[] cachedBytes) { + super(MessageType.BLOCK_V2); + + this.dataBytes = cachedBytes; + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) { + throw new UnsupportedOperationException("CachedBlockMessageV2 is for outgoing messages only"); + } + +} diff --git a/src/main/java/org/qortal/repository/BlockArchiveReader.java b/src/main/java/org/qortal/repository/BlockArchiveReader.java index 311d21c7..c5878563 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveReader.java +++ b/src/main/java/org/qortal/repository/BlockArchiveReader.java @@ -3,10 +3,7 @@ package org.qortal.repository; import com.google.common.primitives.Ints; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.qortal.data.at.ATStateData; import org.qortal.data.block.BlockArchiveData; -import org.qortal.data.block.BlockData; -import org.qortal.data.transaction.TransactionData; import org.qortal.settings.Settings; import org.qortal.transform.TransformationException; import org.qortal.transform.block.BlockTransformation; @@ -72,15 +69,30 @@ public class BlockArchiveReader { this.fetchFileList(); } - byte[] serializedBytes = this.fetchSerializedBlockBytesForHeight(height); - if (serializedBytes == null) { + Triple serializedBlock = this.fetchSerializedBlockBytesForHeight(height); + byte[] serializedBytes = serializedBlock.getA(); + Integer serializationVersion = serializedBlock.getB(); + if (serializedBytes == null || serializationVersion == null) { return null; } ByteBuffer byteBuffer = ByteBuffer.wrap(serializedBytes); BlockTransformation 
blockInfo = null; try { - blockInfo = BlockTransformer.fromByteBuffer(byteBuffer); + switch (serializationVersion) { + case 1: + blockInfo = BlockTransformer.fromByteBuffer(byteBuffer); + break; + + case 2: + blockInfo = BlockTransformer.fromByteBufferV2(byteBuffer); + break; + + default: + // Invalid serialization version + return null; + } + if (blockInfo != null && blockInfo.getBlockData() != null) { // Block height is stored outside of the main serialized bytes, so it // won't be set automatically. @@ -168,15 +180,17 @@ public class BlockArchiveReader { return null; } - public byte[] fetchSerializedBlockBytesForSignature(byte[] signature, boolean includeHeightPrefix, Repository repository) { + public Triple fetchSerializedBlockBytesForSignature(byte[] signature, boolean includeHeightPrefix, Repository repository) { if (this.fileListCache == null) { this.fetchFileList(); } Integer height = this.fetchHeightForSignature(signature, repository); if (height != null) { - byte[] blockBytes = this.fetchSerializedBlockBytesForHeight(height); - if (blockBytes == null) { + Triple serializedBlock = this.fetchSerializedBlockBytesForHeight(height); + byte[] blockBytes = serializedBlock.getA(); + Integer version = serializedBlock.getB(); + if (blockBytes == null || version == null) { return null; } @@ -187,18 +201,18 @@ public class BlockArchiveReader { try { bytes.write(Ints.toByteArray(height)); bytes.write(blockBytes); - return bytes.toByteArray(); + return new Triple<>(bytes.toByteArray(), version, height); } catch (IOException e) { return null; } } - return blockBytes; + return new Triple<>(blockBytes, version, height); } return null; } - public byte[] fetchSerializedBlockBytesForHeight(int height) { + public Triple fetchSerializedBlockBytesForHeight(int height) { String filename = this.getFilenameForHeight(height); if (filename == null) { // We don't have this block in the archive @@ -221,7 +235,7 @@ public class BlockArchiveReader { // End of fixed length header // Make sure the version is one we recognize - if (version != 1) { + if (version != 1 && version != 2) { LOGGER.info("Error: unknown version in file {}: {}", filename, version); return null; } @@ -258,7 +272,7 @@ public class BlockArchiveReader { byte[] blockBytes = new byte[blockLength]; file.read(blockBytes); - return blockBytes; + return new Triple<>(blockBytes, version, height); } catch (FileNotFoundException e) { LOGGER.info("File {} not found: {}", filename, e.getMessage()); @@ -279,6 +293,30 @@ public class BlockArchiveReader { } } + public int getHeightOfLastArchivedBlock() { + if (this.fileListCache == null) { + this.fetchFileList(); + } + + int maxEndHeight = 0; + + Iterator it = this.fileListCache.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry pair = (Map.Entry) it.next(); + if (pair == null && pair.getKey() == null && pair.getValue() == null) { + continue; + } + Triple heightInfo = (Triple) pair.getValue(); + Integer endHeight = heightInfo.getB(); + + if (endHeight != null && endHeight > maxEndHeight) { + maxEndHeight = endHeight; + } + } + + return maxEndHeight; + } + public void invalidateFileListCache() { this.fileListCache = null; } diff --git a/src/main/java/org/qortal/repository/BlockArchiveWriter.java b/src/main/java/org/qortal/repository/BlockArchiveWriter.java index 5127bf9b..c2eb17c9 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveWriter.java +++ b/src/main/java/org/qortal/repository/BlockArchiveWriter.java @@ -6,10 +6,13 @@ import org.apache.logging.log4j.Logger; import 
org.qortal.block.Block; import org.qortal.controller.Controller; import org.qortal.controller.Synchronizer; +import org.qortal.data.at.ATStateData; import org.qortal.data.block.BlockArchiveData; import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; import org.qortal.settings.Settings; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.transform.block.BlockTransformer; import java.io.ByteArrayOutputStream; @@ -18,6 +21,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.List; public class BlockArchiveWriter { @@ -28,27 +32,57 @@ public class BlockArchiveWriter { BLOCK_NOT_FOUND } + public enum BlockArchiveDataSource { + BLOCK_REPOSITORY, // To build an archive from the Blocks table + BLOCK_ARCHIVE // To build a new archive from an existing archive + } + private static final Logger LOGGER = LogManager.getLogger(BlockArchiveWriter.class); public static final long DEFAULT_FILE_SIZE_TARGET = 100 * 1024 * 1024; // 100MiB private int startHeight; private final int endHeight; + private final int serializationVersion; + private final Path archivePath; private final Repository repository; private long fileSizeTarget = DEFAULT_FILE_SIZE_TARGET; private boolean shouldEnforceFileSizeTarget = true; + // Default data source to BLOCK_REPOSITORY; can optionally be overridden + private BlockArchiveDataSource dataSource = BlockArchiveDataSource.BLOCK_REPOSITORY; + + private boolean shouldLogProgress = false; + private int writtenCount; private int lastWrittenHeight; private Path outputPath; - public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) { + /** + * Instantiate a BlockArchiveWriter using a custom archive path + * @param startHeight + * @param endHeight + * @param repository + */ + public BlockArchiveWriter(int startHeight, int endHeight, int serializationVersion, Path archivePath, Repository repository) { this.startHeight = startHeight; this.endHeight = endHeight; + this.serializationVersion = serializationVersion; + this.archivePath = archivePath.toAbsolutePath(); this.repository = repository; } + /** + * Instantiate a BlockArchiveWriter using the default archive path and version + * @param startHeight + * @param endHeight + * @param repository + */ + public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) { + this(startHeight, endHeight, 1, Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository); + } + public static int getMaxArchiveHeight(Repository repository) throws DataException { // We must only archive trimmed blocks, or the archive will grow far too large final int accountSignaturesTrimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight(); @@ -72,8 +106,7 @@ public class BlockArchiveWriter { public BlockArchiveWriteResult write() throws DataException, IOException, TransformationException, InterruptedException { // Create the archive folder if it doesn't exist - // This is a subfolder of the db directory, to make bootstrapping easier - Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath(); + // This is generally a subfolder of the db directory, to make bootstrapping easier try { Files.createDirectories(archivePath); } catch (IOException e) { @@ -95,8 +128,7 @@ public class BlockArchiveWriter { LOGGER.info(String.format("Fetching blocks from height 
%d...", startHeight)); int i = 0; - while (headerBytes.size() + bytes.size() < this.fileSizeTarget - || this.shouldEnforceFileSizeTarget == false) { + while (headerBytes.size() + bytes.size() < this.fileSizeTarget) { if (Controller.isStopping()) { return BlockArchiveWriteResult.STOPPING; @@ -112,7 +144,28 @@ public class BlockArchiveWriter { //LOGGER.info("Fetching block {}...", currentHeight); - BlockData blockData = repository.getBlockRepository().fromHeight(currentHeight); + BlockData blockData = null; + List transactions = null; + List atStates = null; + byte[] atStatesHash = null; + + switch (this.dataSource) { + case BLOCK_ARCHIVE: + BlockTransformation archivedBlock = BlockArchiveReader.getInstance().fetchBlockAtHeight(currentHeight); + if (archivedBlock != null) { + blockData = archivedBlock.getBlockData(); + transactions = archivedBlock.getTransactions(); + atStates = archivedBlock.getAtStates(); + atStatesHash = archivedBlock.getAtStatesHash(); + } + break; + + case BLOCK_REPOSITORY: + default: + blockData = repository.getBlockRepository().fromHeight(currentHeight); + break; + } + if (blockData == null) { return BlockArchiveWriteResult.BLOCK_NOT_FOUND; } @@ -122,18 +175,47 @@ public class BlockArchiveWriter { repository.getBlockArchiveRepository().save(blockArchiveData); repository.saveChanges(); + // Build the block + Block block; + if (atStatesHash != null) { + block = new Block(repository, blockData, transactions, atStatesHash); + } + else { + block = new Block(repository, blockData, transactions, atStates); + } + // Write the block data to some byte buffers - Block block = new Block(repository, blockData); int blockIndex = bytes.size(); // Write block index to header headerBytes.write(Ints.toByteArray(blockIndex)); // Write block height bytes.write(Ints.toByteArray(block.getBlockData().getHeight())); - byte[] blockBytes = BlockTransformer.toBytes(block); + + // Get serialized block bytes + byte[] blockBytes; + switch (serializationVersion) { + case 1: + blockBytes = BlockTransformer.toBytes(block); + break; + + case 2: + blockBytes = BlockTransformer.toBytesV2(block); + break; + + default: + throw new DataException("Invalid serialization version"); + } + // Write block length bytes.write(Ints.toByteArray(blockBytes.length)); // Write block bytes bytes.write(blockBytes); + + // Log every 1000 blocks + if (this.shouldLogProgress && i % 1000 == 0) { + LOGGER.info("Archived up to block height {}. 
Size of current file: {} bytes", currentHeight, (headerBytes.size() + bytes.size())); + } + i++; } @@ -147,11 +229,10 @@ public class BlockArchiveWriter { // We have enough blocks to create a new file int endHeight = startHeight + i - 1; - int version = 1; String filePath = String.format("%s/%d-%d.dat", archivePath.toString(), startHeight, endHeight); FileOutputStream fileOutputStream = new FileOutputStream(filePath); // Write version number - fileOutputStream.write(Ints.toByteArray(version)); + fileOutputStream.write(Ints.toByteArray(serializationVersion)); // Write start height fileOutputStream.write(Ints.toByteArray(startHeight)); // Write end height @@ -199,4 +280,12 @@ public class BlockArchiveWriter { this.shouldEnforceFileSizeTarget = shouldEnforceFileSizeTarget; } + public void setDataSource(BlockArchiveDataSource dataSource) { + this.dataSource = dataSource; + } + + public void setShouldLogProgress(boolean shouldLogProgress) { + this.shouldLogProgress = shouldLogProgress; + } + } diff --git a/src/main/java/org/qortal/transform/block/BlockTransformer.java b/src/main/java/org/qortal/transform/block/BlockTransformer.java index c97aa090..15445327 100644 --- a/src/main/java/org/qortal/transform/block/BlockTransformer.java +++ b/src/main/java/org/qortal/transform/block/BlockTransformer.java @@ -312,16 +312,24 @@ public class BlockTransformer extends Transformer { ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength); long atFees = 0; - for (ATStateData atStateData : block.getATStates()) { - // Skip initial states generated by DEPLOY_AT transactions in the same block - if (atStateData.isInitial()) - continue; - - atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8)); - atHashBytes.write(atStateData.getStateHash()); - atHashBytes.write(Longs.toByteArray(atStateData.getFees())); - - atFees += atStateData.getFees(); + if (block.getAtStatesHash() != null) { + // We already have the AT states hash + atFees = blockData.getATFees(); + atHashBytes.write(block.getAtStatesHash()); + } + else { + // We need to build the AT states hash + for (ATStateData atStateData : block.getATStates()) { + // Skip initial states generated by DEPLOY_AT transactions in the same block + if (atStateData.isInitial()) + continue; + + atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8)); + atHashBytes.write(atStateData.getStateHash()); + atHashBytes.write(Longs.toByteArray(atStateData.getFees())); + + atFees += atStateData.getFees(); + } } bytes.write(Ints.toByteArray(blockData.getATCount())); From d27316eb64103f79c7729be0c53118b72bce464e Mon Sep 17 00:00:00 2001 From: CalDescent Date: Thu, 2 Feb 2023 18:11:56 +0100 Subject: [PATCH 02/16] Clear cache after rebuilding. 
--- .../org/qortal/controller/repository/BlockArchiveRebuilder.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java b/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java index 74201251..78616a99 100644 --- a/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java +++ b/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java @@ -60,6 +60,7 @@ public class BlockArchiveRebuilder { // Delete existing archive and move the newly built one into its place FileUtils.deleteDirectory(originalArchivePath.toFile()); FileUtils.moveDirectory(newArchivePath.toFile(), originalArchivePath.toFile()); + BlockArchiveReader.getInstance().invalidateFileListCache(); LOGGER.info("Block archive successfully rebuilt"); return; } From 257ca2da05bbbc6a7e4e538c18304cee55179f4b Mon Sep 17 00:00:00 2001 From: CalDescent Date: Fri, 3 Feb 2023 12:36:57 +0100 Subject: [PATCH 03/16] Bumped default block archive serialization version to V2. --- src/main/java/org/qortal/repository/BlockArchiveWriter.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/qortal/repository/BlockArchiveWriter.java b/src/main/java/org/qortal/repository/BlockArchiveWriter.java index c2eb17c9..2eb4c6a6 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveWriter.java +++ b/src/main/java/org/qortal/repository/BlockArchiveWriter.java @@ -80,7 +80,7 @@ public class BlockArchiveWriter { * @param repository */ public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) { - this(startHeight, endHeight, 1, Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository); + this(startHeight, endHeight, 2, Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository); } public static int getMaxArchiveHeight(Repository repository) throws DataException { From ae5b713e5803aca370e297b10805725bbdbeae7a Mon Sep 17 00:00:00 2001 From: CalDescent Date: Sun, 15 Jan 2023 14:32:33 +0000 Subject: [PATCH 04/16] Rework of AT state trimming and pruning, in order to more reliably track the "latest" AT states. This should fix an edge case where AT states data was pruned/trimmed but it was then later required in consensus. The older state was deleted because it was replaced by a new "latest" state in a brand new block. But once the new "latest" state was orphaned from the block, the old "latest" state was then required again. This works around the problem by excluding very recent blocks in the latest AT states data, so that it is unaffected by real-time sync activity. The trade off is that we could end up retaining more AT states than needed, so a secondary cleanup process may need to run at some time in the future to remove these. But it should only be a minimal amount of data, and can be cleaned up with a single query. This would have been happening to a certain degree already. 
# Conflicts: # src/main/java/org/qortal/controller/repository/AtStatesPruner.java # src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java --- .../controller/repository/AtStatesPruner.java | 6 +- .../repository/AtStatesTrimmer.java | 6 +- .../controller/repository/PruneManager.java | 14 ++ .../org/qortal/repository/ATRepository.java | 2 +- .../repository/hsqldb/HSQLDBATRepository.java | 5 +- .../hsqldb/HSQLDBDatabasePruning.java | 2 +- .../org/qortal/test/BlockArchiveTests.java | 24 ++- .../java/org/qortal/test/BootstrapTests.java | 3 +- src/test/java/org/qortal/test/PruneTests.java | 143 +++++++++++++++++- .../org/qortal/test/at/AtRepositoryTests.java | 19 +-- .../org/qortal/test/common/BlockUtils.java | 9 ++ 11 files changed, 196 insertions(+), 37 deletions(-) diff --git a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java index 064fe0ea..f06efdb8 100644 --- a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java +++ b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java @@ -39,9 +39,10 @@ public class AtStatesPruner implements Runnable { try (final Repository repository = RepositoryManager.getRepository()) { int pruneStartHeight = repository.getATRepository().getAtPruneHeight(); + int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository); repository.discardChanges(); - repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight); repository.saveChanges(); while (!Controller.isStopping()) { @@ -92,7 +93,8 @@ public class AtStatesPruner implements Runnable { if (upperPrunableHeight > upperBatchHeight) { pruneStartHeight = upperBatchHeight; repository.getATRepository().setAtPruneHeight(pruneStartHeight); - repository.getATRepository().rebuildLatestAtStates(); + maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository); + repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight); repository.saveChanges(); final int finalPruneStartHeight = pruneStartHeight; diff --git a/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java index 6c026385..125628f1 100644 --- a/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java +++ b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java @@ -26,9 +26,10 @@ public class AtStatesTrimmer implements Runnable { try (final Repository repository = RepositoryManager.getRepository()) { int trimStartHeight = repository.getATRepository().getAtTrimHeight(); + int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository); repository.discardChanges(); - repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight); repository.saveChanges(); while (!Controller.isStopping()) { @@ -70,7 +71,8 @@ public class AtStatesTrimmer implements Runnable { if (upperTrimmableHeight > upperBatchHeight) { trimStartHeight = upperBatchHeight; repository.getATRepository().setAtTrimHeight(trimStartHeight); - repository.getATRepository().rebuildLatestAtStates(); + maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository); + repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight); repository.saveChanges(); final int finalTrimStartHeight = trimStartHeight; diff --git 
a/src/main/java/org/qortal/controller/repository/PruneManager.java b/src/main/java/org/qortal/controller/repository/PruneManager.java index ec27456f..dfb6290b 100644 --- a/src/main/java/org/qortal/controller/repository/PruneManager.java +++ b/src/main/java/org/qortal/controller/repository/PruneManager.java @@ -157,4 +157,18 @@ public class PruneManager { return (height < latestUnprunedHeight); } + /** + * When rebuilding the latest AT states, we need to specify a maxHeight, so that we aren't tracking + * very recent AT states that could potentially be orphaned. This method ensures that AT states + * are given a sufficient number of blocks to confirm before being tracked as a latest AT state. + */ + public static int getMaxHeightForLatestAtStates(Repository repository) throws DataException { + // Get current chain height, and subtract a certain number of "confirmation" blocks + // This is to ensure we are basing our latest AT states data on confirmed blocks - + // ones that won't be orphaned in any normal circumstances + final int confirmationBlocks = 250; + final int chainHeight = repository.getBlockRepository().getBlockchainHeight(); + return chainHeight - confirmationBlocks; + } + } diff --git a/src/main/java/org/qortal/repository/ATRepository.java b/src/main/java/org/qortal/repository/ATRepository.java index 0f537ae9..93da924c 100644 --- a/src/main/java/org/qortal/repository/ATRepository.java +++ b/src/main/java/org/qortal/repository/ATRepository.java @@ -119,7 +119,7 @@ public interface ATRepository { *
<p>
* NOTE: performs implicit repository.saveChanges(). */ - public void rebuildLatestAtStates() throws DataException; + public void rebuildLatestAtStates(int maxHeight) throws DataException; /** Returns height of first trimmable AT state. */ diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBATRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBATRepository.java index 04823925..dd0404a8 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBATRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBATRepository.java @@ -603,7 +603,7 @@ public class HSQLDBATRepository implements ATRepository { @Override - public void rebuildLatestAtStates() throws DataException { + public void rebuildLatestAtStates(int maxHeight) throws DataException { // latestATStatesLock is to prevent concurrent updates on LatestATStates // that could result in one process using a partial or empty dataset // because it was in the process of being rebuilt by another thread @@ -624,11 +624,12 @@ public class HSQLDBATRepository implements ATRepository { + "CROSS JOIN LATERAL(" + "SELECT height FROM ATStates " + "WHERE ATStates.AT_address = ATs.AT_address " + + "AND height <= ?" + "ORDER BY AT_address DESC, height DESC LIMIT 1" + ") " + ")"; try { - this.repository.executeCheckedUpdate(insertSql); + this.repository.executeCheckedUpdate(insertSql, maxHeight); } catch (SQLException e) { repository.examineException(e); throw new DataException("Unable to populate temporary latest AT states cache in repository", e); diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java index 978ba25e..e2bfc9ef 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java @@ -99,7 +99,7 @@ public class HSQLDBDatabasePruning { // It's essential that we rebuild the latest AT states here, as we are using this data in the next query. // Failing to do this will result in important AT states being deleted, rendering the database unusable. - repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(endHeight); // Loop through all the LatestATStates and copy them to the new table diff --git a/src/test/java/org/qortal/test/BlockArchiveTests.java b/src/test/java/org/qortal/test/BlockArchiveTests.java index 3bfa4e84..8b3de67b 100644 --- a/src/test/java/org/qortal/test/BlockArchiveTests.java +++ b/src/test/java/org/qortal/test/BlockArchiveTests.java @@ -23,7 +23,6 @@ import org.qortal.transform.TransformationException; import org.qortal.transform.block.BlockTransformation; import org.qortal.utils.BlockArchiveUtils; import org.qortal.utils.NTP; -import org.qortal.utils.Triple; import java.io.File; import java.io.IOException; @@ -314,9 +313,10 @@ public class BlockArchiveTests extends Common { repository.getBlockRepository().setBlockPruneHeight(901); // Prune the AT states for the archived blocks - repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(900); + repository.saveChanges(); int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900); - assertEquals(900-1, numATStatesPruned); + assertEquals(900-2, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state repository.getATRepository().setAtPruneHeight(901); // Now ensure the SQL repository is missing blocks 2 and 900... 
@@ -563,16 +563,23 @@ public class BlockArchiveTests extends Common { // Trim the first 500 blocks repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500); repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501); + repository.getATRepository().rebuildLatestAtStates(500); repository.getATRepository().trimAtStates(0, 500, 1000); repository.getATRepository().setAtTrimHeight(501); - // Now block 500 should only have the AT state data hash + // Now block 499 should only have the AT state data hash + List block499AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(499); + atStatesData = repository.getATRepository().getATStateAtHeight(block499AtStatesData.get(0).getATAddress(), 499); + assertNotNull(atStatesData.getStateHash()); + assertNull(atStatesData.getStateData()); + + // ... but block 500 should have the full data (due to being retained as the "latest" AT state in the trimmed range block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500); atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500); assertNotNull(atStatesData.getStateHash()); - assertNull(atStatesData.getStateData()); + assertNotNull(atStatesData.getStateData()); - // ... but block 501 should have the full data + // ... and block 501 should also have the full data List block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501); atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501); assertNotNull(atStatesData.getStateHash()); @@ -612,9 +619,10 @@ public class BlockArchiveTests extends Common { repository.getBlockRepository().setBlockPruneHeight(501); // Prune the AT states for the archived blocks - repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(500); + repository.saveChanges(); int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500); - assertEquals(499, numATStatesPruned); + assertEquals(498, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state repository.getATRepository().setAtPruneHeight(501); // Now ensure the SQL repository is missing blocks 2 and 500... 
diff --git a/src/test/java/org/qortal/test/BootstrapTests.java b/src/test/java/org/qortal/test/BootstrapTests.java index aa641e71..b60b412c 100644 --- a/src/test/java/org/qortal/test/BootstrapTests.java +++ b/src/test/java/org/qortal/test/BootstrapTests.java @@ -176,7 +176,8 @@ public class BootstrapTests extends Common { repository.getBlockRepository().setBlockPruneHeight(901); // Prune the AT states for the archived blocks - repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(900); + repository.saveChanges(); repository.getATRepository().pruneAtStates(0, 900); repository.getATRepository().setAtPruneHeight(901); diff --git a/src/test/java/org/qortal/test/PruneTests.java b/src/test/java/org/qortal/test/PruneTests.java index 0914d794..5a31146e 100644 --- a/src/test/java/org/qortal/test/PruneTests.java +++ b/src/test/java/org/qortal/test/PruneTests.java @@ -1,16 +1,33 @@ package org.qortal.test; +import com.google.common.hash.HashCode; import org.junit.Before; import org.junit.Test; +import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; +import org.qortal.asset.Asset; +import org.qortal.block.Block; import org.qortal.controller.BlockMinter; +import org.qortal.crosschain.AcctMode; +import org.qortal.crosschain.LitecoinACCTv3; +import org.qortal.data.at.ATData; import org.qortal.data.at.ATStateData; import org.qortal.data.block.BlockData; +import org.qortal.data.crosschain.CrossChainTradeData; +import org.qortal.data.transaction.BaseTransactionData; +import org.qortal.data.transaction.DeployAtTransactionData; +import org.qortal.data.transaction.MessageTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.group.Group; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; import org.qortal.test.common.AtUtils; +import org.qortal.test.common.BlockUtils; import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.transaction.DeployAtTransaction; +import org.qortal.transaction.MessageTransaction; import java.util.ArrayList; import java.util.List; @@ -19,6 +36,13 @@ import static org.junit.Assert.*; public class PruneTests extends Common { + // Constants for test AT (an LTC ACCT) + public static final byte[] litecoinPublicKeyHash = HashCode.fromString("bb00bb11bb22bb33bb44bb55bb66bb77bb88bb99").asBytes(); + public static final int tradeTimeout = 20; // blocks + public static final long redeemAmount = 80_40200000L; + public static final long fundingAmount = 123_45600000L; + public static final long litecoinAmount = 864200L; // 0.00864200 LTC + @Before public void beforeTest() throws DataException { Common.useDefaultSettings(); @@ -62,23 +86,32 @@ public class PruneTests extends Common { repository.getBlockRepository().setBlockPruneHeight(6); // Prune AT states for blocks 2-5 + repository.getATRepository().rebuildLatestAtStates(5); + repository.saveChanges(); int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 5); - assertEquals(4, numATStatesPruned); + assertEquals(3, numATStatesPruned); repository.getATRepository().setAtPruneHeight(6); - // Make sure that blocks 2-5 are now missing block data and AT states data - for (Integer i=2; i <= 5; i++) { + // Make sure that blocks 2-4 are now missing block data and AT states data + for (Integer i=2; i <= 4; i++) { BlockData blockData = repository.getBlockRepository().fromHeight(i); assertNull(blockData); 
List atStatesDataList = repository.getATRepository().getBlockATStatesAtHeight(i); assertTrue(atStatesDataList.isEmpty()); } - // ... but blocks 6-10 have block data and full AT states data + // Block 5 should have full AT states data even though it was pruned. + // This is because we identified that as the "latest" AT state in that block range + BlockData blockData = repository.getBlockRepository().fromHeight(5); + assertNull(blockData); + List atStatesDataList = repository.getATRepository().getBlockATStatesAtHeight(5); + assertEquals(1, atStatesDataList.size()); + + // Blocks 6-10 have block data and full AT states data for (Integer i=6; i <= 10; i++) { - BlockData blockData = repository.getBlockRepository().fromHeight(i); + blockData = repository.getBlockRepository().fromHeight(i); assertNotNull(blockData.getSignature()); - List atStatesDataList = repository.getATRepository().getBlockATStatesAtHeight(i); + atStatesDataList = repository.getATRepository().getBlockATStatesAtHeight(i); assertNotNull(atStatesDataList); assertFalse(atStatesDataList.isEmpty()); ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(atStatesDataList.get(0).getATAddress(), i); @@ -88,4 +121,102 @@ public class PruneTests extends Common { } } + @Test + public void testPruneSleepingAt() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount deployer = Common.getTestAccount(repository, "chloe"); + PrivateKeyAccount tradeAccount = Common.getTestAccount(repository, "alice"); + + DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, tradeAccount.getAddress()); + Account at = deployAtTransaction.getATAccount(); + String atAddress = at.getAddress(); + + // Mint enough blocks to take the original DEPLOY_AT past the prune threshold (in this case 20) + Block block = BlockUtils.mintBlocks(repository, 25); + + // Send creator's address to AT, instead of typical partner's address + byte[] messageData = LitecoinACCTv3.getInstance().buildCancelMessage(deployer.getAddress()); + long txTimestamp = block.getBlockData().getTimestamp(); + MessageTransaction messageTransaction = sendMessage(repository, deployer, messageData, atAddress, txTimestamp); + + // AT should process 'cancel' message in next block + BlockUtils.mintBlock(repository); + + // Prune AT states up to block 20 + repository.getATRepository().rebuildLatestAtStates(20); + repository.saveChanges(); + int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 20); + assertEquals(1, numATStatesPruned); // deleted state at heights 2, but state at height 3 remains + + // Check AT is finished + ATData atData = repository.getATRepository().fromATAddress(atAddress); + assertTrue(atData.getIsFinished()); + + // AT should be in CANCELLED mode + CrossChainTradeData tradeData = LitecoinACCTv3.getInstance().populateTradeData(repository, atData); + assertEquals(AcctMode.CANCELLED, tradeData.mode); + + // Test orphaning - should be possible because the previous AT state at height 3 is still available + BlockUtils.orphanLastBlock(repository); + } + } + + + // Helper methods for AT testing + private DeployAtTransaction doDeploy(Repository repository, PrivateKeyAccount deployer, String tradeAddress) throws DataException { + byte[] creationBytes = LitecoinACCTv3.buildQortalAT(tradeAddress, litecoinPublicKeyHash, redeemAmount, litecoinAmount, tradeTimeout); + + long txTimestamp = System.currentTimeMillis(); + byte[] lastReference = deployer.getLastReference(); + + if 
(lastReference == null) { + System.err.println(String.format("Qortal account %s has no last reference", deployer.getAddress())); + System.exit(2); + } + + Long fee = null; + String name = "QORT-LTC cross-chain trade"; + String description = String.format("Qortal-Litecoin cross-chain trade"); + String atType = "ACCT"; + String tags = "QORT-LTC ACCT"; + + BaseTransactionData baseTransactionData = new BaseTransactionData(txTimestamp, Group.NO_GROUP, lastReference, deployer.getPublicKey(), fee, null); + TransactionData deployAtTransactionData = new DeployAtTransactionData(baseTransactionData, name, description, atType, tags, creationBytes, fundingAmount, Asset.QORT); + + DeployAtTransaction deployAtTransaction = new DeployAtTransaction(repository, deployAtTransactionData); + + fee = deployAtTransaction.calcRecommendedFee(); + deployAtTransactionData.setFee(fee); + + TransactionUtils.signAndMint(repository, deployAtTransactionData, deployer); + + return deployAtTransaction; + } + + private MessageTransaction sendMessage(Repository repository, PrivateKeyAccount sender, byte[] data, String recipient, long txTimestamp) throws DataException { + byte[] lastReference = sender.getLastReference(); + + if (lastReference == null) { + System.err.println(String.format("Qortal account %s has no last reference", sender.getAddress())); + System.exit(2); + } + + Long fee = null; + int version = 4; + int nonce = 0; + long amount = 0; + Long assetId = null; // because amount is zero + + BaseTransactionData baseTransactionData = new BaseTransactionData(txTimestamp, Group.NO_GROUP, lastReference, sender.getPublicKey(), fee, null); + TransactionData messageTransactionData = new MessageTransactionData(baseTransactionData, version, nonce, recipient, amount, assetId, data, false, false); + + MessageTransaction messageTransaction = new MessageTransaction(repository, messageTransactionData); + + fee = messageTransaction.calcRecommendedFee(); + messageTransactionData.setFee(fee); + + TransactionUtils.signAndMint(repository, messageTransactionData, sender); + + return messageTransaction; + } } diff --git a/src/test/java/org/qortal/test/at/AtRepositoryTests.java b/src/test/java/org/qortal/test/at/AtRepositoryTests.java index 8ef4c774..8441731f 100644 --- a/src/test/java/org/qortal/test/at/AtRepositoryTests.java +++ b/src/test/java/org/qortal/test/at/AtRepositoryTests.java @@ -2,29 +2,20 @@ package org.qortal.test.at; import static org.junit.Assert.*; -import java.nio.ByteBuffer; import java.util.List; -import org.ciyam.at.CompilationException; import org.ciyam.at.MachineState; -import org.ciyam.at.OpCode; import org.junit.Before; import org.junit.Test; import org.qortal.account.PrivateKeyAccount; -import org.qortal.asset.Asset; import org.qortal.data.at.ATData; import org.qortal.data.at.ATStateData; -import org.qortal.data.transaction.BaseTransactionData; -import org.qortal.data.transaction.DeployAtTransactionData; -import org.qortal.data.transaction.TransactionData; -import org.qortal.group.Group; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; import org.qortal.test.common.AtUtils; import org.qortal.test.common.BlockUtils; import org.qortal.test.common.Common; -import org.qortal.test.common.TransactionUtils; import org.qortal.transaction.DeployAtTransaction; public class AtRepositoryTests extends Common { @@ -76,7 +67,7 @@ public class AtRepositoryTests extends Common { Integer testHeight = maxHeight - 2; // Trim AT state data - 
repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(maxHeight); repository.getATRepository().trimAtStates(2, maxHeight, 1000); ATStateData atStateData = repository.getATRepository().getATStateAtHeight(atAddress, testHeight); @@ -130,7 +121,7 @@ public class AtRepositoryTests extends Common { Integer testHeight = blockchainHeight; // Trim AT state data - repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(maxHeight); // COMMIT to check latest AT states persist / TEMPORARY table interaction repository.saveChanges(); @@ -163,8 +154,8 @@ public class AtRepositoryTests extends Common { int maxTrimHeight = blockchainHeight - 4; Integer testHeight = maxTrimHeight + 1; - // Trim AT state data - repository.getATRepository().rebuildLatestAtStates(); + // Trim AT state data (using a max height of maxTrimHeight + 1, so it is beyond the trimmed range) + repository.getATRepository().rebuildLatestAtStates(maxTrimHeight + 1); repository.saveChanges(); repository.getATRepository().trimAtStates(2, maxTrimHeight, 1000); @@ -333,7 +324,7 @@ public class AtRepositoryTests extends Common { Integer testHeight = maxHeight - 2; // Trim AT state data - repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().rebuildLatestAtStates(maxHeight); repository.getATRepository().trimAtStates(2, maxHeight, 1000); List atStates = repository.getATRepository().getBlockATStatesAtHeight(testHeight); diff --git a/src/test/java/org/qortal/test/common/BlockUtils.java b/src/test/java/org/qortal/test/common/BlockUtils.java index 3077b65b..ab57dadf 100644 --- a/src/test/java/org/qortal/test/common/BlockUtils.java +++ b/src/test/java/org/qortal/test/common/BlockUtils.java @@ -20,6 +20,15 @@ public class BlockUtils { return BlockMinter.mintTestingBlock(repository, mintingAccount); } + /** Mints multiple blocks using "alice-reward-share" test account, and returns the final block. */ + public static Block mintBlocks(Repository repository, int count) throws DataException { + Block block = null; + for (int i=0; i Date: Sun, 26 Feb 2023 15:59:18 +0000 Subject: [PATCH 05/16] Added "archiveVersion" setting, which specifies the archive version to be used when building. Defaults to 1 for now, but will bump to version 2 at the time of a wider rollout. 
--- src/main/java/org/qortal/repository/BlockArchiveWriter.java | 2 +- src/main/java/org/qortal/settings/Settings.java | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/qortal/repository/BlockArchiveWriter.java b/src/main/java/org/qortal/repository/BlockArchiveWriter.java index 2eb4c6a6..1799f3c4 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveWriter.java +++ b/src/main/java/org/qortal/repository/BlockArchiveWriter.java @@ -80,7 +80,7 @@ public class BlockArchiveWriter { * @param repository */ public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) { - this(startHeight, endHeight, 2, Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository); + this(startHeight, endHeight, Settings.getInstance().getArchiveVersion(), Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository); } public static int getMaxArchiveHeight(Repository repository) throws DataException { diff --git a/src/main/java/org/qortal/settings/Settings.java b/src/main/java/org/qortal/settings/Settings.java index ae5dc173..52b3aed5 100644 --- a/src/main/java/org/qortal/settings/Settings.java +++ b/src/main/java/org/qortal/settings/Settings.java @@ -178,6 +178,8 @@ public class Settings { private boolean archiveEnabled = true; /** How often to attempt archiving (ms). */ private long archiveInterval = 7171L; // milliseconds + /** Serialization version to use when building an archive */ + private int archiveVersion = 1; /** Whether to automatically bootstrap instead of syncing from genesis */ @@ -926,6 +928,10 @@ public class Settings { return this.archiveInterval; } + public int getArchiveVersion() { + return this.archiveVersion; + } + public boolean getBootstrap() { return this.bootstrap; From 0af6fbe1eb80b1e8fe47e293acf72f0f8d221a75 Mon Sep 17 00:00:00 2001 From: CalDescent Date: Sun, 26 Feb 2023 16:52:48 +0000 Subject: [PATCH 06/16] Added `POST /repository/archive/rebuild` endpoint to allow local archive to be rebuilt. When "archiveVersion" is set to 2 in settings, this should allow the archive size to reduce by over 90%. Some nodes might want to maintain an older/larger version, for the purposes of development/debugging, so this is currently opt-in. 
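To opt in, a node operator sets "archiveVersion": 2 in settings.json and then calls the new endpoint on the node's local API. Below is a minimal sketch of such a call using Java's built-in HttpClient; the port 12391, the /admin path prefix and the X-API-KEY header name are the usual defaults but are assumptions here, so adjust them to your own configuration:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class RebuildArchiveExample {
        public static void main(String[] args) throws Exception {
            String apiKey = args.length > 0 ? args[0] : "changeme"; // the node's local API key
            HttpClient client = HttpClient.newHttpClient();

            // Assumed default port and admin path prefix; check your node's settings
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://localhost:12391/admin/repository/archive/rebuild"))
                    .header("X-API-KEY", apiKey) // assumed header name for API-key authentication
                    .POST(HttpRequest.BodyPublishers.noBody())
                    .build();

            // The endpoint returns "true" on success, or "false" if the blockchain lock could not be acquired
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body());
        }
    }

Note that the rebuild runs synchronously while holding the blockchain lock, so the node pauses synchronizing and minting until it completes; on a full archive this is likely to take a long time.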
--- .../qortal/api/resource/AdminResource.java | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/src/main/java/org/qortal/api/resource/AdminResource.java b/src/main/java/org/qortal/api/resource/AdminResource.java index 46e204db..0531f60d 100644 --- a/src/main/java/org/qortal/api/resource/AdminResource.java +++ b/src/main/java/org/qortal/api/resource/AdminResource.java @@ -45,6 +45,7 @@ import org.qortal.block.BlockChain; import org.qortal.controller.Controller; import org.qortal.controller.Synchronizer; import org.qortal.controller.Synchronizer.SynchronizationResult; +import org.qortal.controller.repository.BlockArchiveRebuilder; import org.qortal.data.account.MintingAccountData; import org.qortal.data.account.RewardShareData; import org.qortal.network.Network; @@ -734,6 +735,52 @@ public class AdminResource { } } + @POST + @Path("/repository/archive/rebuild") + @Operation( + summary = "Rebuild archive.", + description = "Rebuilds archive files, using the serialization version specified via the archiveVersion setting.", + responses = { + @ApiResponse( + description = "\"true\"", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string")) + ) + } + ) + @ApiErrors({ApiError.REPOSITORY_ISSUE}) + @SecurityRequirement(name = "apiKey") + public String rebuildArchive(@HeaderParam(Security.API_KEY_HEADER) String apiKey) { + Security.checkApiCallAllowed(request); + + try { + // We don't actually need to lock the blockchain here, but we'll do it anyway so that + // the node can focus on rebuilding rather than synchronizing / minting. + ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); + + blockchainLock.lockInterruptibly(); + + try { + int archiveVersion = Settings.getInstance().getArchiveVersion(); + + BlockArchiveRebuilder blockArchiveRebuilder = new BlockArchiveRebuilder(archiveVersion); + blockArchiveRebuilder.start(); + + return "true"; + + } catch (IOException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e); + + } finally { + blockchainLock.unlock(); + } + } catch (InterruptedException e) { + // We couldn't lock blockchain to perform rebuild + return "false"; + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + @DELETE @Path("/repository") @Operation( From 1153519d788934e852a3568b7f84905d186acadd Mon Sep 17 00:00:00 2001 From: CalDescent Date: Sun, 26 Feb 2023 16:53:43 +0000 Subject: [PATCH 07/16] Various fixes as a result of moving to archive version 2. 
--- .../qortal/repository/BlockArchiveReader.java | 6 +++++ .../qortal/repository/BlockArchiveWriter.java | 6 ++++- .../org/qortal/utils/BlockArchiveUtils.java | 25 ++++++++++++++++--- 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/qortal/repository/BlockArchiveReader.java b/src/main/java/org/qortal/repository/BlockArchiveReader.java index c5878563..e45f1fdf 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveReader.java +++ b/src/main/java/org/qortal/repository/BlockArchiveReader.java @@ -70,6 +70,9 @@ public class BlockArchiveReader { } Triple serializedBlock = this.fetchSerializedBlockBytesForHeight(height); + if (serializedBlock == null) { + return null; + } byte[] serializedBytes = serializedBlock.getA(); Integer serializationVersion = serializedBlock.getB(); if (serializedBytes == null || serializationVersion == null) { @@ -188,6 +191,9 @@ public class BlockArchiveReader { Integer height = this.fetchHeightForSignature(signature, repository); if (height != null) { Triple serializedBlock = this.fetchSerializedBlockBytesForHeight(height); + if (serializedBlock == null) { + return null; + } byte[] blockBytes = serializedBlock.getA(); Integer version = serializedBlock.getB(); if (blockBytes == null || version == null) { diff --git a/src/main/java/org/qortal/repository/BlockArchiveWriter.java b/src/main/java/org/qortal/repository/BlockArchiveWriter.java index 1799f3c4..87d0a93c 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveWriter.java +++ b/src/main/java/org/qortal/repository/BlockArchiveWriter.java @@ -134,6 +134,7 @@ public class BlockArchiveWriter { return BlockArchiveWriteResult.STOPPING; } if (Synchronizer.getInstance().isSynchronizing()) { + Thread.sleep(1000L); continue; } @@ -180,9 +181,12 @@ public class BlockArchiveWriter { if (atStatesHash != null) { block = new Block(repository, blockData, transactions, atStatesHash); } - else { + else if (atStates != null) { block = new Block(repository, blockData, transactions, atStates); } + else { + block = new Block(repository, blockData); + } // Write the block data to some byte buffers int blockIndex = bytes.size(); diff --git a/src/main/java/org/qortal/utils/BlockArchiveUtils.java b/src/main/java/org/qortal/utils/BlockArchiveUtils.java index 84de1a31..807faef9 100644 --- a/src/main/java/org/qortal/utils/BlockArchiveUtils.java +++ b/src/main/java/org/qortal/utils/BlockArchiveUtils.java @@ -21,6 +21,16 @@ public class BlockArchiveUtils { * into the HSQLDB, in order to make it SQL-compatible * again. *

+ * This is only fully compatible with archives that use
+ * serialization version 1. For version 2 (or above),
+ * we are unable to import individual AT states as we
+ * only have a single combined hash, so the use cases
+ * for this are greatly limited.
+ *

+ * A version 1 archive should ultimately be rebuildable
+ * via a resync or reindex from genesis, allowing
+ * access to this feature once again.
+ *

* Note: calls discardChanges() and saveChanges(), so * make sure that you commit any existing repository * changes before calling this method. @@ -61,9 +71,18 @@ public class BlockArchiveUtils { repository.getBlockRepository().save(blockInfo.getBlockData()); // Save AT state data hashes - for (ATStateData atStateData : blockInfo.getAtStates()) { - atStateData.setHeight(blockInfo.getBlockData().getHeight()); - repository.getATRepository().save(atStateData); + if (blockInfo.getAtStates() != null) { + for (ATStateData atStateData : blockInfo.getAtStates()) { + atStateData.setHeight(blockInfo.getBlockData().getHeight()); + repository.getATRepository().save(atStateData); + } + } + else { + // We don't have AT state hashes, so we are only importing a partial state. + // This can still be useful to allow orphaning to very old blocks, when we + // need to access other chainstate info (such as balances) at an earlier block. + // In order to do this, the orphan process must be temporarily adjusted to avoid + // orphaning AT states, as it will otherwise fail due to having no previous state. } } catch (DataException e) { From abdc265fc62580fc74153851539edfc67f4003db Mon Sep 17 00:00:00 2001 From: CalDescent Date: Sun, 26 Feb 2023 16:54:14 +0000 Subject: [PATCH 08/16] Removed legacy bulk archiving/pruning code that is no longer needed. --- .../org/qortal/controller/Controller.java | 8 +- .../qortal/repository/RepositoryManager.java | 61 ---- .../hsqldb/HSQLDBDatabaseArchiving.java | 88 ----- .../hsqldb/HSQLDBDatabasePruning.java | 332 ------------------ 4 files changed, 2 insertions(+), 487 deletions(-) delete mode 100644 src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java delete mode 100644 src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index ed1d2d07..f0bd1ef5 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -400,12 +400,8 @@ public class Controller extends Thread { RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl()); RepositoryManager.setRepositoryFactory(repositoryFactory); RepositoryManager.setRequestedCheckpoint(Boolean.TRUE); - - try (final Repository repository = RepositoryManager.getRepository()) { - RepositoryManager.archive(repository); - RepositoryManager.prune(repository); - } - } catch (DataException e) { + } + catch (DataException e) { // If exception has no cause then repository is in use by some other process. 
if (e.getCause() == null) { LOGGER.info("Repository in use by another process?"); diff --git a/src/main/java/org/qortal/repository/RepositoryManager.java b/src/main/java/org/qortal/repository/RepositoryManager.java index 0d9325b9..9d76ccae 100644 --- a/src/main/java/org/qortal/repository/RepositoryManager.java +++ b/src/main/java/org/qortal/repository/RepositoryManager.java @@ -2,11 +2,6 @@ package org.qortal.repository; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.qortal.gui.SplashFrame; -import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving; -import org.qortal.repository.hsqldb.HSQLDBDatabasePruning; -import org.qortal.repository.hsqldb.HSQLDBRepository; -import org.qortal.settings.Settings; import java.sql.SQLException; import java.util.concurrent.TimeoutException; @@ -61,62 +56,6 @@ public abstract class RepositoryManager { } } - public static boolean archive(Repository repository) { - if (Settings.getInstance().isLite()) { - // Lite nodes have no blockchain - return false; - } - - // Bulk archive the database the first time we use archive mode - if (Settings.getInstance().isArchiveEnabled()) { - if (RepositoryManager.canArchiveOrPrune()) { - try { - return HSQLDBDatabaseArchiving.buildBlockArchive(repository, BlockArchiveWriter.DEFAULT_FILE_SIZE_TARGET); - - } catch (DataException e) { - LOGGER.info("Unable to build block archive. The database may have been left in an inconsistent state."); - } - } - else { - LOGGER.info("Unable to build block archive due to missing ATStatesHeightIndex. Bootstrapping is recommended."); - LOGGER.info("To bootstrap, stop the core and delete the db folder, then start the core again."); - SplashFrame.getInstance().updateStatus("Missing index. Bootstrapping is recommended."); - } - } - return false; - } - - public static boolean prune(Repository repository) { - if (Settings.getInstance().isLite()) { - // Lite nodes have no blockchain - return false; - } - - // Bulk prune the database the first time we use top-only or block archive mode - if (Settings.getInstance().isTopOnly() || - Settings.getInstance().isArchiveEnabled()) { - if (RepositoryManager.canArchiveOrPrune()) { - try { - boolean prunedATStates = HSQLDBDatabasePruning.pruneATStates((HSQLDBRepository) repository); - boolean prunedBlocks = HSQLDBDatabasePruning.pruneBlocks((HSQLDBRepository) repository); - - // Perform repository maintenance to shrink the db size down - if (prunedATStates && prunedBlocks) { - HSQLDBDatabasePruning.performMaintenance(repository); - return true; - } - - } catch (SQLException | DataException e) { - LOGGER.info("Unable to bulk prune AT states. The database may have been left in an inconsistent state."); - } - } - else { - LOGGER.info("Unable to prune blocks due to missing ATStatesHeightIndex. 
Bootstrapping is recommended."); - } - } - return false; - } - public static void setRequestedCheckpoint(Boolean quick) { quickCheckpointRequested = quick; } diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java deleted file mode 100644 index 90022b00..00000000 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java +++ /dev/null @@ -1,88 +0,0 @@ -package org.qortal.repository.hsqldb; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.qortal.controller.Controller; -import org.qortal.gui.SplashFrame; -import org.qortal.repository.BlockArchiveWriter; -import org.qortal.repository.DataException; -import org.qortal.repository.Repository; -import org.qortal.repository.RepositoryManager; -import org.qortal.transform.TransformationException; - -import java.io.IOException; - -/** - * - * When switching to an archiving node, we need to archive most of the database contents. - * This involves copying its data into flat files. - * If we do this entirely as a background process, it is very slow and can interfere with syncing. - * However, if we take the approach of doing this in bulk, before starting up the rest of the - * processes, this makes it much faster and less invasive. - * - * From that point, the original background archiving process will run, but can be dialled right down - * so not to interfere with syncing. - * - */ - - -public class HSQLDBDatabaseArchiving { - - private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabaseArchiving.class); - - - public static boolean buildBlockArchive(Repository repository, long fileSizeTarget) throws DataException { - - // Only build the archive if we haven't already got one that is up to date - boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository); - if (upToDate) { - // Already archived - return false; - } - - LOGGER.info("Building block archive - this process could take a while..."); - SplashFrame.getInstance().updateStatus("Building block archive..."); - - final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); - int startHeight = 0; - - while (!Controller.isStopping()) { - try { - BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository); - writer.setFileSizeTarget(fileSizeTarget); - BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); - switch (result) { - case OK: - // Increment block archive height - startHeight = writer.getLastWrittenHeight() + 1; - repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight); - repository.saveChanges(); - break; - - case STOPPING: - return false; - - case NOT_ENOUGH_BLOCKS: - // We've reached the limit of the blocks we can archive - // Return from the whole method - return true; - - case BLOCK_NOT_FOUND: - // We tried to archive a block that didn't exist. This is a major failure and likely means - // that a bootstrap or re-sync is needed. Return rom the method - LOGGER.info("Error: block not found when building archive. 
If this error persists, " + - "a bootstrap or re-sync may be needed."); - return false; - } - - } catch (IOException | TransformationException | InterruptedException e) { - LOGGER.info("Caught exception when creating block cache", e); - return false; - } - } - - // If we got this far then something went wrong (most likely the app is stopping) - return false; - } - -} diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java deleted file mode 100644 index e2bfc9ef..00000000 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java +++ /dev/null @@ -1,332 +0,0 @@ -package org.qortal.repository.hsqldb; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.qortal.controller.Controller; -import org.qortal.data.block.BlockData; -import org.qortal.gui.SplashFrame; -import org.qortal.repository.BlockArchiveWriter; -import org.qortal.repository.DataException; -import org.qortal.repository.Repository; -import org.qortal.settings.Settings; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.concurrent.TimeoutException; - -/** - * - * When switching from a full node to a pruning node, we need to delete most of the database contents. - * If we do this entirely as a background process, it is very slow and can interfere with syncing. - * However, if we take the approach of transferring only the necessary rows to a new table and then - * deleting the original table, this makes the process much faster. It was taking several days to - * delete the AT states in the background, but only a couple of minutes to copy them to a new table. - * - * The trade off is that we have to go through a form of "reshape" when starting the app for the first - * time after enabling pruning mode. But given that this is an opt-in mode, I don't think it will be - * a problem. - * - * Once the pruning is complete, it automatically performs a CHECKPOINT DEFRAG in order to - * shrink the database file size down to a fraction of what it was before. - * - * From this point, the original background process will run, but can be dialled right down so not - * to interfere with syncing. - * - */ - - -public class HSQLDBDatabasePruning { - - private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabasePruning.class); - - - public static boolean pruneATStates(HSQLDBRepository repository) throws SQLException, DataException { - - // Only bulk prune AT states if we have never done so before - int pruneHeight = repository.getATRepository().getAtPruneHeight(); - if (pruneHeight > 0) { - // Already pruned AT states - return false; - } - - if (Settings.getInstance().isArchiveEnabled()) { - // Only proceed if we can see that the archiver has already finished - // This way, if the archiver failed for any reason, we can prune once it has had - // some opportunities to try again - boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository); - if (!upToDate) { - return false; - } - } - - LOGGER.info("Starting bulk prune of AT states - this process could take a while... " + - "(approx. 
2 mins on high spec, or upwards of 30 mins in some cases)"); - SplashFrame.getInstance().updateStatus("Pruning database (takes up to 30 mins)..."); - - // Create new AT-states table to hold smaller dataset - repository.executeCheckedUpdate("DROP TABLE IF EXISTS ATStatesNew"); - repository.executeCheckedUpdate("CREATE TABLE ATStatesNew (" - + "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, " - + "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, " - + "PRIMARY KEY (AT_address, height), " - + "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)"); - repository.executeCheckedUpdate("SET TABLE ATStatesNew NEW SPACE"); - repository.executeCheckedUpdate("CHECKPOINT"); - - // Add a height index - LOGGER.info("Adding index to AT states table..."); - repository.executeCheckedUpdate("CREATE INDEX IF NOT EXISTS ATStatesNewHeightIndex ON ATStatesNew (height)"); - repository.executeCheckedUpdate("CHECKPOINT"); - - - // Find our latest block - BlockData latestBlock = repository.getBlockRepository().getLastBlock(); - if (latestBlock == null) { - LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning"); - return false; - } - - // Calculate some constants for later use - final int blockchainHeight = latestBlock.getHeight(); - int maximumBlockToTrim = blockchainHeight - Settings.getInstance().getPruneBlockLimit(); - if (Settings.getInstance().isArchiveEnabled()) { - // Archive mode - don't prune anything that hasn't been archived yet - maximumBlockToTrim = Math.min(maximumBlockToTrim, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1); - } - final int endHeight = blockchainHeight; - final int blockStep = 10000; - - - // It's essential that we rebuild the latest AT states here, as we are using this data in the next query. - // Failing to do this will result in important AT states being deleted, rendering the database unusable. - repository.getATRepository().rebuildLatestAtStates(endHeight); - - - // Loop through all the LatestATStates and copy them to the new table - LOGGER.info("Copying AT states..."); - for (int height = 0; height < endHeight; height += blockStep) { - final int batchEndHeight = height + blockStep - 1; - //LOGGER.info(String.format("Copying AT states between %d and %d...", height, batchEndHeight)); - - String sql = "SELECT height, AT_address FROM LatestATStates WHERE height BETWEEN ? AND ?"; - try (ResultSet latestAtStatesResultSet = repository.checkedExecute(sql, height, batchEndHeight)) { - if (latestAtStatesResultSet != null) { - do { - int latestAtHeight = latestAtStatesResultSet.getInt(1); - String latestAtAddress = latestAtStatesResultSet.getString(2); - - // Copy this latest ATState to the new table - //LOGGER.info(String.format("Copying AT %s at height %d...", latestAtAddress, latestAtHeight)); - try { - String updateSql = "INSERT INTO ATStatesNew (" - + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp " - + "FROM ATStates " - + "WHERE height = ? AND AT_address = ?)"; - repository.executeCheckedUpdate(updateSql, latestAtHeight, latestAtAddress); - } catch (SQLException e) { - repository.examineException(e); - throw new DataException("Unable to copy ATStates", e); - } - - // If this batch includes blocks after the maximum block to trim, we will need to copy - // each of its AT states above maximumBlockToTrim as they are considered "recent". 
We - // need to do this for _all_ AT states in these blocks, regardless of their latest state. - if (batchEndHeight >= maximumBlockToTrim) { - // Now copy this AT's states for each recent block they are present in - for (int i = maximumBlockToTrim; i < endHeight; i++) { - if (latestAtHeight < i) { - // This AT finished before this block so there is nothing to copy - continue; - } - - //LOGGER.info(String.format("Copying recent AT %s at height %d...", latestAtAddress, i)); - try { - // Copy each LatestATState to the new table - String updateSql = "INSERT IGNORE INTO ATStatesNew (" - + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp " - + "FROM ATStates " - + "WHERE height = ? AND AT_address = ?)"; - repository.executeCheckedUpdate(updateSql, i, latestAtAddress); - } catch (SQLException e) { - repository.examineException(e); - throw new DataException("Unable to copy ATStates", e); - } - } - } - repository.saveChanges(); - - } while (latestAtStatesResultSet.next()); - } - } catch (SQLException e) { - throw new DataException("Unable to copy AT states", e); - } - } - - - // Finally, drop the original table and rename - LOGGER.info("Deleting old AT states..."); - repository.executeCheckedUpdate("DROP TABLE ATStates"); - repository.executeCheckedUpdate("ALTER TABLE ATStatesNew RENAME TO ATStates"); - repository.executeCheckedUpdate("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex"); - repository.executeCheckedUpdate("CHECKPOINT"); - - // Update the prune height - int nextPruneHeight = maximumBlockToTrim + 1; - repository.getATRepository().setAtPruneHeight(nextPruneHeight); - repository.saveChanges(); - - repository.executeCheckedUpdate("CHECKPOINT"); - - // Now prune/trim the ATStatesData, as this currently goes back over a month - return HSQLDBDatabasePruning.pruneATStateData(repository); - } - - /* - * Bulk prune ATStatesData to catch up with the now pruned ATStates table - * This uses the existing AT States trimming code but with a much higher end block - */ - private static boolean pruneATStateData(Repository repository) throws DataException { - - if (Settings.getInstance().isArchiveEnabled()) { - // Don't prune ATStatesData in archive mode - return true; - } - - BlockData latestBlock = repository.getBlockRepository().getLastBlock(); - if (latestBlock == null) { - LOGGER.info("Unable to determine blockchain height, necessary for bulk ATStatesData pruning"); - return false; - } - final int blockchainHeight = latestBlock.getHeight(); - int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit(); - // ATStateData is already trimmed - so carry on from where we left off in the past - int pruneStartHeight = repository.getATRepository().getAtTrimHeight(); - - LOGGER.info("Starting bulk prune of AT states data - this process could take a while... (approx. 
3 mins on high spec)"); - - while (pruneStartHeight < upperPrunableHeight) { - // Prune all AT state data up until our latest minus pruneBlockLimit (or our archive height) - - if (Controller.isStopping()) { - return false; - } - - // Override batch size in the settings because this is a one-off process - final int batchSize = 1000; - final int rowLimitPerBatch = 50000; - int upperBatchHeight = pruneStartHeight + batchSize; - int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight); - - LOGGER.trace(String.format("Pruning AT states data between %d and %d...", pruneStartHeight, upperPruneHeight)); - - int numATStatesPruned = repository.getATRepository().trimAtStates(pruneStartHeight, upperPruneHeight, rowLimitPerBatch); - repository.saveChanges(); - - if (numATStatesPruned > 0) { - LOGGER.trace(String.format("Pruned %d AT states data rows between blocks %d and %d", - numATStatesPruned, pruneStartHeight, upperPruneHeight)); - } else { - repository.getATRepository().setAtTrimHeight(upperBatchHeight); - // No need to rebuild the latest AT states as we aren't currently synchronizing - repository.saveChanges(); - LOGGER.debug(String.format("Bumping AT states trim height to %d", upperBatchHeight)); - - // Can we move onto next batch? - if (upperPrunableHeight > upperBatchHeight) { - pruneStartHeight = upperBatchHeight; - } - else { - // We've finished pruning - break; - } - } - } - - return true; - } - - public static boolean pruneBlocks(Repository repository) throws SQLException, DataException { - - // Only bulk prune AT states if we have never done so before - int pruneHeight = repository.getBlockRepository().getBlockPruneHeight(); - if (pruneHeight > 0) { - // Already pruned blocks - return false; - } - - if (Settings.getInstance().isArchiveEnabled()) { - // Only proceed if we can see that the archiver has already finished - // This way, if the archiver failed for any reason, we can prune once it has had - // some opportunities to try again - boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository); - if (!upToDate) { - return false; - } - } - - BlockData latestBlock = repository.getBlockRepository().getLastBlock(); - if (latestBlock == null) { - LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning"); - return false; - } - final int blockchainHeight = latestBlock.getHeight(); - int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit(); - int pruneStartHeight = 0; - - if (Settings.getInstance().isArchiveEnabled()) { - // Archive mode - don't prune anything that hasn't been archived yet - upperPrunableHeight = Math.min(upperPrunableHeight, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1); - } - - LOGGER.info("Starting bulk prune of blocks - this process could take a while... (approx. 5 mins on high spec)"); - - while (pruneStartHeight < upperPrunableHeight) { - // Prune all blocks up until our latest minus pruneBlockLimit - - int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize(); - int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight); - - LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight)); - - int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight); - repository.saveChanges(); - - if (numBlocksPruned > 0) { - LOGGER.info(String.format("Pruned %d block%s between %d and %d", - numBlocksPruned, (numBlocksPruned != 1 ? 
"s" : ""), - pruneStartHeight, upperPruneHeight)); - } else { - final int nextPruneHeight = upperPruneHeight + 1; - repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight); - repository.saveChanges(); - LOGGER.debug(String.format("Bumping block base prune height to %d", nextPruneHeight)); - - // Can we move onto next batch? - if (upperPrunableHeight > nextPruneHeight) { - pruneStartHeight = nextPruneHeight; - } - else { - // We've finished pruning - break; - } - } - } - - return true; - } - - public static void performMaintenance(Repository repository) throws SQLException, DataException { - try { - SplashFrame.getInstance().updateStatus("Performing maintenance..."); - - // Timeout if the database isn't ready for backing up after 5 minutes - // Nothing else should be using the db at this point, so a timeout shouldn't happen - long timeout = 5 * 60 * 1000L; - repository.performPeriodicMaintenance(timeout); - - } catch (TimeoutException e) { - LOGGER.info("Attempt to perform maintenance failed due to timeout: {}", e.getMessage()); - } - } - -} From 0b05de22a04d452b962df5557e06c4a456773e49 Mon Sep 17 00:00:00 2001 From: CalDescent Date: Fri, 3 Mar 2023 16:14:43 +0000 Subject: [PATCH 09/16] Rebuild name in ArbitraryTransaction.preProcess() --- .../org/qortal/transaction/ArbitraryTransaction.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java index 50d8ccad..3330a84c 100644 --- a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java +++ b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java @@ -9,6 +9,7 @@ import org.qortal.account.Account; import org.qortal.block.BlockChain; import org.qortal.controller.arbitrary.ArbitraryDataManager; import org.qortal.controller.arbitrary.ArbitraryDataStorageManager; +import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.crypto.Crypto; import org.qortal.crypto.MemoryPoW; import org.qortal.data.PaymentData; @@ -241,7 +242,13 @@ public class ArbitraryTransaction extends Transaction { @Override public void preProcess() throws DataException { - // Nothing to do + ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData; + + // Rebuild this name in the Names table from the transaction history + // This is necessary because in some rare cases names can be missing from the Names table after registration + // but we have been unable to reproduce the issue and track down the root cause + NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck(); + namesDatabaseIntegrityCheck.rebuildName(arbitraryTransactionData.getName(), this.repository); } @Override From 7d7cea3278fe8897c074aa5f1f29ecc52f690715 Mon Sep 17 00:00:00 2001 From: CalDescent Date: Fri, 3 Mar 2023 17:10:14 +0000 Subject: [PATCH 10/16] Only rebuild if transaction has a name. 
--- src/main/java/org/qortal/transaction/ArbitraryTransaction.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java index 3330a84c..7e7d4040 100644 --- a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java +++ b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java @@ -243,6 +243,8 @@ public class ArbitraryTransaction extends Transaction { @Override public void preProcess() throws DataException { ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData; + if (arbitraryTransactionData.getName() == null) + return; // Rebuild this name in the Names table from the transaction history // This is necessary because in some rare cases names can be missing from the Names table after registration From 7f21ea7e0044acf9ac0a8a3ceb81341592558218 Mon Sep 17 00:00:00 2001 From: CalDescent Date: Sun, 5 Mar 2023 13:16:58 +0000 Subject: [PATCH 11/16] Added new bootstrap host --- src/main/java/org/qortal/settings/Settings.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/org/qortal/settings/Settings.java b/src/main/java/org/qortal/settings/Settings.java index ae5dc173..05012b41 100644 --- a/src/main/java/org/qortal/settings/Settings.java +++ b/src/main/java/org/qortal/settings/Settings.java @@ -273,6 +273,7 @@ public class Settings { private String[] bootstrapHosts = new String[] { "http://bootstrap.qortal.org", "http://bootstrap2.qortal.org", + "http://bootstrap3.qortal.org", "http://bootstrap.qortal.online" }; From 3739920ad38d4750553be2c54a8b25de7588cabd Mon Sep 17 00:00:00 2001 From: CalDescent Date: Mon, 6 Mar 2023 13:17:48 +0000 Subject: [PATCH 12/16] Added support for an optional fee in arbitrary transactions, to give the option for data to be published instantly (i.e. no proof of work / mempow required when fee is sufficient). Takes effect at a future undecided timestamp. 
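In practice this means a transaction needs either a sufficient fee or a computed nonce once the feature timestamp passes. A minimal pair of helpers sketching the decision that the ArbitraryTransaction changes below implement; the helper names are illustrative, and the real checks live in isFeeValid() and isSignatureValid():

    // After activation, any non-zero fee must pass the normal fee check (super.isFeeValid()).
    static boolean requiresSufficientFee(long txTimestamp, long fee, long featureTimestamp) {
        return txTimestamp >= featureTimestamp && fee != 0L;
    }

    // Before activation, or whenever the fee is zero, the MemoryPoW nonce is still required.
    static boolean requiresNonce(long txTimestamp, long fee, long featureTimestamp) {
        return txTimestamp < featureTimestamp || fee == 0L;
    }

The new tests further down exercise exactly these combinations: a sufficient fee with and without a nonce, an insufficient fee, a zero fee, and the pre-trigger behaviour.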
--- .../api/resource/ArbitraryResource.java | 35 +- .../ArbitraryDataTransactionBuilder.java | 6 +- .../java/org/qortal/block/BlockChain.java | 7 +- .../transaction/ArbitraryTransaction.java | 18 +- src/main/resources/blockchain.json | 3 +- .../ArbitraryDataStoragePolicyTests.java | 2 +- .../ArbitraryTransactionMetadataTests.java | 8 +- .../arbitrary/ArbitraryTransactionTests.java | 344 +++++++++++++++++- .../qortal/test/common/ArbitraryUtils.java | 11 +- .../test-chain-v2-block-timestamps.json | 3 +- .../test-chain-v2-disable-reference.json | 3 +- .../test-chain-v2-founder-rewards.json | 3 +- .../test-chain-v2-leftover-reward.json | 3 +- src/test/resources/test-chain-v2-minting.json | 5 +- .../test-chain-v2-qora-holder-extremes.json | 3 +- .../test-chain-v2-qora-holder-reduction.json | 3 +- .../resources/test-chain-v2-qora-holder.json | 3 +- .../test-chain-v2-reward-levels.json | 3 +- .../test-chain-v2-reward-scaling.json | 3 +- .../test-chain-v2-reward-shares.json | 3 +- .../test-chain-v2-self-sponsorship-algo.json | 3 +- src/test/resources/test-chain-v2.json | 3 +- 22 files changed, 432 insertions(+), 43 deletions(-) diff --git a/src/main/java/org/qortal/api/resource/ArbitraryResource.java b/src/main/java/org/qortal/api/resource/ArbitraryResource.java index 0df81d9b..235e3edc 100644 --- a/src/main/java/org/qortal/api/resource/ArbitraryResource.java +++ b/src/main/java/org/qortal/api/resource/ArbitraryResource.java @@ -773,6 +773,7 @@ public class ArbitraryResource { @QueryParam("description") String description, @QueryParam("tags") List tags, @QueryParam("category") Category category, + @QueryParam("fee") Long fee, String path) { Security.checkApiCallAllowed(request); @@ -781,7 +782,7 @@ public class ArbitraryResource { } return this.upload(Service.valueOf(serviceString), name, null, path, null, null, false, - title, description, tags, category); + fee, title, description, tags, category); } @POST @@ -818,6 +819,7 @@ public class ArbitraryResource { @QueryParam("description") String description, @QueryParam("tags") List tags, @QueryParam("category") Category category, + @QueryParam("fee") Long fee, String path) { Security.checkApiCallAllowed(request); @@ -826,7 +828,7 @@ public class ArbitraryResource { } return this.upload(Service.valueOf(serviceString), name, identifier, path, null, null, false, - title, description, tags, category); + fee, title, description, tags, category); } @@ -864,6 +866,7 @@ public class ArbitraryResource { @QueryParam("description") String description, @QueryParam("tags") List tags, @QueryParam("category") Category category, + @QueryParam("fee") Long fee, String base64) { Security.checkApiCallAllowed(request); @@ -872,7 +875,7 @@ public class ArbitraryResource { } return this.upload(Service.valueOf(serviceString), name, null, null, null, base64, false, - title, description, tags, category); + fee, title, description, tags, category); } @POST @@ -907,6 +910,7 @@ public class ArbitraryResource { @QueryParam("description") String description, @QueryParam("tags") List tags, @QueryParam("category") Category category, + @QueryParam("fee") Long fee, String base64) { Security.checkApiCallAllowed(request); @@ -915,7 +919,7 @@ public class ArbitraryResource { } return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64, false, - title, description, tags, category); + fee, title, description, tags, category); } @@ -952,6 +956,7 @@ public class ArbitraryResource { @QueryParam("description") String description, @QueryParam("tags") List tags, 
@QueryParam("category") Category category, + @QueryParam("fee") Long fee, String base64Zip) { Security.checkApiCallAllowed(request); @@ -960,7 +965,7 @@ public class ArbitraryResource { } return this.upload(Service.valueOf(serviceString), name, null, null, null, base64Zip, true, - title, description, tags, category); + fee, title, description, tags, category); } @POST @@ -995,6 +1000,7 @@ public class ArbitraryResource { @QueryParam("description") String description, @QueryParam("tags") List tags, @QueryParam("category") Category category, + @QueryParam("fee") Long fee, String base64Zip) { Security.checkApiCallAllowed(request); @@ -1003,7 +1009,7 @@ public class ArbitraryResource { } return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64Zip, true, - title, description, tags, category); + fee, title, description, tags, category); } @@ -1043,6 +1049,7 @@ public class ArbitraryResource { @QueryParam("description") String description, @QueryParam("tags") List tags, @QueryParam("category") Category category, + @QueryParam("fee") Long fee, String string) { Security.checkApiCallAllowed(request); @@ -1051,7 +1058,7 @@ public class ArbitraryResource { } return this.upload(Service.valueOf(serviceString), name, null, null, string, null, false, - title, description, tags, category); + fee, title, description, tags, category); } @POST @@ -1088,6 +1095,7 @@ public class ArbitraryResource { @QueryParam("description") String description, @QueryParam("tags") List tags, @QueryParam("category") Category category, + @QueryParam("fee") Long fee, String string) { Security.checkApiCallAllowed(request); @@ -1096,14 +1104,14 @@ public class ArbitraryResource { } return this.upload(Service.valueOf(serviceString), name, identifier, null, string, null, false, - title, description, tags, category); + fee, title, description, tags, category); } // Shared methods - private String upload(Service service, String name, String identifier, - String path, String string, String base64, boolean zipped, + private String upload(Service service, String name, String identifier, String path, + String string, String base64, boolean zipped, Long fee, String title, String description, List tags, Category category) { // Fetch public key from registered name try (final Repository repository = RepositoryManager.getRepository()) { @@ -1167,9 +1175,14 @@ public class ArbitraryResource { } } + // Default to zero fee if not specified + if (fee == null) { + fee = 0L; + } + try { ArbitraryDataTransactionBuilder transactionBuilder = new ArbitraryDataTransactionBuilder( - repository, publicKey58, Paths.get(path), name, null, service, identifier, + repository, publicKey58, fee, Paths.get(path), name, null, service, identifier, title, description, tags, category ); diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java index 0f3d4357..b27e511c 100644 --- a/src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java @@ -46,6 +46,7 @@ public class ArbitraryDataTransactionBuilder { private static final double MAX_FILE_DIFF = 0.5f; private final String publicKey58; + private final long fee; private final Path path; private final String name; private Method method; @@ -64,11 +65,12 @@ public class ArbitraryDataTransactionBuilder { private ArbitraryTransactionData arbitraryTransactionData; private ArbitraryDataFile 
arbitraryDataFile; - public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, Path path, String name, + public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, long fee, Path path, String name, Method method, Service service, String identifier, String title, String description, List tags, Category category) { this.repository = repository; this.publicKey58 = publicKey58; + this.fee = fee; this.path = path; this.name = name; this.method = method; @@ -261,7 +263,7 @@ public class ArbitraryDataTransactionBuilder { } final BaseTransactionData baseTransactionData = new BaseTransactionData(now, Group.NO_GROUP, - lastReference, creatorPublicKey, 0L, null); + lastReference, creatorPublicKey, fee, null); final int size = (int) arbitraryDataFile.size(); final int version = 5; final int nonce = 0; diff --git a/src/main/java/org/qortal/block/BlockChain.java b/src/main/java/org/qortal/block/BlockChain.java index b96350e6..88880887 100644 --- a/src/main/java/org/qortal/block/BlockChain.java +++ b/src/main/java/org/qortal/block/BlockChain.java @@ -78,7 +78,8 @@ public class BlockChain { onlineAccountMinterLevelValidationHeight, selfSponsorshipAlgoV1Height, feeValidationFixTimestamp, - chatReferenceTimestamp; + chatReferenceTimestamp, + arbitraryOptionalFeeTimestamp; } // Custom transaction fees @@ -522,6 +523,10 @@ public class BlockChain { return this.featureTriggers.get(FeatureTrigger.chatReferenceTimestamp.name()).longValue(); } + public long getArbitraryOptionalFeeTimestamp() { + return this.featureTriggers.get(FeatureTrigger.arbitraryOptionalFeeTimestamp.name()).longValue(); + } + // More complex getters for aspects that change by height or timestamp diff --git a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java index 7e7d4040..3452f916 100644 --- a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java +++ b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java @@ -88,6 +88,12 @@ public class ArbitraryTransaction extends Transaction { if (this.transactionData.getFee() < 0) return ValidationResult.NEGATIVE_FEE; + // After the feature trigger, we require the fee to be sufficient if it's not 0. 
+ // If the fee is zero, then the nonce is validated in isSignatureValid() as an alternative to a fee + if (this.arbitraryTransactionData.getTimestamp() >= BlockChain.getInstance().getArbitraryOptionalFeeTimestamp() && this.arbitraryTransactionData.getFee() != 0L) { + return super.isFeeValid(); + } + return ValidationResult.OK; } @@ -208,10 +214,14 @@ public class ArbitraryTransaction extends Transaction { // Clear nonce from transactionBytes ArbitraryTransactionTransformer.clearNonce(transactionBytes); - // We only need to check nonce for recent transactions due to PoW verification overhead - if (NTP.getTime() - this.arbitraryTransactionData.getTimestamp() < HISTORIC_THRESHOLD) { - int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty(); - return MemoryPoW.verify2(transactionBytes, POW_BUFFER_SIZE, difficulty, nonce); + // As of feature-trigger timestamp, we only require a nonce when the fee is zero + boolean beforeFeatureTrigger = this.arbitraryTransactionData.getTimestamp() < BlockChain.getInstance().getArbitraryOptionalFeeTimestamp(); + if (beforeFeatureTrigger || this.arbitraryTransactionData.getFee() == 0L) { + // We only need to check nonce for recent transactions due to PoW verification overhead + if (NTP.getTime() - this.arbitraryTransactionData.getTimestamp() < HISTORIC_THRESHOLD) { + int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty(); + return MemoryPoW.verify2(transactionBytes, POW_BUFFER_SIZE, difficulty, nonce); + } } } diff --git a/src/main/resources/blockchain.json b/src/main/resources/blockchain.json index 46b4b4f9..7ce93a28 100644 --- a/src/main/resources/blockchain.json +++ b/src/main/resources/blockchain.json @@ -85,7 +85,8 @@ "onlineAccountMinterLevelValidationHeight": 1092000, "selfSponsorshipAlgoV1Height": 1092400, "feeValidationFixTimestamp": 1671918000000, - "chatReferenceTimestamp": 1674316800000 + "chatReferenceTimestamp": 1674316800000, + "arbitraryOptionalFeeTimestamp": 9999999999999 }, "checkpoints": [ { "height": 1136300, "signature": "3BbwawEF2uN8Ni5ofpJXkukoU8ctAPxYoFB7whq9pKfBnjfZcpfEJT4R95NvBDoTP8WDyWvsUvbfHbcr9qSZuYpSKZjUQTvdFf6eqznHGEwhZApWfvXu6zjGCxYCp65F4jsVYYJjkzbjmkCg5WAwN5voudngA23kMK6PpTNygapCzXt" } diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java index 9bf76127..49e645cf 100644 --- a/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java @@ -246,7 +246,7 @@ public class ArbitraryDataStoragePolicyTests extends Common { Path path = Paths.get("src/test/resources/arbitrary/demo1"); ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder( - repository, publicKey58, path, name, Method.PUT, Service.ARBITRARY_DATA, null, + repository, publicKey58, 0L, path, name, Method.PUT, Service.ARBITRARY_DATA, null, null, null, null, null); txnBuilder.build(); diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java index 5d28568d..bf4f0a70 100644 --- a/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java @@ -107,7 +107,7 @@ public class ArbitraryTransactionMetadataTests extends Common { // Create PUT transaction Path path1 = 
ArbitraryUtils.generateRandomDataPath(dataLength); ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, - identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, + identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true, title, description, tags, category); // Check the chunk count is correct @@ -157,7 +157,7 @@ public class ArbitraryTransactionMetadataTests extends Common { // Create PUT transaction Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, - identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, + identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true, title, description, tags, category); // Check the chunk count is correct @@ -219,7 +219,7 @@ public class ArbitraryTransactionMetadataTests extends Common { // Create PUT transaction Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, - identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, + identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true, title, description, tags, category); // Check the chunk count is correct @@ -273,7 +273,7 @@ public class ArbitraryTransactionMetadataTests extends Common { // Create PUT transaction Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, - identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, + identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true, title, description, tags, category); // Check the metadata is correct diff --git a/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java b/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java index 294e463e..2c2d52b2 100644 --- a/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java +++ b/src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java @@ -5,6 +5,7 @@ import org.junit.Before; import org.junit.Test; import org.qortal.account.PrivateKeyAccount; import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataTransactionBuilder; import org.qortal.arbitrary.exception.MissingDataException; import org.qortal.arbitrary.misc.Service; import org.qortal.controller.arbitrary.ArbitraryDataManager; @@ -20,9 +21,11 @@ import org.qortal.test.common.TransactionUtils; import org.qortal.test.common.transaction.TestTransaction; import org.qortal.transaction.ArbitraryTransaction; import org.qortal.transaction.RegisterNameTransaction; +import org.qortal.transaction.Transaction; import org.qortal.utils.Base58; import org.qortal.utils.NTP; +import javax.xml.crypto.Data; import java.io.IOException; import java.nio.file.Path; @@ -36,7 +39,7 @@ public class ArbitraryTransactionTests extends Common { } @Test - public void testDifficultyTooLow() throws IllegalAccessException, DataException, IOException, MissingDataException { + public void testDifficultyTooLow() throws IllegalAccessException, DataException, IOException { try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); String 
publicKey58 = Base58.encode(alice.getPublicKey()); @@ -78,7 +81,346 @@ public class ArbitraryTransactionTests extends Common { assertTrue(transaction.isSignatureValid()); } + } + + @Test + public void testNonceAndFee() throws IllegalAccessException, DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp())); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction, with a fee + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + long fee = 10000000; // sufficient + boolean computeNonce = true; + ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null); + + // Check that nonce validation succeeds + byte[] signature = arbitraryDataFile.getSignature(); + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData); + assertTrue(transaction.isSignatureValid()); + + // Increase difficulty to 15 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true); + + // Make sure that nonce validation still succeeds, as the fee has allowed us to avoid including a nonce + assertTrue(transaction.isSignatureValid()); + } + } + @Test + public void testNonceAndLowFee() throws IllegalAccessException, DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp())); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction, with a fee that is too low + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + long fee = 9999999; // insufficient + boolean computeNonce = true; + boolean 
insufficientFeeDetected = false; + try { + ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null); + } + catch (DataException e) { + if (e.getMessage().contains("INSUFFICIENT_FEE")) { + insufficientFeeDetected = true; + } + } + + // Transaction should be invalid due to an insufficient fee + assertTrue(insufficientFeeDetected); + } } + @Test + public void testFeeNoNonce() throws IllegalAccessException, DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp())); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction, with a fee + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + long fee = 10000000; // sufficient + boolean computeNonce = false; + ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null); + + // Check that nonce validation succeeds, even though it wasn't computed. This is because we have included a sufficient fee. 
+ byte[] signature = arbitraryDataFile.getSignature(); + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData); + assertTrue(transaction.isSignatureValid()); + + // Increase difficulty to 15 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true); + + // Make sure that nonce validation still succeeds, as the fee has allowed us to avoid including a nonce + assertTrue(transaction.isSignatureValid()); + } + } + + @Test + public void testLowFeeNoNonce() throws IllegalAccessException, DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp())); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction, with a fee that is too low. Also, don't compute a nonce. + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + long fee = 9999999; // insufficient + + ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder( + repository, publicKey58, fee, path1, name, ArbitraryTransactionData.Method.PUT, service, identifier, null, null, null, null); + + txnBuilder.setChunkSize(chunkSize); + txnBuilder.build(); + ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData(); + Transaction.ValidationResult result = TransactionUtils.signAndImport(repository, transactionData, alice); + + // Transaction should be invalid due to an insufficient fee + assertEquals(Transaction.ValidationResult.INSUFFICIENT_FEE, result); + } + } + + @Test + public void testZeroFeeNoNonce() throws IllegalAccessException, DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp())); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction, 
with a fee that is too low. Also, don't compute a nonce. + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + long fee = 0L; + + ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder( + repository, publicKey58, fee, path1, name, ArbitraryTransactionData.Method.PUT, service, identifier, null, null, null, null); + + txnBuilder.setChunkSize(chunkSize); + txnBuilder.build(); + ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData(); + ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData); + + // Transaction should be invalid + assertFalse(arbitraryTransaction.isSignatureValid()); + } + } + + @Test + public void testNonceAndFeeBeforeFeatureTrigger() throws IllegalAccessException, DataException, IOException { + // Use v2-minting settings, as these are pre-feature-trigger + Common.useSettings("test-settings-v2-minting.json"); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp())); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction, with a fee + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + long fee = 10000000; // sufficient + boolean computeNonce = true; + ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null); + + // Check that nonce validation succeeds + byte[] signature = arbitraryDataFile.getSignature(); + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData); + assertTrue(transaction.isSignatureValid()); + + // Increase difficulty to 15 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true); + + // Make sure the nonce validation fails, as we aren't allowing a fee to replace a nonce yet. + // Note: there is a very tiny chance this could succeed due to being extremely lucky + // and finding a high difficulty nonce in the first couple of cycles. It will be rare + // enough that we shouldn't need to account for it. 
+ assertFalse(transaction.isSignatureValid()); + + // Reduce difficulty back to 1, to double check + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + assertTrue(transaction.isSignatureValid()); + } + } + + @Test + public void testNonceAndInsufficientFeeBeforeFeatureTrigger() throws IllegalAccessException, DataException, IOException { + // Use v2-minting settings, as these are pre-feature-trigger + Common.useSettings("test-settings-v2-minting.json"); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp())); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction, with a fee + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + long fee = 9999999; // insufficient + boolean computeNonce = true; + ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null); + + // Check that nonce validation succeeds + byte[] signature = arbitraryDataFile.getSignature(); + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData); + assertTrue(transaction.isSignatureValid()); + + // The transaction should be valid because we don't care about the fee (before the feature trigger) + assertEquals(Transaction.ValidationResult.OK, transaction.isValidUnconfirmed()); + + // Increase difficulty to 15 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true); + + // Make sure the nonce validation fails, as we aren't allowing a fee to replace a nonce yet (and it was insufficient anyway) + // Note: there is a very tiny chance this could succeed due to being extremely lucky + // and finding a high difficulty nonce in the first couple of cycles. It will be rare + // enough that we shouldn't need to account for it. 
+ assertFalse(transaction.isSignatureValid()); + + // Reduce difficulty back to 1, to double check + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + assertTrue(transaction.isSignatureValid()); + } + } + + @Test + public void testNonceAndZeroFeeBeforeFeatureTrigger() throws IllegalAccessException, DataException, IOException { + // Use v2-minting settings, as these are pre-feature-trigger + Common.useSettings("test-settings-v2-minting.json"); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String publicKey58 = Base58.encode(alice.getPublicKey()); + String name = "TEST"; // Can be anything for this test + String identifier = null; // Not used for this test + Service service = Service.ARBITRARY_DATA; + int chunkSize = 100; + int dataLength = 900; // Actual data length will be longer due to encryption + + // Register the name to Alice + RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, ""); + registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp())); + TransactionUtils.signAndMint(repository, registerNameTransactionData, alice); + + // Set difficulty to 1 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + + // Create PUT transaction, with a fee + Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength); + long fee = 0L; + boolean computeNonce = true; + ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null); + + // Check that nonce validation succeeds + byte[] signature = arbitraryDataFile.getSignature(); + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData); + assertTrue(transaction.isSignatureValid()); + + // The transaction should be valid because we don't care about the fee (before the feature trigger) + assertEquals(Transaction.ValidationResult.OK, transaction.isValidUnconfirmed()); + + // Increase difficulty to 15 + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true); + + // Make sure the nonce validation fails, as we aren't allowing a fee to replace a nonce yet (and it was insufficient anyway) + // Note: there is a very tiny chance this could succeed due to being extremely lucky + // and finding a high difficulty nonce in the first couple of cycles. It will be rare + // enough that we shouldn't need to account for it. 
+ assertFalse(transaction.isSignatureValid()); + + // Reduce difficulty back to 1, to double check + FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true); + assertTrue(transaction.isSignatureValid()); + } + } } diff --git a/src/test/java/org/qortal/test/common/ArbitraryUtils.java b/src/test/java/org/qortal/test/common/ArbitraryUtils.java index 81abf47f..73dc8097 100644 --- a/src/test/java/org/qortal/test/common/ArbitraryUtils.java +++ b/src/test/java/org/qortal/test/common/ArbitraryUtils.java @@ -29,19 +29,22 @@ public class ArbitraryUtils { int chunkSize) throws DataException { return ArbitraryUtils.createAndMintTxn(repository, publicKey58, path, name, identifier, method, service, - account, chunkSize, null, null, null, null); + account, chunkSize, 0L, true, null, null, null, null); } public static ArbitraryDataFile createAndMintTxn(Repository repository, String publicKey58, Path path, String name, String identifier, ArbitraryTransactionData.Method method, Service service, PrivateKeyAccount account, - int chunkSize, String title, String description, List tags, Category category) throws DataException { + int chunkSize, long fee, boolean computeNonce, + String title, String description, List tags, Category category) throws DataException { ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder( - repository, publicKey58, path, name, method, service, identifier, title, description, tags, category); + repository, publicKey58, fee, path, name, method, service, identifier, title, description, tags, category); txnBuilder.setChunkSize(chunkSize); txnBuilder.build(); - txnBuilder.computeNonce(); + if (computeNonce) { + txnBuilder.computeNonce(); + } ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData(); Transaction.ValidationResult result = TransactionUtils.signAndImport(repository, transactionData, account); assertEquals(Transaction.ValidationResult.OK, result); diff --git a/src/test/resources/test-chain-v2-block-timestamps.json b/src/test/resources/test-chain-v2-block-timestamps.json index 8c2e0503..3b4de702 100644 --- a/src/test/resources/test-chain-v2-block-timestamps.json +++ b/src/test/resources/test-chain-v2-block-timestamps.json @@ -75,7 +75,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-disable-reference.json b/src/test/resources/test-chain-v2-disable-reference.json index f7f8e7d8..c93fbb78 100644 --- a/src/test/resources/test-chain-v2-disable-reference.json +++ b/src/test/resources/test-chain-v2-disable-reference.json @@ -78,7 +78,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-founder-rewards.json b/src/test/resources/test-chain-v2-founder-rewards.json index 20d10233..1b068932 100644 --- a/src/test/resources/test-chain-v2-founder-rewards.json +++ b/src/test/resources/test-chain-v2-founder-rewards.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + 
"arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-leftover-reward.json b/src/test/resources/test-chain-v2-leftover-reward.json index e71ebab6..aef76cc2 100644 --- a/src/test/resources/test-chain-v2-leftover-reward.json +++ b/src/test/resources/test-chain-v2-leftover-reward.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-minting.json b/src/test/resources/test-chain-v2-minting.json index 2a388e1f..db6d8a0b 100644 --- a/src/test/resources/test-chain-v2-minting.json +++ b/src/test/resources/test-chain-v2-minting.json @@ -74,12 +74,13 @@ "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, "transactionV6Timestamp": 0, - "disableReferenceTimestamp": 9999999999999, + "disableReferenceTimestamp": 0, "increaseOnlineAccountsDifficultyTimestamp": 9999999999999, "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 9999999999999 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-qora-holder-extremes.json b/src/test/resources/test-chain-v2-qora-holder-extremes.json index cface0e7..2452d4d2 100644 --- a/src/test/resources/test-chain-v2-qora-holder-extremes.json +++ b/src/test/resources/test-chain-v2-qora-holder-extremes.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-qora-holder-reduction.json b/src/test/resources/test-chain-v2-qora-holder-reduction.json index f233680b..23193729 100644 --- a/src/test/resources/test-chain-v2-qora-holder-reduction.json +++ b/src/test/resources/test-chain-v2-qora-holder-reduction.json @@ -80,7 +80,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-qora-holder.json b/src/test/resources/test-chain-v2-qora-holder.json index 4ea82290..9d81632b 100644 --- a/src/test/resources/test-chain-v2-qora-holder.json +++ b/src/test/resources/test-chain-v2-qora-holder.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-reward-levels.json b/src/test/resources/test-chain-v2-reward-levels.json index 5de8d9ff..81609595 100644 --- a/src/test/resources/test-chain-v2-reward-levels.json +++ b/src/test/resources/test-chain-v2-reward-levels.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, 
"genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-reward-scaling.json b/src/test/resources/test-chain-v2-reward-scaling.json index c008ed42..21a5b7a7 100644 --- a/src/test/resources/test-chain-v2-reward-scaling.json +++ b/src/test/resources/test-chain-v2-reward-scaling.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-reward-shares.json b/src/test/resources/test-chain-v2-reward-shares.json index 2fc0151f..6119ac48 100644 --- a/src/test/resources/test-chain-v2-reward-shares.json +++ b/src/test/resources/test-chain-v2-reward-shares.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-self-sponsorship-algo.json b/src/test/resources/test-chain-v2-self-sponsorship-algo.json index 68b33cc3..dc5f3961 100644 --- a/src/test/resources/test-chain-v2-self-sponsorship-algo.json +++ b/src/test/resources/test-chain-v2-self-sponsorship-algo.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 20, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2.json b/src/test/resources/test-chain-v2.json index 63abc695..d0c460df 100644 --- a/src/test/resources/test-chain-v2.json +++ b/src/test/resources/test-chain-v2.json @@ -79,7 +79,8 @@ "onlineAccountMinterLevelValidationHeight": 0, "selfSponsorshipAlgoV1Height": 999999999, "feeValidationFixTimestamp": 0, - "chatReferenceTimestamp": 0 + "chatReferenceTimestamp": 0, + "arbitraryOptionalFeeTimestamp": 0 }, "genesisInfo": { "version": 4, From b6803490b9f84309bc518ca9cc3e7af5467420be Mon Sep 17 00:00:00 2001 From: CalDescent Date: Mon, 6 Mar 2023 14:13:58 +0000 Subject: [PATCH 13/16] Archive version is now loaded from the version of block 2 in the existing archive, or "defaultArchiveVersion" in settings if not available (default: 1). 
--- .../qortal/api/resource/AdminResource.java | 24 ++++++++++++++----- .../qortal/repository/BlockArchiveReader.java | 13 ++++++++++ .../qortal/repository/BlockArchiveWriter.java | 24 +++++++++++++++---- .../java/org/qortal/settings/Settings.java | 6 ++--- 4 files changed, 54 insertions(+), 13 deletions(-) diff --git a/src/main/java/org/qortal/api/resource/AdminResource.java b/src/main/java/org/qortal/api/resource/AdminResource.java index 0531f60d..ef2a3f95 100644 --- a/src/main/java/org/qortal/api/resource/AdminResource.java +++ b/src/main/java/org/qortal/api/resource/AdminResource.java @@ -738,8 +738,17 @@ public class AdminResource { @POST @Path("/repository/archive/rebuild") @Operation( - summary = "Rebuild archive.", - description = "Rebuilds archive files, using the serialization version specified via the archiveVersion setting.", + summary = "Rebuild archive", + description = "Rebuilds archive files, using the specified serialization version", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.TEXT_PLAIN, + schema = @Schema( + type = "number", example = "2" + ) + ) + ), responses = { @ApiResponse( description = "\"true\"", @@ -749,9 +758,14 @@ public class AdminResource { ) @ApiErrors({ApiError.REPOSITORY_ISSUE}) @SecurityRequirement(name = "apiKey") - public String rebuildArchive(@HeaderParam(Security.API_KEY_HEADER) String apiKey) { + public String rebuildArchive(@HeaderParam(Security.API_KEY_HEADER) String apiKey, Integer serializationVersion) { Security.checkApiCallAllowed(request); + // Default serialization version to value specified in settings + if (serializationVersion == null) { + serializationVersion = Settings.getInstance().getDefaultArchiveVersion(); + } + try { // We don't actually need to lock the blockchain here, but we'll do it anyway so that // the node can focus on rebuilding rather than synchronizing / minting. 
@@ -760,9 +774,7 @@ public class AdminResource { blockchainLock.lockInterruptibly(); try { - int archiveVersion = Settings.getInstance().getArchiveVersion(); - - BlockArchiveRebuilder blockArchiveRebuilder = new BlockArchiveRebuilder(archiveVersion); + BlockArchiveRebuilder blockArchiveRebuilder = new BlockArchiveRebuilder(serializationVersion); blockArchiveRebuilder.start(); return "true"; diff --git a/src/main/java/org/qortal/repository/BlockArchiveReader.java b/src/main/java/org/qortal/repository/BlockArchiveReader.java index e45f1fdf..1f04bced 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveReader.java +++ b/src/main/java/org/qortal/repository/BlockArchiveReader.java @@ -64,6 +64,19 @@ public class BlockArchiveReader { this.fileListCache = Map.copyOf(map); } + public Integer fetchSerializationVersionForHeight(int height) { + if (this.fileListCache == null) { + this.fetchFileList(); + } + + Triple serializedBlock = this.fetchSerializedBlockBytesForHeight(height); + if (serializedBlock == null) { + return null; + } + Integer serializationVersion = serializedBlock.getB(); + return serializationVersion; + } + public BlockTransformation fetchBlockAtHeight(int height) { if (this.fileListCache == null) { this.fetchFileList(); diff --git a/src/main/java/org/qortal/repository/BlockArchiveWriter.java b/src/main/java/org/qortal/repository/BlockArchiveWriter.java index 87d0a93c..8f4d4498 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveWriter.java +++ b/src/main/java/org/qortal/repository/BlockArchiveWriter.java @@ -43,7 +43,7 @@ public class BlockArchiveWriter { private int startHeight; private final int endHeight; - private final int serializationVersion; + private final Integer serializationVersion; private final Path archivePath; private final Repository repository; @@ -65,12 +65,17 @@ public class BlockArchiveWriter { * @param endHeight * @param repository */ - public BlockArchiveWriter(int startHeight, int endHeight, int serializationVersion, Path archivePath, Repository repository) { + public BlockArchiveWriter(int startHeight, int endHeight, Integer serializationVersion, Path archivePath, Repository repository) { this.startHeight = startHeight; this.endHeight = endHeight; - this.serializationVersion = serializationVersion; this.archivePath = archivePath.toAbsolutePath(); this.repository = repository; + + if (serializationVersion == null) { + // When serialization version isn't specified, fetch it from the existing archive + serializationVersion = this.findSerializationVersion(); + } + this.serializationVersion = serializationVersion; } /** @@ -80,7 +85,18 @@ public class BlockArchiveWriter { * @param repository */ public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) { - this(startHeight, endHeight, Settings.getInstance().getArchiveVersion(), Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository); + this(startHeight, endHeight, null, Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository); + } + + private int findSerializationVersion() { + // Attempt to fetch the serialization version from the existing archive + Integer block2SerializationVersion = BlockArchiveReader.getInstance().fetchSerializationVersionForHeight(2); + if (block2SerializationVersion != null) { + return block2SerializationVersion; + } + + // Default to version specified in settings + return Settings.getInstance().getDefaultArchiveVersion(); } public static int getMaxArchiveHeight(Repository repository) throws DataException { 
diff --git a/src/main/java/org/qortal/settings/Settings.java b/src/main/java/org/qortal/settings/Settings.java index 52b3aed5..d3405d4e 100644 --- a/src/main/java/org/qortal/settings/Settings.java +++ b/src/main/java/org/qortal/settings/Settings.java @@ -179,7 +179,7 @@ public class Settings { /** How often to attempt archiving (ms). */ private long archiveInterval = 7171L; // milliseconds /** Serialization version to use when building an archive */ - private int archiveVersion = 1; + private int defaultArchiveVersion = 1; /** Whether to automatically bootstrap instead of syncing from genesis */ @@ -928,8 +928,8 @@ public class Settings { return this.archiveInterval; } - public int getArchiveVersion() { - return this.archiveVersion; + public int getDefaultArchiveVersion() { + return this.defaultArchiveVersion; } From 96ac8835158e2bfe30fd9d96a035c0f46289b435 Mon Sep 17 00:00:00 2001 From: CalDescent Date: Mon, 6 Mar 2023 14:40:17 +0000 Subject: [PATCH 14/16] Throw exception and break out of loop if archive rebuilding fails --- .../org/qortal/controller/repository/BlockArchiveRebuilder.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java b/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java index 78616a99..63579d3c 100644 --- a/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java +++ b/src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java @@ -106,6 +106,7 @@ public class BlockArchiveRebuilder { } catch (IOException | TransformationException e) { LOGGER.info("Caught exception when rebuilding block archive", e); + throw new DataException("Unable to rebuild block archive"); } } From b1452bddf3123bdd3c3506e16682b598c93e7876 Mon Sep 17 00:00:00 2001 From: CalDescent Date: Mon, 6 Mar 2023 17:17:55 +0000 Subject: [PATCH 15/16] Added BlockArchiveV2 tests, and updated the V1 tests now that we no longer support bulk archiving/pruning --- ...iveTests.java => BlockArchiveV1Tests.java} | 217 +------- .../org/qortal/test/BlockArchiveV2Tests.java | 504 ++++++++++++++++++ .../test-settings-v2-block-archive.json | 3 +- 3 files changed, 512 insertions(+), 212 deletions(-) rename src/test/java/org/qortal/test/{BlockArchiveTests.java => BlockArchiveV1Tests.java} (69%) create mode 100644 src/test/java/org/qortal/test/BlockArchiveV2Tests.java diff --git a/src/test/java/org/qortal/test/BlockArchiveTests.java b/src/test/java/org/qortal/test/BlockArchiveV1Tests.java similarity index 69% rename from src/test/java/org/qortal/test/BlockArchiveTests.java rename to src/test/java/org/qortal/test/BlockArchiveV1Tests.java index 8b3de67b..a28bd28d 100644 --- a/src/test/java/org/qortal/test/BlockArchiveTests.java +++ b/src/test/java/org/qortal/test/BlockArchiveV1Tests.java @@ -1,6 +1,7 @@ package org.qortal.test; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.reflect.FieldUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -10,8 +11,6 @@ import org.qortal.data.at.ATStateData; import org.qortal.data.block.BlockData; import org.qortal.data.transaction.TransactionData; import org.qortal.repository.*; -import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving; -import org.qortal.repository.hsqldb.HSQLDBDatabasePruning; import org.qortal.repository.hsqldb.HSQLDBRepository; import org.qortal.settings.Settings; import org.qortal.test.common.AtUtils; @@ -26,7 +25,6 @@ import org.qortal.utils.NTP; import java.io.File; import 
java.io.IOException;
-import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.sql.SQLException;
@@ -34,13 +32,16 @@ import java.util.List;
 
 import static org.junit.Assert.*;
 
-public class BlockArchiveTests extends Common {
+public class BlockArchiveV1Tests extends Common {
 
 	@Before
-	public void beforeTest() throws DataException {
+	public void beforeTest() throws DataException, IllegalAccessException {
 		Common.useSettings("test-settings-v2-block-archive.json");
 		NTP.setFixedOffset(Settings.getInstance().getTestNtpOffset());
 		this.deleteArchiveDirectory();
+
+		// Set default archive version to 1, so that archive builds in these tests use V1
+		FieldUtils.writeField(Settings.getInstance(), "defaultArchiveVersion", 1, true);
 	}
 
 	@After
@@ -333,212 +334,6 @@ public class BlockArchiveTests extends Common {
 		}
 	}
 
-	@Test
-	public void testBulkArchiveAndPrune() throws DataException, SQLException {
-		try (final Repository repository = RepositoryManager.getRepository()) {
-			HSQLDBRepository hsqldb = (HSQLDBRepository) repository;
-
-			// Deploy an AT so that we have AT state data
-			PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
-			byte[] creationBytes = AtUtils.buildSimpleAT();
-			long fundingAmount = 1_00000000L;
-			AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
-
-			// Mint some blocks so that we are able to archive them later
-			for (int i = 0; i < 1000; i++) {
-				BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-			}
-
-			// Assume 900 blocks are trimmed (this specifies the first untrimmed height)
-			repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
-			repository.getATRepository().setAtTrimHeight(901);
-
-			// Check the max archive height - this should be one less than the first untrimmed height
-			final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-			assertEquals(900, maximumArchiveHeight);
-
-			// Check the current archive height
-			assertEquals(0, repository.getBlockArchiveRepository().getBlockArchiveHeight());
-
-			// Write blocks 2-900 to the archive (using bulk method)
-			int fileSizeTarget = 428600; // Pre-calculated size of 900 blocks
-			assertTrue(HSQLDBDatabaseArchiving.buildBlockArchive(repository, fileSizeTarget));
-
-			// Ensure the block archive height has increased
-			assertEquals(901, repository.getBlockArchiveRepository().getBlockArchiveHeight());
-
-			// Ensure the SQL repository contains blocks 2 and 900... 
- assertNotNull(repository.getBlockRepository().fromHeight(2)); - assertNotNull(repository.getBlockRepository().fromHeight(900)); - - // Check the current prune heights - assertEquals(0, repository.getBlockRepository().getBlockPruneHeight()); - assertEquals(0, repository.getATRepository().getAtPruneHeight()); - - // Prior to archiving or pruning, ensure blocks 2 to 1002 and their AT states are available in the db - for (int i=2; i<=1002; i++) { - assertNotNull(repository.getBlockRepository().fromHeight(i)); - List atStates = repository.getATRepository().getBlockATStatesAtHeight(i); - assertNotNull(atStates); - assertEquals(1, atStates.size()); - } - - // Prune all the archived blocks and AT states (using bulk method) - assertTrue(HSQLDBDatabasePruning.pruneBlocks(hsqldb)); - assertTrue(HSQLDBDatabasePruning.pruneATStates(hsqldb)); - - // Ensure the current prune heights have increased - assertEquals(901, repository.getBlockRepository().getBlockPruneHeight()); - assertEquals(901, repository.getATRepository().getAtPruneHeight()); - - // Now ensure the SQL repository is missing blocks 2 and 900... - assertNull(repository.getBlockRepository().fromHeight(2)); - assertNull(repository.getBlockRepository().fromHeight(900)); - - // ... but it's not missing blocks 1 and 901 (we don't prune the genesis block) - assertNotNull(repository.getBlockRepository().fromHeight(1)); - assertNotNull(repository.getBlockRepository().fromHeight(901)); - - // Validate the latest block height in the repository - assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight()); - - // Ensure blocks 2-900 are all available in the archive - for (int i=2; i<=900; i++) { - assertNotNull(repository.getBlockArchiveRepository().fromHeight(i)); - } - - // Ensure blocks 2-900 are NOT available in the db - for (int i=2; i<=900; i++) { - assertNull(repository.getBlockRepository().fromHeight(i)); - } - - // Ensure blocks 901 to 1002 and their AT states are available in the db - for (int i=901; i<=1002; i++) { - assertNotNull(repository.getBlockRepository().fromHeight(i)); - List atStates = repository.getATRepository().getBlockATStatesAtHeight(i); - assertNotNull(atStates); - assertEquals(1, atStates.size()); - } - - // Ensure blocks 901 to 1002 are not available in the archive - for (int i=901; i<=1002; i++) { - assertNull(repository.getBlockArchiveRepository().fromHeight(i)); - } - } - } - - @Test - public void testBulkArchiveAndPruneMultipleFiles() throws DataException, SQLException { - try (final Repository repository = RepositoryManager.getRepository()) { - HSQLDBRepository hsqldb = (HSQLDBRepository) repository; - - // Deploy an AT so that we have AT state data - PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); - byte[] creationBytes = AtUtils.buildSimpleAT(); - long fundingAmount = 1_00000000L; - AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); - - // Mint some blocks so that we are able to archive them later - for (int i = 0; i < 1000; i++) { - BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); - } - - // Assume 900 blocks are trimmed (this specifies the first untrimmed height) - repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); - repository.getATRepository().setAtTrimHeight(901); - - // Check the max archive height - this should be one less than the first untrimmed height - final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); - assertEquals(900, 
maximumArchiveHeight); - - // Check the current archive height - assertEquals(0, repository.getBlockArchiveRepository().getBlockArchiveHeight()); - - // Write blocks 2-900 to the archive (using bulk method) - int fileSizeTarget = 42360; // Pre-calculated size of approx 90 blocks - assertTrue(HSQLDBDatabaseArchiving.buildBlockArchive(repository, fileSizeTarget)); - - // Ensure 10 archive files have been created - Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive"); - assertEquals(10, new File(archivePath.toString()).list().length); - - // Check the files exist - assertTrue(Files.exists(Paths.get(archivePath.toString(), "2-90.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "91-179.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "180-268.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "269-357.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "358-446.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "447-535.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "536-624.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "625-713.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "714-802.dat"))); - assertTrue(Files.exists(Paths.get(archivePath.toString(), "803-891.dat"))); - - // Ensure the block archive height has increased - // It won't be as high as 901, because blocks 892-901 were too small to reach the file size - // target of the 11th file - assertEquals(892, repository.getBlockArchiveRepository().getBlockArchiveHeight()); - - // Ensure the SQL repository contains blocks 2 and 891... - assertNotNull(repository.getBlockRepository().fromHeight(2)); - assertNotNull(repository.getBlockRepository().fromHeight(891)); - - // Check the current prune heights - assertEquals(0, repository.getBlockRepository().getBlockPruneHeight()); - assertEquals(0, repository.getATRepository().getAtPruneHeight()); - - // Prior to archiving or pruning, ensure blocks 2 to 1002 and their AT states are available in the db - for (int i=2; i<=1002; i++) { - assertNotNull(repository.getBlockRepository().fromHeight(i)); - List atStates = repository.getATRepository().getBlockATStatesAtHeight(i); - assertNotNull(atStates); - assertEquals(1, atStates.size()); - } - - // Prune all the archived blocks and AT states (using bulk method) - assertTrue(HSQLDBDatabasePruning.pruneBlocks(hsqldb)); - assertTrue(HSQLDBDatabasePruning.pruneATStates(hsqldb)); - - // Ensure the current prune heights have increased - assertEquals(892, repository.getBlockRepository().getBlockPruneHeight()); - assertEquals(892, repository.getATRepository().getAtPruneHeight()); - - // Now ensure the SQL repository is missing blocks 2 and 891... - assertNull(repository.getBlockRepository().fromHeight(2)); - assertNull(repository.getBlockRepository().fromHeight(891)); - - // ... 
but it's not missing blocks 1 and 901 (we don't prune the genesis block) - assertNotNull(repository.getBlockRepository().fromHeight(1)); - assertNotNull(repository.getBlockRepository().fromHeight(892)); - - // Validate the latest block height in the repository - assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight()); - - // Ensure blocks 2-891 are all available in the archive - for (int i=2; i<=891; i++) { - assertNotNull(repository.getBlockArchiveRepository().fromHeight(i)); - } - - // Ensure blocks 2-891 are NOT available in the db - for (int i=2; i<=891; i++) { - assertNull(repository.getBlockRepository().fromHeight(i)); - } - - // Ensure blocks 892 to 1002 and their AT states are available in the db - for (int i=892; i<=1002; i++) { - assertNotNull(repository.getBlockRepository().fromHeight(i)); - List atStates = repository.getATRepository().getBlockATStatesAtHeight(i); - assertNotNull(atStates); - assertEquals(1, atStates.size()); - } - - // Ensure blocks 892 to 1002 are not available in the archive - for (int i=892; i<=1002; i++) { - assertNull(repository.getBlockArchiveRepository().fromHeight(i)); - } - } - } - @Test public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException { try (final Repository repository = RepositoryManager.getRepository()) { diff --git a/src/test/java/org/qortal/test/BlockArchiveV2Tests.java b/src/test/java/org/qortal/test/BlockArchiveV2Tests.java new file mode 100644 index 00000000..3b1d12d3 --- /dev/null +++ b/src/test/java/org/qortal/test/BlockArchiveV2Tests.java @@ -0,0 +1,504 @@ +package org.qortal.test; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.controller.BlockMinter; +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.*; +import org.qortal.repository.hsqldb.HSQLDBRepository; +import org.qortal.settings.Settings; +import org.qortal.test.common.AtUtils; +import org.qortal.test.common.BlockUtils; +import org.qortal.test.common.Common; +import org.qortal.transaction.DeployAtTransaction; +import org.qortal.transaction.Transaction; +import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; +import org.qortal.utils.BlockArchiveUtils; +import org.qortal.utils.NTP; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.sql.SQLException; +import java.util.List; + +import static org.junit.Assert.*; + +public class BlockArchiveV2Tests extends Common { + + @Before + public void beforeTest() throws DataException, IllegalAccessException { + Common.useSettings("test-settings-v2-block-archive.json"); + NTP.setFixedOffset(Settings.getInstance().getTestNtpOffset()); + this.deleteArchiveDirectory(); + + // Set default archive version to 2, so that archive builds in these tests use V2 + FieldUtils.writeField(Settings.getInstance(), "defaultArchiveVersion", 2, true); + } + + @After + public void afterTest() throws DataException { + this.deleteArchiveDirectory(); + } + + + @Test + public void testWriter() throws DataException, InterruptedException, TransformationException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + 
// Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) { + BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); + } + + // 900 blocks are trimmed (this specifies the first untrimmed height) + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); + repository.getATRepository().setAtTrimHeight(901); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(900, maximumArchiveHeight); + + // Write blocks 2-900 to the archive + BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); + writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); + + // Make sure that the archive contains the correct number of blocks + assertEquals(900 - 1, writer.getWrittenCount()); + + // Increment block archive height + repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount()); + repository.saveChanges(); + assertEquals(900 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the file exists + File outputFile = writer.getOutputPath().toFile(); + assertTrue(outputFile.exists()); + } + } + + @Test + public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) { + BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); + } + + // 900 blocks are trimmed (this specifies the first untrimmed height) + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); + repository.getATRepository().setAtTrimHeight(901); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(900, maximumArchiveHeight); + + // Write blocks 2-900 to the archive + BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); + writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); + + // Make sure that the archive contains the correct number of blocks + assertEquals(900 - 1, writer.getWrittenCount()); + + // Increment block archive height + repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount()); + repository.saveChanges(); + assertEquals(900 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the file exists + File outputFile = writer.getOutputPath().toFile(); + assertTrue(outputFile.exists()); + + // Read block 2 from the archive + BlockArchiveReader reader = BlockArchiveReader.getInstance(); + BlockTransformation block2Info = reader.fetchBlockAtHeight(2); + BlockData block2ArchiveData = block2Info.getBlockData(); + + // Read block 2 from the repository + BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2); + + // Ensure the 
values match + assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight()); + assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature()); + + // Test some values in the archive + assertEquals(1, block2ArchiveData.getOnlineAccountsCount()); + + // Read block 900 from the archive + BlockTransformation block900Info = reader.fetchBlockAtHeight(900); + BlockData block900ArchiveData = block900Info.getBlockData(); + + // Read block 900 from the repository + BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900); + + // Ensure the values match + assertEquals(block900ArchiveData.getHeight(), block900RepositoryData.getHeight()); + assertArrayEquals(block900ArchiveData.getSignature(), block900RepositoryData.getSignature()); + + // Test some values in the archive + assertEquals(1, block900ArchiveData.getOnlineAccountsCount()); + + } + } + + @Test + public void testArchivedAtStates() throws DataException, InterruptedException, TransformationException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Deploy an AT so that we have AT state data + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + byte[] creationBytes = AtUtils.buildSimpleAT(); + long fundingAmount = 1_00000000L; + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + String atAddress = deployAtTransaction.getATAccount().getAddress(); + + // Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) { + BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); + } + + // 9 blocks are trimmed (this specifies the first untrimmed height) + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(10); + repository.getATRepository().setAtTrimHeight(10); + + // Check the max archive height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(9, maximumArchiveHeight); + + // Write blocks 2-9 to the archive + BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); + writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); + + // Make sure that the archive contains the correct number of blocks + assertEquals(9 - 1, writer.getWrittenCount()); + + // Increment block archive height + repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount()); + repository.saveChanges(); + assertEquals(9 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the file exists + File outputFile = writer.getOutputPath().toFile(); + assertTrue(outputFile.exists()); + + // Check blocks 3-9 + for (Integer testHeight = 2; testHeight <= 9; testHeight++) { + + // Read a block from the archive + BlockArchiveReader reader = BlockArchiveReader.getInstance(); + BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight); + BlockData archivedBlockData = blockInfo.getBlockData(); + byte[] archivedAtStateHash = blockInfo.getAtStatesHash(); + List archivedTransactions = blockInfo.getTransactions(); + + // Read the same block from the repository + BlockData repositoryBlockData = repository.getBlockRepository().fromHeight(testHeight); + ATStateData repositoryAtStateData = 
repository.getATRepository().getATStateAtHeight(atAddress, testHeight); + + // Ensure the repository has full AT state data + assertNotNull(repositoryAtStateData.getStateHash()); + assertNotNull(repositoryAtStateData.getStateData()); + + // Check the archived AT state + if (testHeight == 2) { + assertEquals(1, archivedTransactions.size()); + assertEquals(Transaction.TransactionType.DEPLOY_AT, archivedTransactions.get(0).getType()); + } + else { + // Blocks 3+ shouldn't have any transactions + assertTrue(archivedTransactions.isEmpty()); + } + + // Ensure the archive has the AT states hash + assertNotNull(archivedAtStateHash); + + // Also check the online accounts count and height + assertEquals(1, archivedBlockData.getOnlineAccountsCount()); + assertEquals(testHeight, archivedBlockData.getHeight()); + + // Ensure the values match + assertEquals(archivedBlockData.getHeight(), repositoryBlockData.getHeight()); + assertArrayEquals(archivedBlockData.getSignature(), repositoryBlockData.getSignature()); + assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount()); + assertArrayEquals(archivedBlockData.getMinterSignature(), repositoryBlockData.getMinterSignature()); + assertEquals(archivedBlockData.getATCount(), repositoryBlockData.getATCount()); + assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount()); + assertArrayEquals(archivedBlockData.getReference(), repositoryBlockData.getReference()); + assertEquals(archivedBlockData.getTimestamp(), repositoryBlockData.getTimestamp()); + assertEquals(archivedBlockData.getATFees(), repositoryBlockData.getATFees()); + assertEquals(archivedBlockData.getTotalFees(), repositoryBlockData.getTotalFees()); + assertEquals(archivedBlockData.getTransactionCount(), repositoryBlockData.getTransactionCount()); + assertArrayEquals(archivedBlockData.getTransactionsSignature(), repositoryBlockData.getTransactionsSignature()); + + // TODO: build atStatesHash and compare against value in archive + } + + // Check block 10 (unarchived) + BlockArchiveReader reader = BlockArchiveReader.getInstance(); + BlockTransformation blockInfo = reader.fetchBlockAtHeight(10); + assertNull(blockInfo); + + } + + } + + @Test + public void testArchiveAndPrune() throws DataException, InterruptedException, TransformationException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Deploy an AT so that we have AT state data + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + byte[] creationBytes = AtUtils.buildSimpleAT(); + long fundingAmount = 1_00000000L; + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + + // Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) { + BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); + } + + // Assume 900 blocks are trimmed (this specifies the first untrimmed height) + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); + repository.getATRepository().setAtTrimHeight(901); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(900, maximumArchiveHeight); + + // Write blocks 2-900 to the archive + BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); + writer.setShouldEnforceFileSizeTarget(false); // 
To avoid the need to pre-calculate file sizes + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); + + // Make sure that the archive contains the correct number of blocks + assertEquals(900 - 1, writer.getWrittenCount()); + + // Increment block archive height + repository.getBlockArchiveRepository().setBlockArchiveHeight(901); + repository.saveChanges(); + assertEquals(901, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the file exists + File outputFile = writer.getOutputPath().toFile(); + assertTrue(outputFile.exists()); + + // Ensure the SQL repository contains blocks 2 and 900... + assertNotNull(repository.getBlockRepository().fromHeight(2)); + assertNotNull(repository.getBlockRepository().fromHeight(900)); + + // Prune all the archived blocks + int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900); + assertEquals(900-1, numBlocksPruned); + repository.getBlockRepository().setBlockPruneHeight(901); + + // Prune the AT states for the archived blocks + repository.getATRepository().rebuildLatestAtStates(900); + repository.saveChanges(); + int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900); + assertEquals(900-2, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state + repository.getATRepository().setAtPruneHeight(901); + + // Now ensure the SQL repository is missing blocks 2 and 900... + assertNull(repository.getBlockRepository().fromHeight(2)); + assertNull(repository.getBlockRepository().fromHeight(900)); + + // ... but it's not missing blocks 1 and 901 (we don't prune the genesis block) + assertNotNull(repository.getBlockRepository().fromHeight(1)); + assertNotNull(repository.getBlockRepository().fromHeight(901)); + + // Validate the latest block height in the repository + assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight()); + + } + } + + @Test + public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Deploy an AT so that we have AT state data + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + byte[] creationBytes = AtUtils.buildSimpleAT(); + long fundingAmount = 1_00000000L; + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + + // Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) { + BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); + } + + // Make sure that block 500 has full AT state data and data hash + List block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500); + ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500); + assertNotNull(atStatesData.getStateHash()); + assertNotNull(atStatesData.getStateData()); + + // Trim the first 500 blocks + repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500); + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501); + repository.getATRepository().rebuildLatestAtStates(500); + repository.getATRepository().trimAtStates(0, 500, 1000); + repository.getATRepository().setAtTrimHeight(501); + + // Now block 499 should only have the AT state data hash + List block499AtStatesData = 
repository.getATRepository().getBlockATStatesAtHeight(499); + atStatesData = repository.getATRepository().getATStateAtHeight(block499AtStatesData.get(0).getATAddress(), 499); + assertNotNull(atStatesData.getStateHash()); + assertNull(atStatesData.getStateData()); + + // ... but block 500 should have the full data (due to being retained as the "latest" AT state in the trimmed range + block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500); + atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500); + assertNotNull(atStatesData.getStateHash()); + assertNotNull(atStatesData.getStateData()); + + // ... and block 501 should also have the full data + List block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501); + atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501); + assertNotNull(atStatesData.getStateHash()); + assertNotNull(atStatesData.getStateData()); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(500, maximumArchiveHeight); + + BlockData block3DataPreArchive = repository.getBlockRepository().fromHeight(3); + + // Write blocks 2-500 to the archive + BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); + writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); + + // Make sure that the archive contains the correct number of blocks + assertEquals(500 - 1, writer.getWrittenCount()); // -1 for the genesis block + + // Increment block archive height + repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount()); + repository.saveChanges(); + assertEquals(500 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the file exists + File outputFile = writer.getOutputPath().toFile(); + assertTrue(outputFile.exists()); + + // Ensure the SQL repository contains blocks 2 and 500... + assertNotNull(repository.getBlockRepository().fromHeight(2)); + assertNotNull(repository.getBlockRepository().fromHeight(500)); + + // Prune all the archived blocks + int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 500); + assertEquals(500-1, numBlocksPruned); + repository.getBlockRepository().setBlockPruneHeight(501); + + // Prune the AT states for the archived blocks + repository.getATRepository().rebuildLatestAtStates(500); + repository.saveChanges(); + int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500); + assertEquals(498, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state + repository.getATRepository().setAtPruneHeight(501); + + // Now ensure the SQL repository is missing blocks 2 and 500... + assertNull(repository.getBlockRepository().fromHeight(2)); + assertNull(repository.getBlockRepository().fromHeight(500)); + + // ... 
but it's not missing blocks 1 and 501 (we don't prune the genesis block)
+			assertNotNull(repository.getBlockRepository().fromHeight(1));
+			assertNotNull(repository.getBlockRepository().fromHeight(501));
+
+			// Validate the latest block height in the repository
+			assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
+
+			// Now orphan some unarchived blocks.
+			BlockUtils.orphanBlocks(repository, 500);
+			assertEquals(502, (int) repository.getBlockRepository().getLastBlock().getHeight());
+
+			// We're close to the lower limit of the SQL database now, so
+			// we need to import some blocks from the archive
+			BlockArchiveUtils.importFromArchive(401, 500, repository);
+
+			// Ensure the SQL repository now contains block 401 but not 400...
+			assertNotNull(repository.getBlockRepository().fromHeight(401));
+			assertNull(repository.getBlockRepository().fromHeight(400));
+
+			// Import the remaining 399 blocks
+			BlockArchiveUtils.importFromArchive(2, 400, repository);
+
+			// Verify that block 3 matches the original
+			BlockData block3DataPostArchive = repository.getBlockRepository().fromHeight(3);
+			assertArrayEquals(block3DataPreArchive.getSignature(), block3DataPostArchive.getSignature());
+			assertEquals(block3DataPreArchive.getHeight(), block3DataPostArchive.getHeight());
+
+			// Orphan 2 more blocks, which should be the last ones that can be orphaned
+			// TODO: figure out why this is 1 block more than in the equivalent block archive V1 test
+			BlockUtils.orphanBlocks(repository, 2);
+
+			// Orphan another block, which should fail
+			Exception exception = null;
+			try {
+				BlockUtils.orphanBlocks(repository, 1);
+			} catch (DataException e) {
+				exception = e;
+			}
+
+			// Ensure that a DataException is thrown because there is no more AT states data available
+			assertNotNull(exception);
+			assertEquals(DataException.class, exception.getClass());
+
+			// FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception
+			// and allow orphaning back through blocks with trimmed AT states.
+
+		}
+	}
+
+
+	/**
+	 * Many nodes are missing an ATStatesHeightIndex due to an earlier bug.
+	 * In these cases we disable archiving and pruning, as this index is an
+	 * essential component of these processes. 
+	 */
+	@Test
+	public void testMissingAtStatesHeightIndex() throws DataException, SQLException {
+		try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {
+
+			// Firstly check that we're able to prune or archive when the index exists
+			assertTrue(repository.getATRepository().hasAtStatesHeightIndex());
+			assertTrue(RepositoryManager.canArchiveOrPrune());
+
+			// Delete the index
+			repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute();
+
+			// Ensure that we're unable to prune or archive when the index doesn't exist
+			assertFalse(repository.getATRepository().hasAtStatesHeightIndex());
+			assertFalse(RepositoryManager.canArchiveOrPrune());
+		}
+	}
+
+
+	private void deleteArchiveDirectory() {
+		// Delete archive directory if exists
+		Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
+		try {
+			FileUtils.deleteDirectory(archivePath.toFile());
+		} catch (IOException e) {
+
+		}
+	}
+
+}
diff --git a/src/test/resources/test-settings-v2-block-archive.json b/src/test/resources/test-settings-v2-block-archive.json
index c5ed1aa8..209ce92d 100644
--- a/src/test/resources/test-settings-v2-block-archive.json
+++ b/src/test/resources/test-settings-v2-block-archive.json
@@ -9,5 +9,6 @@
   "testNtpOffset": 0,
   "minPeers": 0,
   "pruneBlockLimit": 100,
-  "repositoryPath": "dbtest"
+  "repositoryPath": "dbtest",
+  "defaultArchiveVersion": 1
 }

From b4a736c5d2a8cdb566ce7b7f55bdafd43e71fda0 Mon Sep 17 00:00:00 2001
From: CalDescent
Date: Fri, 10 Mar 2023 13:53:46 +0000
Subject: [PATCH 16/16] Added optional "sender" filter to GET /chat/messages

---
 src/main/java/org/qortal/api/resource/ChatResource.java    | 2 ++
 .../org/qortal/api/websocket/ChatMessagesWebSocket.java    | 2 ++
 src/main/java/org/qortal/repository/ChatRepository.java    | 2 +-
 .../org/qortal/repository/hsqldb/HSQLDBChatRepository.java | 7 ++++++-
 4 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/src/main/java/org/qortal/api/resource/ChatResource.java b/src/main/java/org/qortal/api/resource/ChatResource.java
index 2601e938..150b6f63 100644
--- a/src/main/java/org/qortal/api/resource/ChatResource.java
+++ b/src/main/java/org/qortal/api/resource/ChatResource.java
@@ -72,6 +72,7 @@ public class ChatResource {
 			@QueryParam("reference") String reference,
 			@QueryParam("chatreference") String chatReference,
 			@QueryParam("haschatreference") Boolean hasChatReference,
+			@QueryParam("sender") String sender,
 			@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
 			@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
 			@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
@@ -107,6 +108,7 @@
 					chatReferenceBytes,
 					hasChatReference,
 					involvingAddresses,
+					sender,
 					limit, offset, reverse);
 		} catch (DataException e) {
 			throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
diff --git a/src/main/java/org/qortal/api/websocket/ChatMessagesWebSocket.java b/src/main/java/org/qortal/api/websocket/ChatMessagesWebSocket.java
index 76ed936c..c6d7aaed 100644
--- a/src/main/java/org/qortal/api/websocket/ChatMessagesWebSocket.java
+++ b/src/main/java/org/qortal/api/websocket/ChatMessagesWebSocket.java
@@ -49,6 +49,7 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
 					null,
 					null,
 					null,
+					null,
 					null,
 					null,
 					null);
 			sendMessages(session, chatMessages);
@@ -79,6 +80,7 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
 					null,
 					null,
 					involvingAddresses,
+					null,
 					null,
 					null, 
null); sendMessages(session, chatMessages); diff --git a/src/main/java/org/qortal/repository/ChatRepository.java b/src/main/java/org/qortal/repository/ChatRepository.java index c4541907..34ad77dd 100644 --- a/src/main/java/org/qortal/repository/ChatRepository.java +++ b/src/main/java/org/qortal/repository/ChatRepository.java @@ -15,7 +15,7 @@ public interface ChatRepository { */ public List getMessagesMatchingCriteria(Long before, Long after, Integer txGroupId, byte[] reference, byte[] chatReferenceBytes, Boolean hasChatReference, - List involving, Integer limit, Integer offset, Boolean reverse) throws DataException; + List involving, String senderAddress, Integer limit, Integer offset, Boolean reverse) throws DataException; public ChatMessage toChatMessage(ChatTransactionData chatTransactionData) throws DataException; diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java index a995a0b3..55467d87 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java @@ -24,7 +24,7 @@ public class HSQLDBChatRepository implements ChatRepository { @Override public List getMessagesMatchingCriteria(Long before, Long after, Integer txGroupId, byte[] referenceBytes, - byte[] chatReferenceBytes, Boolean hasChatReference, List involving, + byte[] chatReferenceBytes, Boolean hasChatReference, List involving, String senderAddress, Integer limit, Integer offset, Boolean reverse) throws DataException { // Check args meet expectations if ((txGroupId != null && involving != null && !involving.isEmpty()) @@ -74,6 +74,11 @@ public class HSQLDBChatRepository implements ChatRepository { whereClauses.add("chat_reference IS NULL"); } + if (senderAddress != null) { + whereClauses.add("sender = ?"); + bindParams.add(senderAddress); + } + if (txGroupId != null) { whereClauses.add("tx_group_id = " + txGroupId); // int safe to use literally whereClauses.add("recipient IS NULL");