forked from Qortal/qortal
Browse Source
# Conflicts:
#   src/main/java/org/qortal/api/resource/ArbitraryResource.java
(branch: qdn-on-chain-data)
CalDescent
2 years ago
43 changed files with 1446 additions and 787 deletions
@ -0,0 +1,121 @@
|
||||
package org.qortal.controller.repository; |
||||
|
||||
import org.apache.commons.io.FileUtils; |
||||
import org.apache.logging.log4j.LogManager; |
||||
import org.apache.logging.log4j.Logger; |
||||
import org.qortal.controller.Controller; |
||||
import org.qortal.controller.Synchronizer; |
||||
import org.qortal.repository.*; |
||||
import org.qortal.settings.Settings; |
||||
import org.qortal.transform.TransformationException; |
||||
|
||||
import java.io.IOException; |
||||
import java.nio.file.Path; |
||||
import java.nio.file.Paths; |
||||
|
||||
|
||||
public class BlockArchiveRebuilder { |
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(BlockArchiveRebuilder.class); |
||||
|
||||
private final int serializationVersion; |
||||
|
||||
public BlockArchiveRebuilder(int serializationVersion) { |
||||
this.serializationVersion = serializationVersion; |
||||
} |
||||
|
||||
public void start() throws DataException, IOException { |
||||
if (!Settings.getInstance().isArchiveEnabled() || Settings.getInstance().isLite()) { |
||||
return; |
||||
} |
||||
|
||||
// New archive path is in a different location from original archive path, to avoid conflicts.
|
||||
// It will be moved later, once the process is complete.
|
||||
final Path newArchivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive-rebuild"); |
||||
final Path originalArchivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive"); |
||||
|
||||
// Delete archive-rebuild if it exists from a previous attempt
|
||||
FileUtils.deleteDirectory(newArchivePath.toFile()); |
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) { |
||||
int startHeight = 1; // We need to rebuild the entire archive
|
||||
|
||||
LOGGER.info("Rebuilding block archive from height {}...", startHeight); |
||||
|
||||
while (!Controller.isStopping()) { |
||||
repository.discardChanges(); |
||||
|
||||
Thread.sleep(1000L); |
||||
|
||||
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
|
||||
if (Synchronizer.getInstance().isSynchronizing()) { |
||||
continue; |
||||
} |
||||
|
||||
// Rebuild archive
|
||||
try { |
||||
final int maximumArchiveHeight = BlockArchiveReader.getInstance().getHeightOfLastArchivedBlock(); |
||||
if (startHeight >= maximumArchiveHeight) { |
||||
// We've finished.
|
||||
// Delete existing archive and move the newly built one into its place
|
||||
FileUtils.deleteDirectory(originalArchivePath.toFile()); |
||||
FileUtils.moveDirectory(newArchivePath.toFile(), originalArchivePath.toFile()); |
||||
BlockArchiveReader.getInstance().invalidateFileListCache(); |
||||
LOGGER.info("Block archive successfully rebuilt"); |
||||
return; |
||||
} |
||||
|
||||
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, serializationVersion, newArchivePath, repository); |
||||
|
||||
// Set data source to BLOCK_ARCHIVE as we are rebuilding
|
||||
writer.setDataSource(BlockArchiveWriter.BlockArchiveDataSource.BLOCK_ARCHIVE); |
||||
|
||||
// We can't enforce the 100MB file size target, as the final file needs to contain all blocks
|
||||
// that exist in the current archive. Otherwise, the final blocks in the archive will be lost.
|
||||
writer.setShouldEnforceFileSizeTarget(false); |
||||
|
||||
// We want to log the rebuild progress
|
||||
writer.setShouldLogProgress(true); |
||||
|
||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); |
||||
switch (result) { |
||||
case OK: |
||||
// Increment block archive height
|
||||
startHeight += writer.getWrittenCount(); |
||||
repository.saveChanges(); |
||||
break; |
||||
|
||||
case STOPPING: |
||||
return; |
||||
|
||||
// We've reached the limit of the blocks we can archive
|
||||
// Sleep for a while to allow more to become available
|
||||
case NOT_ENOUGH_BLOCKS: |
||||
// This shouldn't happen, as we're not enforcing minimum file sizes
|
||||
repository.discardChanges(); |
||||
throw new DataException("Unable to rebuild archive due to unexpected NOT_ENOUGH_BLOCKS response."); |
||||
|
||||
case BLOCK_NOT_FOUND: |
||||
// We tried to archive a block that didn't exist. This is a major failure and likely means
|
||||
// that a bootstrap or re-sync is needed. Try again every minute until then.
|
||||
LOGGER.info("Error: block not found when rebuilding archive. If this error persists, " + |
||||
"a bootstrap or re-sync may be needed."); |
||||
repository.discardChanges(); |
||||
throw new DataException("Unable to rebuild archive because a block is missing."); |
||||
} |
||||
|
||||
} catch (IOException | TransformationException e) { |
||||
LOGGER.info("Caught exception when rebuilding block archive", e); |
||||
throw new DataException("Unable to rebuild block archive"); |
||||
} |
||||
|
||||
} |
||||
} catch (InterruptedException e) { |
||||
// Do nothing
|
||||
} finally { |
||||
// Delete archive-rebuild if it still exists, as that means something went wrong
|
||||
FileUtils.deleteDirectory(newArchivePath.toFile()); |
||||
} |
||||
} |
||||
|
||||
} |
@ -0,0 +1,43 @@
|
||||
package org.qortal.network.message; |
||||
|
||||
import com.google.common.primitives.Ints; |
||||
import org.qortal.block.Block; |
||||
import org.qortal.transform.TransformationException; |
||||
import org.qortal.transform.block.BlockTransformer; |
||||
|
||||
import java.io.ByteArrayOutputStream; |
||||
import java.io.IOException; |
||||
import java.nio.ByteBuffer; |
||||
|
||||
// This is an OUTGOING-only Message which more readily lends itself to being cached
|
||||
public class CachedBlockV2Message extends Message implements Cloneable { |
||||
|
||||
public CachedBlockV2Message(Block block) throws TransformationException { |
||||
super(MessageType.BLOCK_V2); |
||||
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream(); |
||||
|
||||
try { |
||||
bytes.write(Ints.toByteArray(block.getBlockData().getHeight())); |
||||
|
||||
bytes.write(BlockTransformer.toBytes(block)); |
||||
} catch (IOException e) { |
||||
throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream"); |
||||
} |
||||
|
||||
this.dataBytes = bytes.toByteArray(); |
||||
this.checksumBytes = Message.generateChecksum(this.dataBytes); |
||||
} |
||||
|
||||
public CachedBlockV2Message(byte[] cachedBytes) { |
||||
super(MessageType.BLOCK_V2); |
||||
|
||||
this.dataBytes = cachedBytes; |
||||
this.checksumBytes = Message.generateChecksum(this.dataBytes); |
||||
} |
||||
|
||||
public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) { |
||||
throw new UnsupportedOperationException("CachedBlockMessageV2 is for outgoing messages only"); |
||||
} |
||||
|
||||
} |
@ -1,88 +0,0 @@
|
||||
package org.qortal.repository.hsqldb; |
||||
|
||||
import org.apache.logging.log4j.LogManager; |
||||
import org.apache.logging.log4j.Logger; |
||||
import org.qortal.controller.Controller; |
||||
import org.qortal.gui.SplashFrame; |
||||
import org.qortal.repository.BlockArchiveWriter; |
||||
import org.qortal.repository.DataException; |
||||
import org.qortal.repository.Repository; |
||||
import org.qortal.repository.RepositoryManager; |
||||
import org.qortal.transform.TransformationException; |
||||
|
||||
import java.io.IOException; |
||||
|
||||
/** |
||||
* |
||||
* When switching to an archiving node, we need to archive most of the database contents. |
||||
* This involves copying its data into flat files. |
||||
* If we do this entirely as a background process, it is very slow and can interfere with syncing. |
||||
* However, if we take the approach of doing this in bulk, before starting up the rest of the |
||||
* processes, this makes it much faster and less invasive. |
||||
* |
||||
* From that point, the original background archiving process will run, but can be dialled right down |
||||
* so not to interfere with syncing. |
||||
* |
||||
*/ |
||||
|
||||
|
||||
public class HSQLDBDatabaseArchiving { |
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabaseArchiving.class); |
||||
|
||||
|
||||
public static boolean buildBlockArchive(Repository repository, long fileSizeTarget) throws DataException { |
||||
|
||||
// Only build the archive if we haven't already got one that is up to date
|
||||
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository); |
||||
if (upToDate) { |
||||
// Already archived
|
||||
return false; |
||||
} |
||||
|
||||
LOGGER.info("Building block archive - this process could take a while..."); |
||||
SplashFrame.getInstance().updateStatus("Building block archive..."); |
||||
|
||||
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); |
||||
int startHeight = 0; |
||||
|
||||
while (!Controller.isStopping()) { |
||||
try { |
||||
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository); |
||||
writer.setFileSizeTarget(fileSizeTarget); |
||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); |
||||
switch (result) { |
||||
case OK: |
||||
// Increment block archive height
|
||||
startHeight = writer.getLastWrittenHeight() + 1; |
||||
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight); |
||||
repository.saveChanges(); |
||||
break; |
||||
|
||||
case STOPPING: |
||||
return false; |
||||
|
||||
case NOT_ENOUGH_BLOCKS: |
||||
// We've reached the limit of the blocks we can archive
|
||||
// Return from the whole method
|
||||
return true; |
||||
|
||||
case BLOCK_NOT_FOUND: |
||||
// We tried to archive a block that didn't exist. This is a major failure and likely means
|
||||
// that a bootstrap or re-sync is needed. Return rom the method
|
||||
LOGGER.info("Error: block not found when building archive. If this error persists, " + |
||||
"a bootstrap or re-sync may be needed."); |
||||
return false; |
||||
} |
||||
|
||||
} catch (IOException | TransformationException | InterruptedException e) { |
||||
LOGGER.info("Caught exception when creating block cache", e); |
||||
return false; |
||||
} |
||||
} |
||||
|
||||
// If we got this far then something went wrong (most likely the app is stopping)
|
||||
return false; |
||||
} |
||||
|
||||
} |
@ -1,332 +0,0 @@
|
||||
package org.qortal.repository.hsqldb; |
||||
|
||||
import org.apache.logging.log4j.LogManager; |
||||
import org.apache.logging.log4j.Logger; |
||||
import org.qortal.controller.Controller; |
||||
import org.qortal.data.block.BlockData; |
||||
import org.qortal.gui.SplashFrame; |
||||
import org.qortal.repository.BlockArchiveWriter; |
||||
import org.qortal.repository.DataException; |
||||
import org.qortal.repository.Repository; |
||||
import org.qortal.settings.Settings; |
||||
|
||||
import java.sql.ResultSet; |
||||
import java.sql.SQLException; |
||||
import java.util.concurrent.TimeoutException; |
||||
|
||||
/**
 *
 * When switching from a full node to a pruning node, we need to delete most of the database contents.
 * If we do this entirely as a background process, it is very slow and can interfere with syncing.
 * However, if we take the approach of transferring only the necessary rows to a new table and then
 * deleting the original table, this makes the process much faster. It was taking several days to
 * delete the AT states in the background, but only a couple of minutes to copy them to a new table.
 *
 * The trade off is that we have to go through a form of "reshape" when starting the app for the first
 * time after enabling pruning mode. But given that this is an opt-in mode, I don't think it will be
 * a problem.
 *
 * Once the pruning is complete, it automatically performs a CHECKPOINT DEFRAG in order to
 * shrink the database file size down to a fraction of what it was before.
 *
 * From this point, the original background process will run, but can be dialled right down so not
 * to interfere with syncing.
 *
 */
public class HSQLDBDatabasePruning {

    private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabasePruning.class);

    /**
     * Bulk-prunes the ATStates table by copying only the rows we must keep
     * (latest state per AT, plus all states in "recent" un-prunable blocks)
     * into a new table, then dropping and renaming.
     *
     * @param repository open HSQLDB repository (needed for raw SQL access)
     * @return {@code true} when pruning ran to completion; {@code false} when
     *         skipped (already pruned, archiver not up to date, or no chain tip)
     * @throws SQLException on raw SQL failure
     * @throws DataException on repository failure, or if a row copy fails
     */
    public static boolean pruneATStates(HSQLDBRepository repository) throws SQLException, DataException {

        // Only bulk prune AT states if we have never done so before
        int pruneHeight = repository.getATRepository().getAtPruneHeight();
        if (pruneHeight > 0) {
            // Already pruned AT states
            return false;
        }

        if (Settings.getInstance().isArchiveEnabled()) {
            // Only proceed if we can see that the archiver has already finished
            // This way, if the archiver failed for any reason, we can prune once it has had
            // some opportunities to try again
            boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
            if (!upToDate) {
                return false;
            }
        }

        LOGGER.info("Starting bulk prune of AT states - this process could take a while... " +
                "(approx. 2 mins on high spec, or upwards of 30 mins in some cases)");
        SplashFrame.getInstance().updateStatus("Pruning database (takes up to 30 mins)...");

        // Create new AT-states table to hold smaller dataset
        repository.executeCheckedUpdate("DROP TABLE IF EXISTS ATStatesNew");
        repository.executeCheckedUpdate("CREATE TABLE ATStatesNew ("
                + "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, "
                + "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, "
                + "PRIMARY KEY (AT_address, height), "
                + "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)");
        repository.executeCheckedUpdate("SET TABLE ATStatesNew NEW SPACE");
        repository.executeCheckedUpdate("CHECKPOINT");

        // Add a height index
        LOGGER.info("Adding index to AT states table...");
        repository.executeCheckedUpdate("CREATE INDEX IF NOT EXISTS ATStatesNewHeightIndex ON ATStatesNew (height)");
        repository.executeCheckedUpdate("CHECKPOINT");

        // Find our latest block
        BlockData latestBlock = repository.getBlockRepository().getLastBlock();
        if (latestBlock == null) {
            LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
            return false;
        }

        // Calculate some constants for later use
        final int blockchainHeight = latestBlock.getHeight();
        int maximumBlockToTrim = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
        if (Settings.getInstance().isArchiveEnabled()) {
            // Archive mode - don't prune anything that hasn't been archived yet
            maximumBlockToTrim = Math.min(maximumBlockToTrim, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
        }
        final int endHeight = blockchainHeight;
        final int blockStep = 10000;

        // It's essential that we rebuild the latest AT states here, as we are using this data in the next query.
        // Failing to do this will result in important AT states being deleted, rendering the database unusable.
        repository.getATRepository().rebuildLatestAtStates(endHeight);

        // Loop through all the LatestATStates and copy them to the new table
        LOGGER.info("Copying AT states...");
        for (int height = 0; height < endHeight; height += blockStep) {
            final int batchEndHeight = height + blockStep - 1;
            //LOGGER.info(String.format("Copying AT states between %d and %d...", height, batchEndHeight));

            String sql = "SELECT height, AT_address FROM LatestATStates WHERE height BETWEEN ? AND ?";
            try (ResultSet latestAtStatesResultSet = repository.checkedExecute(sql, height, batchEndHeight)) {
                if (latestAtStatesResultSet != null) {
                    // NOTE(review): the do/while assumes checkedExecute() returns a ResultSet
                    // already positioned on the first row (or null when empty) - confirm this
                    // matches the repository's convention before modifying this loop
                    do {
                        int latestAtHeight = latestAtStatesResultSet.getInt(1);
                        String latestAtAddress = latestAtStatesResultSet.getString(2);

                        // Copy this latest ATState to the new table
                        //LOGGER.info(String.format("Copying AT %s at height %d...", latestAtAddress, latestAtHeight));
                        try {
                            String updateSql = "INSERT INTO ATStatesNew ("
                                    + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
                                    + "FROM ATStates "
                                    + "WHERE height = ? AND AT_address = ?)";
                            repository.executeCheckedUpdate(updateSql, latestAtHeight, latestAtAddress);
                        } catch (SQLException e) {
                            repository.examineException(e);
                            throw new DataException("Unable to copy ATStates", e);
                        }

                        // If this batch includes blocks after the maximum block to trim, we will need to copy
                        // each of its AT states above maximumBlockToTrim as they are considered "recent". We
                        // need to do this for _all_ AT states in these blocks, regardless of their latest state.
                        if (batchEndHeight >= maximumBlockToTrim) {
                            // Now copy this AT's states for each recent block they are present in
                            for (int i = maximumBlockToTrim; i < endHeight; i++) {
                                if (latestAtHeight < i) {
                                    // This AT finished before this block so there is nothing to copy
                                    continue;
                                }

                                //LOGGER.info(String.format("Copying recent AT %s at height %d...", latestAtAddress, i));
                                try {
                                    // Copy each LatestATState to the new table.
                                    // NOTE(review): "INSERT IGNORE" is non-standard SQL (MySQL-style) -
                                    // presumably relies on HSQLDB's syntax-compatibility mode; verify
                                    // against the database configuration
                                    String updateSql = "INSERT IGNORE INTO ATStatesNew ("
                                            + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
                                            + "FROM ATStates "
                                            + "WHERE height = ? AND AT_address = ?)";
                                    repository.executeCheckedUpdate(updateSql, i, latestAtAddress);
                                } catch (SQLException e) {
                                    repository.examineException(e);
                                    throw new DataException("Unable to copy ATStates", e);
                                }
                            }
                        }
                        // Commit per copied AT to keep transactions small
                        repository.saveChanges();

                    } while (latestAtStatesResultSet.next());
                }
            } catch (SQLException e) {
                throw new DataException("Unable to copy AT states", e);
            }
        }

        // Finally, drop the original table and rename
        LOGGER.info("Deleting old AT states...");
        repository.executeCheckedUpdate("DROP TABLE ATStates");
        repository.executeCheckedUpdate("ALTER TABLE ATStatesNew RENAME TO ATStates");
        repository.executeCheckedUpdate("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex");
        repository.executeCheckedUpdate("CHECKPOINT");

        // Update the prune height
        int nextPruneHeight = maximumBlockToTrim + 1;
        repository.getATRepository().setAtPruneHeight(nextPruneHeight);
        repository.saveChanges();

        repository.executeCheckedUpdate("CHECKPOINT");

        // Now prune/trim the ATStatesData, as this currently goes back over a month
        return HSQLDBDatabasePruning.pruneATStateData(repository);
    }

    /*
     * Bulk prune ATStatesData to catch up with the now pruned ATStates table
     * This uses the existing AT States trimming code but with a much higher end block
     */
    private static boolean pruneATStateData(Repository repository) throws DataException {

        if (Settings.getInstance().isArchiveEnabled()) {
            // Don't prune ATStatesData in archive mode
            return true;
        }

        BlockData latestBlock = repository.getBlockRepository().getLastBlock();
        if (latestBlock == null) {
            LOGGER.info("Unable to determine blockchain height, necessary for bulk ATStatesData pruning");
            return false;
        }
        final int blockchainHeight = latestBlock.getHeight();
        int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
        // ATStateData is already trimmed - so carry on from where we left off in the past
        int pruneStartHeight = repository.getATRepository().getAtTrimHeight();

        LOGGER.info("Starting bulk prune of AT states data - this process could take a while... (approx. 3 mins on high spec)");

        while (pruneStartHeight < upperPrunableHeight) {
            // Prune all AT state data up until our latest minus pruneBlockLimit (or our archive height)

            if (Controller.isStopping()) {
                return false;
            }

            // Override batch size in the settings because this is a one-off process
            final int batchSize = 1000;
            final int rowLimitPerBatch = 50000;
            int upperBatchHeight = pruneStartHeight + batchSize;
            int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);

            LOGGER.trace(String.format("Pruning AT states data between %d and %d...", pruneStartHeight, upperPruneHeight));

            int numATStatesPruned = repository.getATRepository().trimAtStates(pruneStartHeight, upperPruneHeight, rowLimitPerBatch);
            repository.saveChanges();

            if (numATStatesPruned > 0) {
                // Rows were trimmed, so stay on the same range: trimAtStates() is
                // row-limited per call and the range may not be empty yet
                LOGGER.trace(String.format("Pruned %d AT states data rows between blocks %d and %d",
                        numATStatesPruned, pruneStartHeight, upperPruneHeight));
            } else {
                repository.getATRepository().setAtTrimHeight(upperBatchHeight);
                // No need to rebuild the latest AT states as we aren't currently synchronizing
                repository.saveChanges();
                LOGGER.debug(String.format("Bumping AT states trim height to %d", upperBatchHeight));

                // Can we move onto next batch?
                if (upperPrunableHeight > upperBatchHeight) {
                    pruneStartHeight = upperBatchHeight;
                }
                else {
                    // We've finished pruning
                    break;
                }
            }
        }

        return true;
    }

    /**
     * Bulk-prunes old blocks up to the prune limit (and, in archive mode, no
     * further than the already-archived height).
     *
     * @param repository open repository
     * @return {@code true} when pruning ran to completion; {@code false} when
     *         skipped or no chain tip could be determined
     * @throws SQLException declared for raw SQL access parity with pruneATStates
     * @throws DataException on repository failure
     */
    public static boolean pruneBlocks(Repository repository) throws SQLException, DataException {

        // Only bulk prune blocks if we have never done so before
        int pruneHeight = repository.getBlockRepository().getBlockPruneHeight();
        if (pruneHeight > 0) {
            // Already pruned blocks
            return false;
        }

        if (Settings.getInstance().isArchiveEnabled()) {
            // Only proceed if we can see that the archiver has already finished
            // This way, if the archiver failed for any reason, we can prune once it has had
            // some opportunities to try again
            boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
            if (!upToDate) {
                return false;
            }
        }

        BlockData latestBlock = repository.getBlockRepository().getLastBlock();
        if (latestBlock == null) {
            LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
            return false;
        }
        final int blockchainHeight = latestBlock.getHeight();
        int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
        int pruneStartHeight = 0;

        if (Settings.getInstance().isArchiveEnabled()) {
            // Archive mode - don't prune anything that hasn't been archived yet
            upperPrunableHeight = Math.min(upperPrunableHeight, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
        }

        LOGGER.info("Starting bulk prune of blocks - this process could take a while... (approx. 5 mins on high spec)");

        while (pruneStartHeight < upperPrunableHeight) {
            // Prune all blocks up until our latest minus pruneBlockLimit

            int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
            int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);

            LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));

            int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
            repository.saveChanges();

            if (numBlocksPruned > 0) {
                // Rows were pruned, so stay on the same range until it is empty
                LOGGER.info(String.format("Pruned %d block%s between %d and %d",
                        numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
                        pruneStartHeight, upperPruneHeight));
            } else {
                final int nextPruneHeight = upperPruneHeight + 1;
                repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
                repository.saveChanges();
                LOGGER.debug(String.format("Bumping block base prune height to %d", nextPruneHeight));

                // Can we move onto next batch?
                if (upperPrunableHeight > nextPruneHeight) {
                    pruneStartHeight = nextPruneHeight;
                }
                else {
                    // We've finished pruning
                    break;
                }
            }
        }

        return true;
    }

    /**
     * Runs the repository's periodic maintenance (defrag etc.) with a bounded
     * wait, updating the splash screen while it runs. A timeout is logged and
     * swallowed rather than propagated.
     *
     * @param repository open repository
     * @throws SQLException declared for parity with callers' raw SQL usage
     * @throws DataException on repository failure
     */
    public static void performMaintenance(Repository repository) throws SQLException, DataException {
        try {
            SplashFrame.getInstance().updateStatus("Performing maintenance...");

            // Timeout if the database isn't ready for backing up after 5 minutes
            // Nothing else should be using the db at this point, so a timeout shouldn't happen
            long timeout = 5 * 60 * 1000L;
            repository.performPeriodicMaintenance(timeout);

        } catch (TimeoutException e) {
            LOGGER.info("Attempt to perform maintenance failed due to timeout: {}", e.getMessage());
        }
    }

}
@ -0,0 +1,504 @@
|
||||
package org.qortal.test; |
||||
|
||||
import org.apache.commons.io.FileUtils; |
||||
import org.apache.commons.lang3.reflect.FieldUtils; |
||||
import org.junit.After; |
||||
import org.junit.Before; |
||||
import org.junit.Test; |
||||
import org.qortal.account.PrivateKeyAccount; |
||||
import org.qortal.controller.BlockMinter; |
||||
import org.qortal.data.at.ATStateData; |
||||
import org.qortal.data.block.BlockData; |
||||
import org.qortal.data.transaction.TransactionData; |
||||
import org.qortal.repository.*; |
||||
import org.qortal.repository.hsqldb.HSQLDBRepository; |
||||
import org.qortal.settings.Settings; |
||||
import org.qortal.test.common.AtUtils; |
||||
import org.qortal.test.common.BlockUtils; |
||||
import org.qortal.test.common.Common; |
||||
import org.qortal.transaction.DeployAtTransaction; |
||||
import org.qortal.transaction.Transaction; |
||||
import org.qortal.transform.TransformationException; |
||||
import org.qortal.transform.block.BlockTransformation; |
||||
import org.qortal.utils.BlockArchiveUtils; |
||||
import org.qortal.utils.NTP; |
||||
|
||||
import java.io.File; |
||||
import java.io.IOException; |
||||
import java.nio.file.Path; |
||||
import java.nio.file.Paths; |
||||
import java.sql.SQLException; |
||||
import java.util.List; |
||||
|
||||
import static org.junit.Assert.*; |
||||
|
||||
public class BlockArchiveV2Tests extends Common { |
||||
|
||||
@Before |
||||
public void beforeTest() throws DataException, IllegalAccessException { |
||||
Common.useSettings("test-settings-v2-block-archive.json"); |
||||
NTP.setFixedOffset(Settings.getInstance().getTestNtpOffset()); |
||||
this.deleteArchiveDirectory(); |
||||
|
||||
// Set default archive version to 2, so that archive builds in these tests use V2
|
||||
FieldUtils.writeField(Settings.getInstance(), "defaultArchiveVersion", 2, true); |
||||
} |
||||
|
||||
@After |
||||
public void afterTest() throws DataException { |
||||
this.deleteArchiveDirectory(); |
||||
} |
||||
|
||||
|
||||
@Test |
||||
public void testWriter() throws DataException, InterruptedException, TransformationException, IOException { |
||||
try (final Repository repository = RepositoryManager.getRepository()) { |
||||
|
||||
// Mint some blocks so that we are able to archive them later
|
||||
for (int i = 0; i < 1000; i++) { |
||||
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); |
||||
} |
||||
|
||||
// 900 blocks are trimmed (this specifies the first untrimmed height)
|
||||
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); |
||||
repository.getATRepository().setAtTrimHeight(901); |
||||
|
||||
// Check the max archive height - this should be one less than the first untrimmed height
|
||||
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); |
||||
assertEquals(900, maximumArchiveHeight); |
||||
|
||||
// Write blocks 2-900 to the archive
|
||||
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); |
||||
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
|
||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); |
||||
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); |
||||
|
||||
// Make sure that the archive contains the correct number of blocks
|
||||
assertEquals(900 - 1, writer.getWrittenCount()); |
||||
|
||||
// Increment block archive height
|
||||
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount()); |
||||
repository.saveChanges(); |
||||
assertEquals(900 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight()); |
||||
|
||||
// Ensure the file exists
|
||||
File outputFile = writer.getOutputPath().toFile(); |
||||
assertTrue(outputFile.exists()); |
||||
} |
||||
} |
||||
|
||||
@Test |
||||
public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException { |
||||
try (final Repository repository = RepositoryManager.getRepository()) { |
||||
|
||||
// Mint some blocks so that we are able to archive them later
|
||||
for (int i = 0; i < 1000; i++) { |
||||
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); |
||||
} |
||||
|
||||
// 900 blocks are trimmed (this specifies the first untrimmed height)
|
||||
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); |
||||
repository.getATRepository().setAtTrimHeight(901); |
||||
|
||||
// Check the max archive height - this should be one less than the first untrimmed height
|
||||
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); |
||||
assertEquals(900, maximumArchiveHeight); |
||||
|
||||
// Write blocks 2-900 to the archive
|
||||
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); |
||||
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
|
||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); |
||||
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); |
||||
|
||||
// Make sure that the archive contains the correct number of blocks
|
||||
assertEquals(900 - 1, writer.getWrittenCount()); |
||||
|
||||
// Increment block archive height
|
||||
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount()); |
||||
repository.saveChanges(); |
||||
assertEquals(900 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight()); |
||||
|
||||
// Ensure the file exists
|
||||
File outputFile = writer.getOutputPath().toFile(); |
||||
assertTrue(outputFile.exists()); |
||||
|
||||
// Read block 2 from the archive
|
||||
BlockArchiveReader reader = BlockArchiveReader.getInstance(); |
||||
BlockTransformation block2Info = reader.fetchBlockAtHeight(2); |
||||
BlockData block2ArchiveData = block2Info.getBlockData(); |
||||
|
||||
// Read block 2 from the repository
|
||||
BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2); |
||||
|
||||
// Ensure the values match
|
||||
assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight()); |
||||
assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature()); |
||||
|
||||
// Test some values in the archive
|
||||
assertEquals(1, block2ArchiveData.getOnlineAccountsCount()); |
||||
|
||||
// Read block 900 from the archive
|
||||
BlockTransformation block900Info = reader.fetchBlockAtHeight(900); |
||||
BlockData block900ArchiveData = block900Info.getBlockData(); |
||||
|
||||
// Read block 900 from the repository
|
||||
BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900); |
||||
|
||||
// Ensure the values match
|
||||
assertEquals(block900ArchiveData.getHeight(), block900RepositoryData.getHeight()); |
||||
assertArrayEquals(block900ArchiveData.getSignature(), block900RepositoryData.getSignature()); |
||||
|
||||
// Test some values in the archive
|
||||
assertEquals(1, block900ArchiveData.getOnlineAccountsCount()); |
||||
|
||||
} |
||||
} |
||||
|
||||
@Test |
||||
public void testArchivedAtStates() throws DataException, InterruptedException, TransformationException, IOException { |
||||
try (final Repository repository = RepositoryManager.getRepository()) { |
||||
|
||||
// Deploy an AT so that we have AT state data
|
||||
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); |
||||
byte[] creationBytes = AtUtils.buildSimpleAT(); |
||||
long fundingAmount = 1_00000000L; |
||||
DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); |
||||
String atAddress = deployAtTransaction.getATAccount().getAddress(); |
||||
|
||||
// Mint some blocks so that we are able to archive them later
|
||||
for (int i = 0; i < 1000; i++) { |
||||
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); |
||||
} |
||||
|
||||
// 9 blocks are trimmed (this specifies the first untrimmed height)
|
||||
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(10); |
||||
repository.getATRepository().setAtTrimHeight(10); |
||||
|
||||
// Check the max archive height
|
||||
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); |
||||
assertEquals(9, maximumArchiveHeight); |
||||
|
||||
// Write blocks 2-9 to the archive
|
||||
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); |
||||
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
|
||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); |
||||
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); |
||||
|
||||
// Make sure that the archive contains the correct number of blocks
|
||||
assertEquals(9 - 1, writer.getWrittenCount()); |
||||
|
||||
// Increment block archive height
|
||||
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount()); |
||||
repository.saveChanges(); |
||||
assertEquals(9 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight()); |
||||
|
||||
// Ensure the file exists
|
||||
File outputFile = writer.getOutputPath().toFile(); |
||||
assertTrue(outputFile.exists()); |
||||
|
||||
// Check blocks 3-9
|
||||
for (Integer testHeight = 2; testHeight <= 9; testHeight++) { |
||||
|
||||
// Read a block from the archive
|
||||
BlockArchiveReader reader = BlockArchiveReader.getInstance(); |
||||
BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight); |
||||
BlockData archivedBlockData = blockInfo.getBlockData(); |
||||
byte[] archivedAtStateHash = blockInfo.getAtStatesHash(); |
||||
List<TransactionData> archivedTransactions = blockInfo.getTransactions(); |
||||
|
||||
// Read the same block from the repository
|
||||
BlockData repositoryBlockData = repository.getBlockRepository().fromHeight(testHeight); |
||||
ATStateData repositoryAtStateData = repository.getATRepository().getATStateAtHeight(atAddress, testHeight); |
||||
|
||||
// Ensure the repository has full AT state data
|
||||
assertNotNull(repositoryAtStateData.getStateHash()); |
||||
assertNotNull(repositoryAtStateData.getStateData()); |
||||
|
||||
// Check the archived AT state
|
||||
if (testHeight == 2) { |
||||
assertEquals(1, archivedTransactions.size()); |
||||
assertEquals(Transaction.TransactionType.DEPLOY_AT, archivedTransactions.get(0).getType()); |
||||
} |
||||
else { |
||||
// Blocks 3+ shouldn't have any transactions
|
||||
assertTrue(archivedTransactions.isEmpty()); |
||||
} |
||||
|
||||
// Ensure the archive has the AT states hash
|
||||
assertNotNull(archivedAtStateHash); |
||||
|
||||
// Also check the online accounts count and height
|
||||
assertEquals(1, archivedBlockData.getOnlineAccountsCount()); |
||||
assertEquals(testHeight, archivedBlockData.getHeight()); |
||||
|
||||
// Ensure the values match
|
||||
assertEquals(archivedBlockData.getHeight(), repositoryBlockData.getHeight()); |
||||
assertArrayEquals(archivedBlockData.getSignature(), repositoryBlockData.getSignature()); |
||||
assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount()); |
||||
assertArrayEquals(archivedBlockData.getMinterSignature(), repositoryBlockData.getMinterSignature()); |
||||
assertEquals(archivedBlockData.getATCount(), repositoryBlockData.getATCount()); |
||||
assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount()); |
||||
assertArrayEquals(archivedBlockData.getReference(), repositoryBlockData.getReference()); |
||||
assertEquals(archivedBlockData.getTimestamp(), repositoryBlockData.getTimestamp()); |
||||
assertEquals(archivedBlockData.getATFees(), repositoryBlockData.getATFees()); |
||||
assertEquals(archivedBlockData.getTotalFees(), repositoryBlockData.getTotalFees()); |
||||
assertEquals(archivedBlockData.getTransactionCount(), repositoryBlockData.getTransactionCount()); |
||||
assertArrayEquals(archivedBlockData.getTransactionsSignature(), repositoryBlockData.getTransactionsSignature()); |
||||
|
||||
// TODO: build atStatesHash and compare against value in archive
|
||||
} |
||||
|
||||
// Check block 10 (unarchived)
|
||||
BlockArchiveReader reader = BlockArchiveReader.getInstance(); |
||||
BlockTransformation blockInfo = reader.fetchBlockAtHeight(10); |
||||
assertNull(blockInfo); |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
@Test
public void testArchiveAndPrune() throws DataException, InterruptedException, TransformationException, IOException {
	try (final Repository repository = RepositoryManager.getRepository()) {

		// Deploy an AT so that we have AT state data
		PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
		byte[] creationBytes = AtUtils.buildSimpleAT();
		long fundingAmount = 1_00000000L;
		AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);

		// Mint some blocks so that we are able to archive them later
		for (int i = 0; i < 1000; i++) {
			BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
		}

		// Assume 900 blocks are trimmed (this specifies the first untrimmed height)
		repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
		repository.getATRepository().setAtTrimHeight(901);

		// Check the max archive height - this should be one less than the first untrimmed height
		final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
		assertEquals(900, maximumArchiveHeight);

		// Write blocks 2-900 to the archive (the genesis block is never archived)
		BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
		writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
		BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
		assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);

		// Make sure that the archive contains the correct number of blocks (-1 for the genesis block)
		assertEquals(900 - 1, writer.getWrittenCount());

		// Increment block archive height
		// NOTE(review): this test stores 901 (the first non-archived height), whereas the sibling
		// tests in this file store writer.getWrittenCount() (899) - confirm which convention the
		// block archive repository actually expects
		repository.getBlockArchiveRepository().setBlockArchiveHeight(901);
		repository.saveChanges();
		assertEquals(901, repository.getBlockArchiveRepository().getBlockArchiveHeight());

		// Ensure the file exists
		File outputFile = writer.getOutputPath().toFile();
		assertTrue(outputFile.exists());

		// Ensure the SQL repository contains blocks 2 and 900...
		assertNotNull(repository.getBlockRepository().fromHeight(2));
		assertNotNull(repository.getBlockRepository().fromHeight(900));

		// Prune all the archived blocks
		int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900);
		assertEquals(900-1, numBlocksPruned);
		repository.getBlockRepository().setBlockPruneHeight(901);

		// Prune the AT states for the archived blocks
		// (rebuildLatestAtStates must run first so the most recent AT state in the range survives)
		repository.getATRepository().rebuildLatestAtStates(900);
		repository.saveChanges();
		int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900);
		assertEquals(900-2, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
		repository.getATRepository().setAtPruneHeight(901);

		// Now ensure the SQL repository is missing blocks 2 and 900...
		assertNull(repository.getBlockRepository().fromHeight(2));
		assertNull(repository.getBlockRepository().fromHeight(900));

		// ... but it's not missing blocks 1 and 901 (we don't prune the genesis block)
		assertNotNull(repository.getBlockRepository().fromHeight(1));
		assertNotNull(repository.getBlockRepository().fromHeight(901));

		// Validate the latest block height in the repository
		// (presumably genesis + the deploy-AT block + 1000 minted blocks = 1002 - TODO confirm)
		assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());

	}
}
||||
|
||||
@Test
public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException {
	try (final Repository repository = RepositoryManager.getRepository()) {

		// Deploy an AT so that we have AT state data
		PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
		byte[] creationBytes = AtUtils.buildSimpleAT();
		long fundingAmount = 1_00000000L;
		AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);

		// Mint some blocks so that we are able to archive them later
		for (int i = 0; i < 1000; i++) {
			BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
		}

		// Make sure that block 500 has full AT state data and data hash
		List<ATStateData> block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
		ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
		assertNotNull(atStatesData.getStateHash());
		assertNotNull(atStatesData.getStateData());

		// Trim the first 500 blocks
		// (rebuildLatestAtStates runs before trimAtStates so the newest AT state in the range is kept)
		repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500);
		repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501);
		repository.getATRepository().rebuildLatestAtStates(500);
		repository.getATRepository().trimAtStates(0, 500, 1000);
		repository.getATRepository().setAtTrimHeight(501);

		// Now block 499 should only have the AT state data hash
		List<ATStateData> block499AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(499);
		atStatesData = repository.getATRepository().getATStateAtHeight(block499AtStatesData.get(0).getATAddress(), 499);
		assertNotNull(atStatesData.getStateHash());
		assertNull(atStatesData.getStateData());

		// ... but block 500 should have the full data (due to being retained as the "latest" AT state in the trimmed range)
		block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
		atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
		assertNotNull(atStatesData.getStateHash());
		assertNotNull(atStatesData.getStateData());

		// ... and block 501 should also have the full data (it is above the trim height)
		List<ATStateData> block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501);
		atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501);
		assertNotNull(atStatesData.getStateHash());
		assertNotNull(atStatesData.getStateData());

		// Check the max archive height - this should be one less than the first untrimmed height
		final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
		assertEquals(500, maximumArchiveHeight);

		// Snapshot block 3 now, to compare against its re-imported copy at the end of the test
		BlockData block3DataPreArchive = repository.getBlockRepository().fromHeight(3);

		// Write blocks 2-500 to the archive (the genesis block is never archived)
		BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
		writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
		BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
		assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);

		// Make sure that the archive contains the correct number of blocks
		assertEquals(500 - 1, writer.getWrittenCount()); // -1 for the genesis block

		// Increment block archive height
		repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
		repository.saveChanges();
		assertEquals(500 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());

		// Ensure the file exists
		File outputFile = writer.getOutputPath().toFile();
		assertTrue(outputFile.exists());

		// Ensure the SQL repository contains blocks 2 and 500...
		assertNotNull(repository.getBlockRepository().fromHeight(2));
		assertNotNull(repository.getBlockRepository().fromHeight(500));

		// Prune all the archived blocks
		int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 500);
		assertEquals(500-1, numBlocksPruned);
		repository.getBlockRepository().setBlockPruneHeight(501);

		// Prune the AT states for the archived blocks
		repository.getATRepository().rebuildLatestAtStates(500);
		repository.saveChanges();
		int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500);
		assertEquals(498, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
		repository.getATRepository().setAtPruneHeight(501);

		// Now ensure the SQL repository is missing blocks 2 and 500...
		assertNull(repository.getBlockRepository().fromHeight(2));
		assertNull(repository.getBlockRepository().fromHeight(500));

		// ... but it's not missing blocks 1 and 501 (we don't prune the genesis block)
		assertNotNull(repository.getBlockRepository().fromHeight(1));
		assertNotNull(repository.getBlockRepository().fromHeight(501));

		// Validate the latest block height in the repository
		// (presumably genesis + the deploy-AT block + 1000 minted blocks = 1002 - TODO confirm)
		assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());

		// Now orphan some unarchived blocks.
		BlockUtils.orphanBlocks(repository, 500);
		assertEquals(502, (int) repository.getBlockRepository().getLastBlock().getHeight());

		// We're close to the lower limit of the SQL database now, so
		// we need to import some blocks from the archive
		BlockArchiveUtils.importFromArchive(401, 500, repository);

		// Ensure the SQL repository now contains block 401 but not 400...
		assertNotNull(repository.getBlockRepository().fromHeight(401));
		assertNull(repository.getBlockRepository().fromHeight(400));

		// Import the remaining 399 blocks
		BlockArchiveUtils.importFromArchive(2, 400, repository);

		// Verify that block 3 matches the original snapshot taken before archiving
		BlockData block3DataPostArchive = repository.getBlockRepository().fromHeight(3);
		assertArrayEquals(block3DataPreArchive.getSignature(), block3DataPostArchive.getSignature());
		assertEquals(block3DataPreArchive.getHeight(), block3DataPostArchive.getHeight());

		// Orphan 2 more block, which should be the last one that is possible to be orphaned
		// TODO: figure out why this is 1 block more than in the equivalent block archive V1 test
		BlockUtils.orphanBlocks(repository, 2);

		// Orphan another block, which should fail
		Exception exception = null;
		try {
			BlockUtils.orphanBlocks(repository, 1);
		} catch (DataException e) {
			exception = e;
		}

		// Ensure that a DataException is thrown because there is no more AT states data available
		assertNotNull(exception);
		assertEquals(DataException.class, exception.getClass());

		// FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception
		// and allow orphaning back through blocks with trimmed AT states.

	}
}
||||
|
||||
|
||||
/** |
||||
* Many nodes are missing an ATStatesHeightIndex due to an earlier bug |
||||
* In these cases we disable archiving and pruning as this index is a |
||||
* very essential component in these processes. |
||||
*/ |
||||
@Test |
||||
public void testMissingAtStatesHeightIndex() throws DataException, SQLException { |
||||
try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) { |
||||
|
||||
// Firstly check that we're able to prune or archive when the index exists
|
||||
assertTrue(repository.getATRepository().hasAtStatesHeightIndex()); |
||||
assertTrue(RepositoryManager.canArchiveOrPrune()); |
||||
|
||||
// Delete the index
|
||||
repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute(); |
||||
|
||||
// Ensure check that we're unable to prune or archive when the index doesn't exist
|
||||
assertFalse(repository.getATRepository().hasAtStatesHeightIndex()); |
||||
assertFalse(RepositoryManager.canArchiveOrPrune()); |
||||
} |
||||
} |
||||
|
||||
|
||||
private void deleteArchiveDirectory() { |
||||
// Delete archive directory if exists
|
||||
Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath(); |
||||
try { |
||||
FileUtils.deleteDirectory(archivePath.toFile()); |
||||
} catch (IOException e) { |
||||
|
||||
} |
||||
} |
||||
|
||||
} |
Loading…
Reference in new issue