diff --git a/.gitignore b/.gitignore index cf1e7ed2..8f2de896 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ /db* +/lists/ /bin/ /target/ /qortal-backup/ @@ -15,8 +16,8 @@ /settings.json /testnet* /settings*.json -/testchain.json -/run-testnet.sh +/testchain*.json +/run-testnet*.sh /.idea /qortal.iml .DS_Store @@ -25,4 +26,6 @@ /run.pid /run.log /WindowsInstaller/Install Files/qortal.jar +/*.7z +/tmp /data* diff --git a/WindowsInstaller/Qortal.aip b/WindowsInstaller/Qortal.aip index 7d69ffb9..61ee6934 100755 --- a/WindowsInstaller/Qortal.aip +++ b/WindowsInstaller/Qortal.aip @@ -17,10 +17,10 @@ - + - + @@ -212,7 +212,7 @@ - + diff --git a/WindowsInstaller/qortal.ico b/WindowsInstaller/qortal.ico old mode 100755 new mode 100644 index b0f8f5fb..a44ed445 Binary files a/WindowsInstaller/qortal.ico and b/WindowsInstaller/qortal.ico differ diff --git a/pom.xml b/pom.xml index f91d4cfa..1495dea7 100644 --- a/pom.xml +++ b/pom.xml @@ -14,6 +14,9 @@ 1.3.8 3.6 1.8 + 2.6 + 1.21 + 1.9 1.2.2 28.1-jre 2.5.1 @@ -454,7 +457,17 @@ commons-io commons-io - 2.6 + ${commons-io.version} + + + org.apache.commons + commons-compress + ${commons-compress.version} + + + org.tukaani + xz + ${xz.version} diff --git a/src/main/java/org/qortal/RepositoryMaintenance.java b/src/main/java/org/qortal/RepositoryMaintenance.java index c3ae0616..b085822b 100644 --- a/src/main/java/org/qortal/RepositoryMaintenance.java +++ b/src/main/java/org/qortal/RepositoryMaintenance.java @@ -1,6 +1,7 @@ package org.qortal; import java.security.Security; +import java.util.concurrent.TimeoutException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -57,10 +58,10 @@ public class RepositoryMaintenance { LOGGER.info("Starting repository periodic maintenance. 
This can take a while..."); try (final Repository repository = RepositoryManager.getRepository()) { - repository.performPeriodicMaintenance(); + repository.performPeriodicMaintenance(null); LOGGER.info("Repository periodic maintenance completed"); - } catch (DataException e) { + } catch (DataException | TimeoutException e) { LOGGER.error("Repository periodic maintenance failed", e); } diff --git a/src/main/java/org/qortal/api/ApiExceptionFactory.java b/src/main/java/org/qortal/api/ApiExceptionFactory.java index e66c6e84..294cef83 100644 --- a/src/main/java/org/qortal/api/ApiExceptionFactory.java +++ b/src/main/java/org/qortal/api/ApiExceptionFactory.java @@ -16,4 +16,8 @@ public enum ApiExceptionFactory { return createException(request, apiError, null); } + public ApiException createCustomException(HttpServletRequest request, ApiError apiError, String message) { + return new ApiException(apiError.getStatus(), apiError.getCode(), message, null); + } + } diff --git a/src/main/java/org/qortal/api/ApiService.java b/src/main/java/org/qortal/api/ApiService.java index 5baf2c5d..cafba4ae 100644 --- a/src/main/java/org/qortal/api/ApiService.java +++ b/src/main/java/org/qortal/api/ApiService.java @@ -14,6 +14,8 @@ import java.security.SecureRandom; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.eclipse.jetty.http.HttpVersion; import org.eclipse.jetty.rewrite.handler.RedirectPatternRule; import org.eclipse.jetty.rewrite.handler.RewriteHandler; @@ -50,6 +52,8 @@ import org.qortal.settings.Settings; public class ApiService { + private static final Logger LOGGER = LogManager.getLogger(ApiService.class); + private static ApiService instance; private final ResourceConfig config; @@ -203,6 +207,9 @@ public class ApiService { context.addServlet(TradeBotWebSocket.class, "/websockets/crosschain/tradebot"); context.addServlet(PresenceWebSocket.class, "/websockets/presence"); + // Warn about API security if needed + this.checkApiSecurity(); + // Start server this.server.start(); } catch (Exception e) { @@ -222,4 +229,23 @@ public class ApiService { this.server = null; } + private void checkApiSecurity() { + // Warn about API security if needed + boolean allConnectionsAllowed = false; + if (Settings.getInstance().isApiKeyDisabled()) { + for (String pattern : Settings.getInstance().getApiWhitelist()) { + if (pattern.startsWith("0.0.0.0/") || pattern.startsWith("::/") || pattern.endsWith("/0")) { + allConnectionsAllowed = true; + } + } + + if (allConnectionsAllowed) { + LOGGER.warn("Warning: API key validation is currently disabled, and the API whitelist " + + "is allowing all connections. 
This can be a security risk."); + LOGGER.warn("To fix, set the apiKeyDisabled setting to false, or allow only specific local " + + "IP addresses using the apiWhitelist setting."); + } + } + } + } diff --git a/src/main/java/org/qortal/api/Security.java b/src/main/java/org/qortal/api/Security.java index 448f951a..4e25b03b 100644 --- a/src/main/java/org/qortal/api/Security.java +++ b/src/main/java/org/qortal/api/Security.java @@ -12,6 +12,11 @@ public abstract class Security { public static final String API_KEY_HEADER = "X-API-KEY"; public static void checkApiCallAllowed(HttpServletRequest request) { + // If API key checking has been disabled, we will allow the request in all cases + boolean isApiKeyDisabled = Settings.getInstance().isApiKeyDisabled(); + if (isApiKeyDisabled) + return; + String expectedApiKey = Settings.getInstance().getApiKey(); String passedApiKey = request.getHeader(API_KEY_HEADER); diff --git a/src/main/java/org/qortal/api/model/AddressListRequest.java b/src/main/java/org/qortal/api/model/AddressListRequest.java new file mode 100644 index 00000000..c600609f --- /dev/null +++ b/src/main/java/org/qortal/api/model/AddressListRequest.java @@ -0,0 +1,18 @@ +package org.qortal.api.model; + +import io.swagger.v3.oas.annotations.media.Schema; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import java.util.List; + +@XmlAccessorType(XmlAccessType.FIELD) +public class AddressListRequest { + + @Schema(description = "A list of addresses") + public List addresses; + + public AddressListRequest() { + } + +} diff --git a/src/main/java/org/qortal/api/resource/AdminResource.java b/src/main/java/org/qortal/api/resource/AdminResource.java index 35fccd96..2fdacf9d 100644 --- a/src/main/java/org/qortal/api/resource/AdminResource.java +++ b/src/main/java/org/qortal/api/resource/AdminResource.java @@ -22,6 +22,7 @@ import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; @@ -35,6 +36,7 @@ import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LoggerContext; import org.apache.logging.log4j.core.appender.RollingFileAppender; import org.qortal.account.Account; @@ -67,6 +69,8 @@ import com.google.common.collect.Lists; @Tag(name = "Admin") public class AdminResource { + private static final Logger LOGGER = LogManager.getLogger(AdminResource.class); + private static final int MAX_LOG_LINES = 500; @Context @@ -460,6 +464,23 @@ public class AdminResource { if (targetHeight <= 0 || targetHeight > Controller.getInstance().getChainHeight()) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_HEIGHT); + // Make sure we're not orphaning as far back as the archived blocks + // FUTURE: we could support this by first importing earlier blocks from the archive + if (Settings.getInstance().isTopOnly() || + Settings.getInstance().isArchiveEnabled()) { + + try (final Repository repository = RepositoryManager.getRepository()) { + // Find the first unarchived block + int oldestBlock = repository.getBlockArchiveRepository().getBlockArchiveHeight(); + // Add some extra blocks just in case we're currently archiving/pruning + oldestBlock += 100; + if (targetHeight <= oldestBlock) { + LOGGER.info("Unable to orphan 
beyond block {} because it is archived", oldestBlock); + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_HEIGHT); + } + } + } + if (BlockChain.orphan(targetHeight)) return "true"; else @@ -554,13 +575,13 @@ public class AdminResource { @Path("/repository/data") @Operation( summary = "Import data into repository.", - description = "Imports data from file on local machine. Filename is forced to 'import.json' if apiKey is not set.", + description = "Imports data from file on local machine. Filename is forced to 'qortal-backup/TradeBotStates.json' if apiKey is not set.", requestBody = @RequestBody( required = true, content = @Content( mediaType = MediaType.TEXT_PLAIN, schema = @Schema( - type = "string", example = "MintingAccounts.script" + type = "string", example = "qortal-backup/TradeBotStates.json" ) ) ), @@ -578,7 +599,7 @@ public class AdminResource { // Hard-coded because it's too dangerous to allow user-supplied filenames in weaker security contexts if (Settings.getInstance().getApiKey() == null) - filename = "import.json"; + filename = "qortal-backup/TradeBotStates.json"; try (final Repository repository = RepositoryManager.getRepository()) { ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); @@ -590,6 +611,10 @@ public class AdminResource { repository.saveChanges(); return "true"; + + } catch (IOException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e); + } finally { blockchainLock.unlock(); } @@ -645,14 +670,16 @@ public class AdminResource { blockchainLock.lockInterruptibly(); try { - repository.backup(true); + // Timeout if the database isn't ready for backing up after 60 seconds + long timeout = 60 * 1000L; + repository.backup(true, "backup", timeout); repository.saveChanges(); return "true"; } finally { blockchainLock.unlock(); } - } catch (InterruptedException e) { + } catch (InterruptedException | TimeoutException e) { // We couldn't lock blockchain to perform backup return "false"; } catch (DataException e) { @@ -677,13 +704,15 @@ public class AdminResource { blockchainLock.lockInterruptibly(); try { - repository.performPeriodicMaintenance(); + // Timeout if the database isn't ready to start after 60 seconds + long timeout = 60 * 1000L; + repository.performPeriodicMaintenance(timeout); } finally { blockchainLock.unlock(); } } catch (InterruptedException e) { // No big deal - } catch (DataException e) { + } catch (DataException | TimeoutException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } } diff --git a/src/main/java/org/qortal/api/resource/BlocksResource.java b/src/main/java/org/qortal/api/resource/BlocksResource.java index 8920ecc1..b8163c7d 100644 --- a/src/main/java/org/qortal/api/resource/BlocksResource.java +++ b/src/main/java/org/qortal/api/resource/BlocksResource.java @@ -15,6 +15,8 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; import java.util.List; import javax.servlet.http.HttpServletRequest; @@ -33,11 +35,13 @@ import org.qortal.api.ApiExceptionFactory; import org.qortal.api.model.BlockMintingInfo; import org.qortal.api.model.BlockSignerSummary; import org.qortal.block.Block; +import org.qortal.controller.Controller; import org.qortal.crypto.Crypto; import org.qortal.data.account.AccountData; import org.qortal.data.block.BlockData; import org.qortal.data.block.BlockSummaryData; 
import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.BlockArchiveReader; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; @@ -81,11 +85,19 @@ public class BlocksResource { } try (final Repository repository = RepositoryManager.getRepository()) { + // Check the database first BlockData blockData = repository.getBlockRepository().fromSignature(signature); - if (blockData == null) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + if (blockData != null) { + return blockData; + } - return blockData; + // Not found, so try the block archive + blockData = repository.getBlockArchiveRepository().fromSignature(signature); + if (blockData != null) { + return blockData; + } + + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); } catch (DataException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } @@ -116,16 +128,24 @@ public class BlocksResource { } try (final Repository repository = RepositoryManager.getRepository()) { + + // Check the database first BlockData blockData = repository.getBlockRepository().fromSignature(signature); - if (blockData == null) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + if (blockData != null) { + Block block = new Block(repository, blockData); + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + bytes.write(Ints.toByteArray(block.getBlockData().getHeight())); + bytes.write(BlockTransformer.toBytes(block)); + return Base58.encode(bytes.toByteArray()); + } - Block block = new Block(repository, blockData); - ByteArrayOutputStream bytes = new ByteArrayOutputStream(); - bytes.write(Ints.toByteArray(block.getBlockData().getHeight())); - bytes.write(BlockTransformer.toBytes(block)); - return Base58.encode(bytes.toByteArray()); + // Not found, so try the block archive + byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository); + if (bytes != null) { + return Base58.encode(bytes); + } + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); } catch (TransformationException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA, e); } catch (DataException | IOException e) { @@ -170,8 +190,12 @@ public class BlocksResource { } try (final Repository repository = RepositoryManager.getRepository()) { - if (repository.getBlockRepository().getHeightFromSignature(signature) == 0) + // Check if the block exists in either the database or archive + if (repository.getBlockRepository().getHeightFromSignature(signature) == 0 && + repository.getBlockArchiveRepository().getHeightFromSignature(signature) == 0) { + // Not found in either the database or archive throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + } return repository.getBlockRepository().getTransactionsFromSignature(signature, limit, offset, reverse); } catch (DataException e) { @@ -200,7 +224,19 @@ public class BlocksResource { }) public BlockData getFirstBlock() { try (final Repository repository = RepositoryManager.getRepository()) { - return repository.getBlockRepository().fromHeight(1); + // Check the database first + BlockData blockData = repository.getBlockRepository().fromHeight(1); + if (blockData != null) { + return blockData; + } + + // Try the archive + blockData = 
repository.getBlockArchiveRepository().fromHeight(1); + if (blockData != null) { + return blockData; + } + + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); } catch (DataException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } @@ -262,17 +298,28 @@ public class BlocksResource { } try (final Repository repository = RepositoryManager.getRepository()) { + BlockData childBlockData = null; + + // Check if block exists in database BlockData blockData = repository.getBlockRepository().fromSignature(signature); + if (blockData != null) { + return repository.getBlockRepository().fromReference(signature); + } - // Check block exists - if (blockData == null) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); - - BlockData childBlockData = repository.getBlockRepository().fromReference(signature); + // Not found, so try the archive + // This also checks that the parent block exists + // It will return null if either the parent or child don't exit + childBlockData = repository.getBlockArchiveRepository().fromReference(signature); // Check child block exists - if (childBlockData == null) + if (childBlockData == null) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + } + + // Check child block's reference matches the supplied signature + if (!Arrays.equals(childBlockData.getReference(), signature)) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + } return childBlockData; } catch (DataException e) { @@ -338,13 +385,20 @@ public class BlocksResource { } try (final Repository repository = RepositoryManager.getRepository()) { + // Firstly check the database BlockData blockData = repository.getBlockRepository().fromSignature(signature); + if (blockData != null) { + return blockData.getHeight(); + } - // Check block exists - if (blockData == null) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + // Not found, so try the archive + blockData = repository.getBlockArchiveRepository().fromSignature(signature); + if (blockData != null) { + return blockData.getHeight(); + } + + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); - return blockData.getHeight(); } catch (DataException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } @@ -371,11 +425,20 @@ public class BlocksResource { }) public BlockData getByHeight(@PathParam("height") int height) { try (final Repository repository = RepositoryManager.getRepository()) { + // Firstly check the database BlockData blockData = repository.getBlockRepository().fromHeight(height); - if (blockData == null) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + if (blockData != null) { + return blockData; + } + + // Not found, so try the archive + blockData = repository.getBlockArchiveRepository().fromHeight(height); + if (blockData != null) { + return blockData; + } + + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); - return blockData; } catch (DataException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } @@ -402,12 +465,31 @@ public class BlocksResource { }) public BlockMintingInfo getBlockMintingInfoByHeight(@PathParam("height") int height) { try (final Repository repository = RepositoryManager.getRepository()) { + // Try the database 
BlockData blockData = repository.getBlockRepository().fromHeight(height); - if (blockData == null) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + if (blockData == null) { + + // Not found, so try the archive + blockData = repository.getBlockArchiveRepository().fromHeight(height); + if (blockData == null) { + + // Still not found + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + } + } Block block = new Block(repository, blockData); BlockData parentBlockData = repository.getBlockRepository().fromSignature(blockData.getReference()); + if (parentBlockData == null) { + // Parent block not found - try the archive + parentBlockData = repository.getBlockArchiveRepository().fromSignature(blockData.getReference()); + if (parentBlockData == null) { + + // Still not found + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + } + } + int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey()); if (minterLevel == 0) // This may be unavailable when requesting a trimmed block @@ -454,13 +536,26 @@ public class BlocksResource { }) public BlockData getByTimestamp(@PathParam("timestamp") long timestamp) { try (final Repository repository = RepositoryManager.getRepository()) { - int height = repository.getBlockRepository().getHeightFromTimestamp(timestamp); - if (height == 0) - throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + BlockData blockData = null; - BlockData blockData = repository.getBlockRepository().fromHeight(height); - if (blockData == null) + // Try the Blocks table + int height = repository.getBlockRepository().getHeightFromTimestamp(timestamp); + if (height > 0) { + // Found match in Blocks table + return repository.getBlockRepository().fromHeight(height); + } + + // Not found in Blocks table, so try the archive + height = repository.getBlockArchiveRepository().getHeightFromTimestamp(timestamp); + if (height > 0) { + // Found match in archive + blockData = repository.getBlockArchiveRepository().fromHeight(height); + } + + // Ensure block exists + if (blockData == null) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN); + } return blockData; } catch (DataException e) { @@ -497,9 +592,14 @@ public class BlocksResource { for (/* count already set */; count > 0; --count, ++height) { BlockData blockData = repository.getBlockRepository().fromHeight(height); - if (blockData == null) - // Run out of blocks! - break; + if (blockData == null) { + // Not found - try the archive + blockData = repository.getBlockArchiveRepository().fromHeight(height); + if (blockData == null) { + // Run out of blocks! 
+ break; + } + } blocks.add(blockData); } @@ -544,7 +644,29 @@ public class BlocksResource { if (accountData == null || accountData.getPublicKey() == null) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.PUBLIC_KEY_NOT_FOUND); - return repository.getBlockRepository().getBlockSummariesBySigner(accountData.getPublicKey(), limit, offset, reverse); + + List summaries = repository.getBlockRepository() + .getBlockSummariesBySigner(accountData.getPublicKey(), limit, offset, reverse); + + // Add any from the archive + List archivedSummaries = repository.getBlockArchiveRepository() + .getBlockSummariesBySigner(accountData.getPublicKey(), limit, offset, reverse); + if (archivedSummaries != null && !archivedSummaries.isEmpty()) { + summaries.addAll(archivedSummaries); + } + else { + summaries = archivedSummaries; + } + + // Sort the results (because they may have been obtained from two places) + if (reverse != null && reverse) { + summaries.sort((s1, s2) -> Integer.valueOf(s2.getHeight()).compareTo(Integer.valueOf(s1.getHeight()))); + } + else { + summaries.sort(Comparator.comparing(s -> Integer.valueOf(s.getHeight()))); + } + + return summaries; } catch (DataException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } @@ -580,7 +702,8 @@ public class BlocksResource { if (!Crypto.isValidAddress(address)) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS); - return repository.getBlockRepository().getBlockSigners(addresses, limit, offset, reverse); + // This method pulls data from both Blocks and BlockArchive, so no need to query serparately + return repository.getBlockArchiveRepository().getBlockSigners(addresses, limit, offset, reverse); } catch (DataException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } @@ -620,7 +743,76 @@ public class BlocksResource { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA); try (final Repository repository = RepositoryManager.getRepository()) { - return repository.getBlockRepository().getBlockSummaries(startHeight, endHeight, count); + + /* + * start end count result + * 10 40 null blocks 10 to 39 (excludes end block, ignore count) + * + * null null null blocks 1 to 50 (assume count=50, maybe start=1) + * 30 null null blocks 30 to 79 (assume count=50) + * 30 null 10 blocks 30 to 39 + * + * null null 50 last 50 blocks? so if max(blocks.height) is 200, then blocks 151 to 200 + * null 200 null blocks 150 to 199 (excludes end block, assume count=50) + * null 200 10 blocks 190 to 199 (excludes end block) + */ + + List blockSummaries = new ArrayList<>(); + + // Use the latest X blocks if only a count is specified + if (startHeight == null && endHeight == null && count != null) { + BlockData chainTip = repository.getBlockRepository().getLastBlock(); + startHeight = chainTip.getHeight() - count; + endHeight = chainTip.getHeight(); + } + + // ... 
otherwise default the start height to 1 + if (startHeight == null && endHeight == null) { + startHeight = 1; + } + + // Default the count to 50 + if (count == null) { + count = 50; + } + + // If both a start and end height exist, ignore the count + if (startHeight != null && endHeight != null) { + if (startHeight > 0 && endHeight > 0) { + count = Integer.MAX_VALUE; + } + } + + // Derive start height from end height if missing + if (startHeight == null || startHeight == 0) { + if (endHeight != null && endHeight > 0) { + if (count != null) { + startHeight = endHeight - count; + } + } + } + + for (/* count already set */; count > 0; --count, ++startHeight) { + if (endHeight != null && startHeight >= endHeight) { + break; + } + BlockData blockData = repository.getBlockRepository().fromHeight(startHeight); + if (blockData == null) { + // Not found - try the archive + blockData = repository.getBlockArchiveRepository().fromHeight(startHeight); + if (blockData == null) { + // Run out of blocks! + break; + } + } + + if (blockData != null) { + BlockSummaryData blockSummaryData = new BlockSummaryData(blockData); + blockSummaries.add(blockSummaryData); + } + } + + return blockSummaries; } catch (DataException e) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); } diff --git a/src/main/java/org/qortal/api/resource/BootstrapResource.java b/src/main/java/org/qortal/api/resource/BootstrapResource.java new file mode 100644 index 00000000..9b9b7f2a --- /dev/null +++ b/src/main/java/org/qortal/api/resource/BootstrapResource.java @@ -0,0 +1,92 @@ +package org.qortal.api.resource; + +import io.swagger.v3.oas.annotations.Operation; +import io.swagger.v3.oas.annotations.media.Content; +import io.swagger.v3.oas.annotations.media.Schema; +import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.tags.Tag; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.api.ApiError; +import org.qortal.api.ApiExceptionFactory; +import org.qortal.api.Security; +import org.qortal.repository.Bootstrap; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; + +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.*; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import java.io.IOException; + + +@Path("/bootstrap") +@Tag(name = "Bootstrap") +public class BootstrapResource { + + private static final Logger LOGGER = LogManager.getLogger(BootstrapResource.class); + + @Context + HttpServletRequest request; + + @POST + @Path("/create") + @Operation( + summary = "Create bootstrap", + description = "Builds a bootstrap file for distribution", + responses = { + @ApiResponse( + description = "path to file on success, an exception on failure", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string")) + ) + } + ) + public String createBootstrap() { + Security.checkApiCallAllowed(request); + + try (final Repository repository = RepositoryManager.getRepository()) { + + Bootstrap bootstrap = new Bootstrap(repository); + try { + bootstrap.checkRepositoryState(); + } catch (DataException e) { + LOGGER.info("Not ready to create bootstrap: {}", e.getMessage()); + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage()); + } + bootstrap.validateBlockchain(); + return bootstrap.create(); + + } catch (DataException | 
InterruptedException | IOException e) { + LOGGER.info("Unable to create bootstrap", e); + throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage()); + } + } + + @GET + @Path("/validate") + @Operation( + summary = "Validate blockchain", + description = "Useful to check database integrity prior to creating or after installing a bootstrap. " + + "This process is intensive and can take over an hour to run.", + responses = { + @ApiResponse( + description = "true if valid, false if invalid", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) + ) + } + ) + public boolean validateBootstrap() { + Security.checkApiCallAllowed(request); + + try (final Repository repository = RepositoryManager.getRepository()) { + + Bootstrap bootstrap = new Bootstrap(repository); + return bootstrap.validateCompleteBlockchain(); + + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE); + } + } +} diff --git a/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java b/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java index 0076609a..46d7ebc6 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java @@ -11,6 +11,7 @@ import java.util.List; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.GET; +import javax.ws.rs.POST; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.core.Context; @@ -173,7 +174,7 @@ public class CrossChainHtlcResource { } } - @GET + @POST @Path("/redeem/{ataddress}") @Operation( summary = "Redeems HTLC associated with supplied AT", @@ -231,7 +232,7 @@ public class CrossChainHtlcResource { } } - @GET + @POST @Path("/redeemAll") @Operation( summary = "Redeems HTLC for all applicable ATs in tradebot data", @@ -415,7 +416,7 @@ public class CrossChainHtlcResource { return false; } - @GET + @POST @Path("/refund/{ataddress}") @Operation( summary = "Refunds HTLC associated with supplied AT", @@ -463,7 +464,7 @@ public class CrossChainHtlcResource { } - @GET + @POST @Path("/refundAll") @Operation( summary = "Refunds HTLC for all applicable ATs in tradebot data", @@ -478,8 +479,6 @@ public class CrossChainHtlcResource { ) @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN}) public boolean refundAllHtlc() { - Security.checkApiCallAllowed(request); - Security.checkApiCallAllowed(request); boolean success = false; @@ -568,6 +567,13 @@ public class CrossChainHtlcResource { if (crossChainTradeData == null) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA); + // If the AT is "finished" then it will have a zero balance + // In these cases we should avoid HTLC refunds if tbe QORT haven't been returned to the seller + if (atData.getIsFinished() && crossChainTradeData.mode != AcctMode.REFUNDED && crossChainTradeData.mode != AcctMode.CANCELLED) { + LOGGER.info(String.format("Skipping AT %s because the QORT has already been redemed", atAddress)); + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA); + } + List allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData(); TradeBotData tradeBotData = allTradeBotData.stream().filter(tradeBotDataItem -> tradeBotDataItem.getAtAddress().equals(atAddress)).findFirst().orElse(null); if (tradeBotData == null) diff --git 
a/src/main/java/org/qortal/api/resource/ListsResource.java b/src/main/java/org/qortal/api/resource/ListsResource.java new file mode 100644 index 00000000..dea6690c --- /dev/null +++ b/src/main/java/org/qortal/api/resource/ListsResource.java @@ -0,0 +1,298 @@ +package org.qortal.api.resource; + +import io.swagger.v3.oas.annotations.Operation; +import io.swagger.v3.oas.annotations.media.ArraySchema; +import io.swagger.v3.oas.annotations.media.Content; +import io.swagger.v3.oas.annotations.media.Schema; +import io.swagger.v3.oas.annotations.parameters.RequestBody; +import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.tags.Tag; + +import org.qortal.api.*; +import org.qortal.api.model.AddressListRequest; +import org.qortal.crypto.Crypto; +import org.qortal.data.account.AccountData; +import org.qortal.list.ResourceListManager; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; + +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.*; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; + + +@Path("/lists") +@Tag(name = "Lists") +public class ListsResource { + + @Context + HttpServletRequest request; + + @POST + @Path("/blacklist/address/{address}") + @Operation( + summary = "Add a QORT address to the local blacklist", + responses = { + @ApiResponse( + description = "Returns true on success, or an exception on failure", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) + ) + } + ) + @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE}) + public String addAddressToBlacklist(@PathParam("address") String address) { + Security.checkApiCallAllowed(request); + + if (!Crypto.isValidAddress(address)) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS); + + try (final Repository repository = RepositoryManager.getRepository()) { + AccountData accountData = repository.getAccountRepository().getAccount(address); + // Not found? + if (accountData == null) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN); + + // Valid address, so go ahead and blacklist it + boolean success = ResourceListManager.getInstance().addAddressToBlacklist(address, true); + + return success ? "true" : "false"; + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + + @POST + @Path("/blacklist/addresses") + @Operation( + summary = "Add one or more QORT addresses to the local blacklist", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.APPLICATION_JSON, + schema = @Schema( + implementation = AddressListRequest.class + ) + ) + ), + responses = { + @ApiResponse( + description = "Returns true if all addresses were processed, false if any couldn't be " + + "processed, or an exception on failure. 
If false or an exception is returned, " + + "the list will not be updated, and the request will need to be re-issued.", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) + ) + } + ) + @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE}) + public String addAddressesToBlacklist(AddressListRequest addressListRequest) { + Security.checkApiCallAllowed(request); + + if (addressListRequest == null || addressListRequest.addresses == null) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA); + } + + int successCount = 0; + int errorCount = 0; + + try (final Repository repository = RepositoryManager.getRepository()) { + + for (String address : addressListRequest.addresses) { + + if (!Crypto.isValidAddress(address)) { + errorCount++; + continue; + } + + AccountData accountData = repository.getAccountRepository().getAccount(address); + // Not found? + if (accountData == null) { + errorCount++; + continue; + } + + // Valid address, so go ahead and blacklist it + boolean success = ResourceListManager.getInstance().addAddressToBlacklist(address, false); + if (success) { + successCount++; + } + else { + errorCount++; + } + } + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + + if (successCount > 0 && errorCount == 0) { + // All were successful, so save the blacklist + ResourceListManager.getInstance().saveBlacklist(); + return "true"; + } + else { + // Something went wrong, so revert + ResourceListManager.getInstance().revertBlacklist(); + return "false"; + } + } + + + @DELETE + @Path("/blacklist/address/{address}") + @Operation( + summary = "Remove a QORT address from the local blacklist", + responses = { + @ApiResponse( + description = "Returns true on success, or an exception on failure", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) + ) + } + ) + @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE}) + public String removeAddressFromBlacklist(@PathParam("address") String address) { + Security.checkApiCallAllowed(request); + + if (!Crypto.isValidAddress(address)) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS); + + try (final Repository repository = RepositoryManager.getRepository()) { + AccountData accountData = repository.getAccountRepository().getAccount(address); + // Not found? + if (accountData == null) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN); + + // Valid address, so go ahead and blacklist it + boolean success = ResourceListManager.getInstance().removeAddressFromBlacklist(address, true); + + return success ? "true" : "false"; + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + + @DELETE + @Path("/blacklist/addresses") + @Operation( + summary = "Remove one or more QORT addresses from the local blacklist", + requestBody = @RequestBody( + required = true, + content = @Content( + mediaType = MediaType.APPLICATION_JSON, + schema = @Schema( + implementation = AddressListRequest.class + ) + ) + ), + responses = { + @ApiResponse( + description = "Returns true if all addresses were processed, false if any couldn't be " + + "processed, or an exception on failure. 
If false or an exception is returned, " + + "the list will not be updated, and the request will need to be re-issued.", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) + ) + } + ) + @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE}) + public String removeAddressesFromBlacklist(AddressListRequest addressListRequest) { + Security.checkApiCallAllowed(request); + + if (addressListRequest == null || addressListRequest.addresses == null) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA); + } + + int successCount = 0; + int errorCount = 0; + + try (final Repository repository = RepositoryManager.getRepository()) { + + for (String address : addressListRequest.addresses) { + + if (!Crypto.isValidAddress(address)) { + errorCount++; + continue; + } + + AccountData accountData = repository.getAccountRepository().getAccount(address); + // Not found? + if (accountData == null) { + errorCount++; + continue; + } + + // Valid address, so go ahead and blacklist it + // Don't save as we will do this at the end of the process + boolean success = ResourceListManager.getInstance().removeAddressFromBlacklist(address, false); + if (success) { + successCount++; + } + else { + errorCount++; + } + } + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + + if (successCount > 0 && errorCount == 0) { + // All were successful, so save the blacklist + ResourceListManager.getInstance().saveBlacklist(); + return "true"; + } + else { + // Something went wrong, so revert + ResourceListManager.getInstance().revertBlacklist(); + return "false"; + } + } + + @GET + @Path("/blacklist/addresses") + @Operation( + summary = "Fetch the list of blacklisted addresses", + responses = { + @ApiResponse( + description = "A JSON array of addresses", + content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = String.class))) + ) + } + ) + public String getAddressBlacklist() { + Security.checkApiCallAllowed(request); + return ResourceListManager.getInstance().getBlacklistJSONString(); + } + + @GET + @Path("/blacklist/address/{address}") + @Operation( + summary = "Check if an address is present in the local blacklist", + responses = { + @ApiResponse( + description = "Returns true or false if the list was queried, or an exception on failure", + content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean")) + ) + } + ) + @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE}) + public String checkAddressInBlacklist(@PathParam("address") String address) { + Security.checkApiCallAllowed(request); + + if (!Crypto.isValidAddress(address)) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS); + + try (final Repository repository = RepositoryManager.getRepository()) { + AccountData accountData = repository.getAccountRepository().getAccount(address); + // Not found? + if (accountData == null) + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN); + + // Valid address, so go ahead and blacklist it + boolean blacklisted = ResourceListManager.getInstance().isAddressInBlacklist(address); + + return blacklisted ? 
"true" : "false"; + } catch (DataException e) { + throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e); + } + } + +} diff --git a/src/main/java/org/qortal/at/AT.java b/src/main/java/org/qortal/at/AT.java index e82ab14e..005bb0cd 100644 --- a/src/main/java/org/qortal/at/AT.java +++ b/src/main/java/org/qortal/at/AT.java @@ -1,5 +1,7 @@ package org.qortal.at; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import org.ciyam.at.MachineState; @@ -56,12 +58,12 @@ public class AT { this.atData = new ATData(atAddress, creatorPublicKey, creation, machineState.version, assetId, codeBytes, codeHash, machineState.isSleeping(), machineState.getSleepUntilHeight(), machineState.isFinished(), machineState.hadFatalError(), - machineState.isFrozen(), machineState.getFrozenBalance()); + machineState.isFrozen(), machineState.getFrozenBalance(), null); byte[] stateData = machineState.toBytes(); byte[] stateHash = Crypto.digest(stateData); - this.atStateData = new ATStateData(atAddress, height, stateData, stateHash, 0L, true); + this.atStateData = new ATStateData(atAddress, height, stateData, stateHash, 0L, true, null); } // Getters / setters @@ -84,13 +86,28 @@ public class AT { this.repository.getATRepository().delete(this.atData.getATAddress()); } + /** + * Potentially execute AT. + *

+ * Note that sleep-until-message support might set/reset + * sleep-related flags/values. + *

+ * {@link #getATStateData()} will return null if nothing happened. + *

+ * @param blockHeight + * @param blockTimestamp + * @return AT-generated transactions, possibly empty + * @throws DataException + */ public List run(int blockHeight, long blockTimestamp) throws DataException { String atAddress = this.atData.getATAddress(); QortalATAPI api = new QortalATAPI(repository, this.atData, blockTimestamp); QortalAtLoggerFactory loggerFactory = QortalAtLoggerFactory.getInstance(); - byte[] codeBytes = this.atData.getCodeBytes(); + if (!api.willExecute(blockHeight)) + // this.atStateData will be null + return Collections.emptyList(); // Fetch latest ATStateData for this AT ATStateData latestAtStateData = this.repository.getATRepository().getLatestATState(atAddress); @@ -100,8 +117,10 @@ public class AT { throw new IllegalStateException("No previous AT state data found"); // [Re]create AT machine state using AT state data or from scratch as applicable + byte[] codeBytes = this.atData.getCodeBytes(); MachineState state = MachineState.fromBytes(api, loggerFactory, latestAtStateData.getStateData(), codeBytes); try { + api.preExecute(state); state.execute(); } catch (Exception e) { throw new DataException(String.format("Uncaught exception while running AT '%s'", atAddress), e); @@ -109,9 +128,18 @@ public class AT { byte[] stateData = state.toBytes(); byte[] stateHash = Crypto.digest(stateData); - long atFees = api.calcFinalFees(state); - this.atStateData = new ATStateData(atAddress, blockHeight, stateData, stateHash, atFees, false); + // Nothing happened? + if (state.getSteps() == 0 && Arrays.equals(stateHash, latestAtStateData.getStateHash())) + // We currently want to execute frozen ATs, to maintain backwards support. + if (state.isFrozen() == false) + // this.atStateData will be null + return Collections.emptyList(); + + long atFees = api.calcFinalFees(state); + Long sleepUntilMessageTimestamp = this.atData.getSleepUntilMessageTimestamp(); + + this.atStateData = new ATStateData(atAddress, blockHeight, stateData, stateHash, atFees, false, sleepUntilMessageTimestamp); return api.getTransactions(); } @@ -130,6 +158,10 @@ public class AT { this.atData.setHadFatalError(state.hadFatalError()); this.atData.setIsFrozen(state.isFrozen()); this.atData.setFrozenBalance(state.getFrozenBalance()); + + // Special sleep-until-message support + this.atData.setSleepUntilMessageTimestamp(this.atStateData.getSleepUntilMessageTimestamp()); + this.repository.getATRepository().save(this.atData); } @@ -157,6 +189,10 @@ public class AT { this.atData.setHadFatalError(state.hadFatalError()); this.atData.setIsFrozen(state.isFrozen()); this.atData.setFrozenBalance(state.getFrozenBalance()); + + // Special sleep-until-message support + this.atData.setSleepUntilMessageTimestamp(previousStateData.getSleepUntilMessageTimestamp()); + this.repository.getATRepository().save(this.atData); } diff --git a/src/main/java/org/qortal/at/QortalATAPI.java b/src/main/java/org/qortal/at/QortalATAPI.java index 6a379d59..c393a684 100644 --- a/src/main/java/org/qortal/at/QortalATAPI.java +++ b/src/main/java/org/qortal/at/QortalATAPI.java @@ -32,6 +32,7 @@ import org.qortal.group.Group; import org.qortal.repository.ATRepository; import org.qortal.repository.DataException; import org.qortal.repository.Repository; +import org.qortal.repository.ATRepository.NextTransactionInfo; import org.qortal.transaction.AtTransaction; import org.qortal.transaction.Transaction.TransactionType; import org.qortal.utils.Base58; @@ -74,8 +75,45 @@ public class QortalATAPI extends API { return this.transactions; } - public long 
calcFinalFees(MachineState state) { - return state.getSteps() * this.ciyamAtSettings.feePerStep; + public boolean willExecute(int blockHeight) throws DataException { + // Sleep-until-message/height checking + Long sleepUntilMessageTimestamp = this.atData.getSleepUntilMessageTimestamp(); + + if (sleepUntilMessageTimestamp != null) { + // Quicker to check height, if sleep-until-height also active + Integer sleepUntilHeight = this.atData.getSleepUntilHeight(); + + boolean wakeDueToHeight = sleepUntilHeight != null && sleepUntilHeight != 0 && blockHeight >= sleepUntilHeight; + + boolean wakeDueToMessage = false; + if (!wakeDueToHeight) { + // No avoiding asking repository + Timestamp previousTxTimestamp = new Timestamp(sleepUntilMessageTimestamp); + NextTransactionInfo nextTransactionInfo = this.repository.getATRepository().findNextTransaction(this.atData.getATAddress(), + previousTxTimestamp.blockHeight, + previousTxTimestamp.transactionSequence); + + wakeDueToMessage = nextTransactionInfo != null; + } + + // Can we skip? + if (!wakeDueToHeight && !wakeDueToMessage) + return false; + } + + return true; + } + + public void preExecute(MachineState state) { + // Sleep-until-message/height checking + Long sleepUntilMessageTimestamp = this.atData.getSleepUntilMessageTimestamp(); + + if (sleepUntilMessageTimestamp != null) { + // We've passed checks, so clear sleep-related flags/values + this.setIsSleeping(state, false); + this.setSleepUntilHeight(state, 0); + this.atData.setSleepUntilMessageTimestamp(null); + } } // Inherited methods from CIYAM AT API @@ -412,6 +450,10 @@ public class QortalATAPI extends API { // Utility methods + public long calcFinalFees(MachineState state) { + return state.getSteps() * this.ciyamAtSettings.feePerStep; + } + /** Returns partial transaction signature, used to verify we're operating on the same transaction and not naively using block height & sequence. */ public static byte[] partialSignature(byte[] fullSignature) { return Arrays.copyOfRange(fullSignature, 8, 32); @@ -460,6 +502,15 @@ public class QortalATAPI extends API { } } + /*package*/ void sleepUntilMessageOrHeight(MachineState state, long txTimestamp, Long sleepUntilHeight) { + this.setIsSleeping(state, true); + + this.atData.setSleepUntilMessageTimestamp(txTimestamp); + + if (sleepUntilHeight != null) + this.setSleepUntilHeight(state, sleepUntilHeight.intValue()); + } + /** Returns AT's account */ /* package */ Account getATAccount() { return new Account(this.repository, this.atData.getATAddress()); diff --git a/src/main/java/org/qortal/at/QortalFunctionCode.java b/src/main/java/org/qortal/at/QortalFunctionCode.java index 0d11e488..7069290a 100644 --- a/src/main/java/org/qortal/at/QortalFunctionCode.java +++ b/src/main/java/org/qortal/at/QortalFunctionCode.java @@ -84,6 +84,43 @@ public enum QortalFunctionCode { api.setB(state, bBytes); } }, + /** + * Sleep AT until a new message arrives after 'tx-timestamp'.
+ * 0x0503 tx-timestamp + */ + SLEEP_UNTIL_MESSAGE(0x0503, 1, false) { + @Override + protected void postCheckExecute(FunctionData functionData, MachineState state, short rawFunctionCode) throws ExecutionException { + if (functionData.value1 <= 0) + return; + + long txTimestamp = functionData.value1; + + QortalATAPI api = (QortalATAPI) state.getAPI(); + api.sleepUntilMessageOrHeight(state, txTimestamp, null); + } + }, + /** + * Sleep AT until a new message arrives, after 'tx-timestamp', or height reached.
+ * 0x0504 tx-timestamp height + */ + SLEEP_UNTIL_MESSAGE_OR_HEIGHT(0x0504, 2, false) { + @Override + protected void postCheckExecute(FunctionData functionData, MachineState state, short rawFunctionCode) throws ExecutionException { + if (functionData.value1 <= 0) + return; + + long txTimestamp = functionData.value1; + + if (functionData.value2 <= 0) + return; + + long sleepUntilHeight = functionData.value2; + + QortalATAPI api = (QortalATAPI) state.getAPI(); + api.sleepUntilMessageOrHeight(state, txTimestamp, sleepUntilHeight); + } + }, /** * Convert address in B to 20-byte value in LSB of B1, and all of B2 & B3.
* 0x0510 diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index 798a4f91..d6bb11f3 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -1104,9 +1104,14 @@ public class Block { // Create repository savepoint here so we can rollback to it after testing transactions repository.setSavepoint(); - if (this.blockData.getHeight() == 212937) + if (this.blockData.getHeight() == 212937) { // Apply fix for block 212937 but fix will be rolled back before we exit method Block212937.processFix(this); + } + else if (InvalidNameRegistrationBlocks.isAffectedBlock(this.blockData.getHeight())) { + // Apply fix for affected name registration blocks, but fix will be rolled back before we exit method + InvalidNameRegistrationBlocks.processFix(this); + } for (Transaction transaction : this.getTransactions()) { TransactionData transactionData = transaction.getTransactionData(); @@ -1145,7 +1150,7 @@ public class Block { // Check transaction can even be processed validationResult = transaction.isProcessable(); if (validationResult != Transaction.ValidationResult.OK) { - LOGGER.debug(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name())); + LOGGER.info(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name())); return ValidationResult.TRANSACTION_INVALID; } @@ -1259,12 +1264,13 @@ public class Block { for (ATData atData : executableATs) { AT at = new AT(this.repository, atData); List atTransactions = at.run(this.blockData.getHeight(), this.blockData.getTimestamp()); + ATStateData atStateData = at.getATStateData(); + // Didn't execute? (e.g. sleeping) + if (atStateData == null) + continue; allAtTransactions.addAll(atTransactions); - - ATStateData atStateData = at.getATStateData(); this.ourAtStates.add(atStateData); - this.ourAtFees += atStateData.getFees(); } @@ -1293,6 +1299,21 @@ public class Block { return mintingAccount.canMint(); } + /** + * Pre-process block, and its transactions. + * This allows for any database integrity checks prior to validation. + * This is called before isValid() and process() + * + * @throws DataException + */ + public void preProcess() throws DataException { + List blocksTransactions = this.getTransactions(); + + for (Transaction transaction : blocksTransactions) { + transaction.preProcess(); + } + } + /** * Process block, and its transactions, adding them to the blockchain. 
* diff --git a/src/main/java/org/qortal/block/BlockChain.java b/src/main/java/org/qortal/block/BlockChain.java index e6b8db4e..7a6d6605 100644 --- a/src/main/java/org/qortal/block/BlockChain.java +++ b/src/main/java/org/qortal/block/BlockChain.java @@ -4,10 +4,7 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.InputStream; import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.locks.ReentrantLock; import javax.xml.bind.JAXBContext; @@ -27,11 +24,9 @@ import org.eclipse.persistence.jaxb.UnmarshallerProperties; import org.qortal.controller.Controller; import org.qortal.data.block.BlockData; import org.qortal.network.Network; -import org.qortal.repository.BlockRepository; -import org.qortal.repository.DataException; -import org.qortal.repository.Repository; -import org.qortal.repository.RepositoryManager; +import org.qortal.repository.*; import org.qortal.settings.Settings; +import org.qortal.utils.Base58; import org.qortal.utils.StringLongMapXmlAdapter; /** @@ -506,29 +501,105 @@ public class BlockChain { * @throws SQLException */ public static void validate() throws DataException { - // Check first block is Genesis Block - if (!isGenesisBlockValid()) - rebuildBlockchain(); + boolean isTopOnly = Settings.getInstance().isTopOnly(); + boolean archiveEnabled = Settings.getInstance().isArchiveEnabled(); + boolean canBootstrap = Settings.getInstance().getBootstrap(); + boolean needsArchiveRebuild = false; + BlockData chainTip; + + try (final Repository repository = RepositoryManager.getRepository()) { + chainTip = repository.getBlockRepository().getLastBlock(); + + // Ensure archive is (at least partially) intact, and force a bootstrap if it isn't + if (!isTopOnly && archiveEnabled && canBootstrap) { + needsArchiveRebuild = (repository.getBlockArchiveRepository().fromHeight(2) == null); + if (needsArchiveRebuild) { + LOGGER.info("Couldn't retrieve block 2 from archive. Bootstrapping..."); + + // If there are minting accounts, make sure to back them up + // Don't backup if there are no minting accounts, as this can cause problems + if (!repository.getAccountRepository().getMintingAccounts().isEmpty()) { + Controller.getInstance().exportRepositoryData(); + } + } + } + } + + boolean hasBlocks = (chainTip != null && chainTip.getHeight() > 1); + + if (isTopOnly && hasBlocks) { + // Top-only mode is enabled and we have blocks, so it's possible that the genesis block has been pruned + // It's best not to validate it, and there's no real need to + } else { + // Check first block is Genesis Block + if (!isGenesisBlockValid() || needsArchiveRebuild) { + try { + rebuildBlockchain(); + + } catch (InterruptedException e) { + throw new DataException(String.format("Interrupted when trying to rebuild blockchain: %s", e.getMessage())); + } + } + } + + // We need to create a new connection, as the previous repository and its connections may be been + // closed by rebuildBlockchain() if a bootstrap was applied try (final Repository repository = RepositoryManager.getRepository()) { repository.checkConsistency(); - int startHeight = Math.max(repository.getBlockRepository().getBlockchainHeight() - 1440, 1); + // Set the number of blocks to validate based on the pruned state of the chain + // If pruned, subtract an extra 10 to allow room for error + int blocksToValidate = (isTopOnly || archiveEnabled) ? 
Settings.getInstance().getPruneBlockLimit() - 10 : 1440; + int startHeight = Math.max(repository.getBlockRepository().getBlockchainHeight() - blocksToValidate, 1); BlockData detachedBlockData = repository.getBlockRepository().getDetachedBlockSignature(startHeight); if (detachedBlockData != null) { - LOGGER.error(String.format("Block %d's reference does not match any block's signature", detachedBlockData.getHeight())); + LOGGER.error(String.format("Block %d's reference does not match any block's signature", + detachedBlockData.getHeight())); + LOGGER.error(String.format("Your chain may be invalid and you should consider bootstrapping" + + " or re-syncing from genesis.")); + } + } + } - // Wait for blockchain lock (whereas orphan() only tries to get lock) - ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); - blockchainLock.lock(); - try { - LOGGER.info(String.format("Orphaning back to block %d", detachedBlockData.getHeight() - 1)); - orphan(detachedBlockData.getHeight() - 1); - } finally { - blockchainLock.unlock(); + /** + * More thorough blockchain validation method. Useful for validating bootstraps. + * A DataException is thrown if anything is invalid. + * + * @throws DataException + */ + public static void validateAllBlocks() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + BlockData chainTip = repository.getBlockRepository().getLastBlock(); + final int chainTipHeight = chainTip.getHeight(); + final int oldestBlock = 1; // TODO: increase if in pruning mode + byte[] lastReference = null; + + for (int height = chainTipHeight; height > oldestBlock; height--) { + BlockData blockData = repository.getBlockRepository().fromHeight(height); + if (blockData == null) { + blockData = repository.getBlockArchiveRepository().fromHeight(height); } + + if (blockData == null) { + String error = String.format("Missing block at height %d", height); + LOGGER.error(error); + throw new DataException(error); + } + + if (height != chainTipHeight) { + // Check reference + if (!Arrays.equals(blockData.getSignature(), lastReference)) { + String error = String.format("Invalid reference for block at height %d: %s (should be %s)", + height, Base58.encode(blockData.getReference()), Base58.encode(lastReference)); + LOGGER.error(error); + throw new DataException(error); + } + } + + lastReference = blockData.getReference(); } } } @@ -551,7 +622,15 @@ public class BlockChain { } } - private static void rebuildBlockchain() throws DataException { + private static void rebuildBlockchain() throws DataException, InterruptedException { + boolean shouldBootstrap = Settings.getInstance().getBootstrap(); + if (shouldBootstrap) { + // Settings indicate that we should apply a bootstrap rather than rebuilding and syncing from genesis + Bootstrap bootstrap = new Bootstrap(); + bootstrap.startImport(); + return; + } + // (Re)build repository if (!RepositoryManager.wasPristineAtOpen()) RepositoryManager.rebuild(); diff --git a/src/main/java/org/qortal/block/InvalidNameRegistrationBlocks.java b/src/main/java/org/qortal/block/InvalidNameRegistrationBlocks.java new file mode 100644 index 00000000..ebef366f --- /dev/null +++ b/src/main/java/org/qortal/block/InvalidNameRegistrationBlocks.java @@ -0,0 +1,114 @@ +package org.qortal.block; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.naming.Name; +import org.qortal.repository.DataException; + +import java.util.HashMap; +import java.util.Map; + +/** + * 
Invalid Name Registration Blocks + *
+ * A node minted a version of block 535658 that contained one transaction: + * a REGISTER_NAME transaction that attempted to register a name that was already registered. + *
+ * This invalid transaction made block 535658 (rightly) invalid to several nodes, + * which refused to use that block. + * However, it seems there were no other nodes minting an alternative, valid block at that time + * and so the chain stalled for several nodes in the network. + *
+ * Additionally, the invalid block 535658 affected all new installations, regardless of whether + * they synchronized from scratch (block 1) or used an 'official release' bootstrap. + *
+ * The diagnosis found the following: + * - The original problem occurred in block 535205 where for some unknown reason many nodes didn't + * add the name from a REGISTER_NAME transaction to their Names table. + * - As a result, those nodes had a corrupt db, because they weren't holding a record of the name. + * - This invalid db then caused them to treat a candidate for block 535658 as valid when it + * should have been invalid. + * - As such, the chain continued on with a technically invalid block in it, for a subset of the network + *
+ * As with block 212937, there were three options, but the only feasible one was to apply edits to block + * 535658 to make it valid. There were several cross-chain trades completed after this block, so doing + * any kind of rollback was out of the question. + *
+ * To complicate things further, a custom data field was used for the first REGISTER_NAME transaction, + * and the default data field was used for the second. So it was important that all nodes ended up with + * the exact same data regardless of how they arrived there. + *
+ * The invalid block 535658 signature is: 3oiuDhok...NdXvCLEV. + *
+ * The invalid transaction in block 535658 is: + *
+ *
+	 {
+		 "type": "REGISTER_NAME",
+		 "timestamp": 1630739437517,
+		 "reference": "4peRechwSPxP6UkRj9Y8ox9YxkWb34sWk5zyMc1WyMxEsACxD4Gmm7LZVsQ6Skpze8QCSBMZasvEZg6RgdqkyADW",
+		 "fee": "0.00100000",
+		 "signature": "2t1CryCog8KPDBarzY5fDCKu499nfnUcGrz4Lz4w5wNb5nWqm7y126P48dChYY7huhufcBV3RJPkgKP4Ywxc1gXx",
+		 "txGroupId": 0,
+		 "blockHeight": 535658,
+		 "approvalStatus": "NOT_REQUIRED",
+		 "creatorAddress": "Qbx9ojxv7XNi1xDMWzzw7xDvd1zYW6SKFB",
+		 "registrantPublicKey": "HJqGEf6cW695Xun4ydhkB2excGFwsDxznhNCRHZStyyx",
+		 "name": "Qplay",
+		 "data": "Registered Name on the Qortal Chain"
+	 }
+   
+ *
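For reference, the class defined below exposes two entry points: isAffectedBlock(height) tests whether a block height needs this patch, and processFix(block) applies it. A minimal sketch of a plausible call site follows; the wrapper class and method name are hypothetical, as the actual hook into block processing is not shown in this part of the diff.

    import org.qortal.block.Block;
    import org.qortal.block.InvalidNameRegistrationBlocks;
    import org.qortal.repository.DataException;

    // Hypothetical wrapper - the real call site lives elsewhere in the codebase
    class NameRegistrationFixHook {
        static void applyIfNeeded(Block block) throws DataException {
            int height = block.getBlockData().getHeight();
            if (InvalidNameRegistrationBlocks.isAffectedBlock(height)) {
                // Unregister the pre-existing name so the duplicate REGISTER_NAME
                // transaction in this block validates identically on every node
                InvalidNameRegistrationBlocks.processFix(block);
            }
        }
    }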
+ * Account Qbx9ojxv7XNi1xDMWzzw7xDvd1zYW6SKFB attempted to register the name Qplay + * when they had already registered it 12 hours before in block 535205. + *
+ * However, on the broken DB nodes, their Names table was missing a record for the `Qplay` name + * which was sufficient to make the transaction valid. + * + * This problem then occurred two more times, in blocks 536140 and 541334 + * To reduce duplication, I have combined all three block fixes into a single class + * + */ +public final class InvalidNameRegistrationBlocks { + + private static final Logger LOGGER = LogManager.getLogger(InvalidNameRegistrationBlocks.class); + + public static Map invalidBlocksNamesMap = new HashMap() + { + { + put(535658, "Qplay"); + put(536140, "Qweb"); + put(541334, "Qithub"); + } + }; + + private InvalidNameRegistrationBlocks() { + /* Do not instantiate */ + } + + public static boolean isAffectedBlock(int height) { + return (invalidBlocksNamesMap.containsKey(height)); + } + + public static void processFix(Block block) throws DataException { + Integer blockHeight = block.getBlockData().getHeight(); + String invalidName = invalidBlocksNamesMap.get(blockHeight); + if (invalidName == null) { + throw new DataException(String.format("Unable to lookup invalid name for block height %d", blockHeight)); + } + + // Unregister the existing name record if it exists + // This ensures that the duplicate name is considered valid, and therefore + // the second (i.e. duplicate) REGISTER_NAME transaction data is applied. + // Both were issued by the same user account, so there is no conflict. + Name name = new Name(block.repository, invalidName); + name.unregister(); + + LOGGER.debug("Applied name registration patch for block {}", blockHeight); + } + + // Note: + // There is no need to write an orphanFix() method, as we do not have + // the necessary ATStatesData to orphan back this far anyway + +} diff --git a/src/main/java/org/qortal/controller/AutoUpdate.java b/src/main/java/org/qortal/controller/AutoUpdate.java index 6c1dd928..f07e82d1 100644 --- a/src/main/java/org/qortal/controller/AutoUpdate.java +++ b/src/main/java/org/qortal/controller/AutoUpdate.java @@ -14,6 +14,7 @@ import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeoutException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -215,8 +216,17 @@ public class AutoUpdate extends Thread { } // Give repository a chance to backup in case things go badly wrong (if enabled) - if (Settings.getInstance().getRepositoryBackupInterval() > 0) - RepositoryManager.backup(true); + if (Settings.getInstance().getRepositoryBackupInterval() > 0) { + try { + // Timeout if the database isn't ready for backing up after 60 seconds + long timeout = 60 * 1000L; + RepositoryManager.backup(true, "backup", timeout); + + } catch (TimeoutException e) { + LOGGER.info("Attempt to backup repository failed due to timeout: {}", e.getMessage()); + // Continue with the auto update anyway... 
+ } + } // Call ApplyUpdate to end this process (unlocking current JAR so it can be replaced) String javaHome = System.getProperty("java.home"); diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java index 8b6563f2..33431258 100644 --- a/src/main/java/org/qortal/controller/BlockMinter.java +++ b/src/main/java/org/qortal/controller/BlockMinter.java @@ -44,6 +44,9 @@ public class BlockMinter extends Thread { private static Long lastLogTimestamp; private static Long logTimeout; + // Recovery + public static final long INVALID_BLOCK_RECOVERY_TIMEOUT = 10 * 60 * 1000L; // ms + // Constructors public BlockMinter() { @@ -144,9 +147,25 @@ public class BlockMinter extends Thread { if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) continue; + // If we are stuck on an invalid block, we should allow an alternative to be minted + boolean recoverInvalidBlock = false; + if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) { + // We've had at least one invalid block + long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived; + long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived; + if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) { + if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) { + // Last valid block was more than 10 mins ago, but we've had an invalid block since then + // Assume that the chain has stalled because there is no alternative valid candidate + // Enter recovery mode to allow alternative, valid candidates to be minted + recoverInvalidBlock = true; + } + } + } + // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode. if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp) - if (Controller.getInstance().getRecoveryMode() == false) + if (Controller.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false) continue; // There are enough peers with a recent block and our latest block is recent @@ -230,6 +249,8 @@ public class BlockMinter extends Thread { if (testBlock.isTimestampValid() != ValidationResult.OK) continue; + testBlock.preProcess(); + // Is new block valid yet? 
(Before adding unconfirmed transactions) ValidationResult result = testBlock.isValid(); if (result != ValidationResult.OK) { @@ -421,7 +442,8 @@ public class BlockMinter extends Thread { // Add to blockchain newBlock.process(); - LOGGER.info(String.format("Minted new test block: %d", newBlock.getBlockData().getHeight())); + LOGGER.info(String.format("Minted new test block: %d sig: %.8s", + newBlock.getBlockData().getHeight(), Base58.encode(newBlock.getBlockData().getSignature()))); repository.saveChanges(); diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index 6bab9d42..5b04aae5 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -1,10 +1,47 @@ package org.qortal.controller; -import com.google.common.primitives.Longs; +import java.awt.TrayIcon.MessageType; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.SecureRandom; +import java.security.Security; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; +import com.google.common.primitives.Longs; import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; import org.qortal.account.PublicKeyAccount; @@ -13,9 +50,11 @@ import org.qortal.api.DomainMapService; import org.qortal.block.Block; import org.qortal.block.BlockChain; import org.qortal.block.BlockChain.BlockTimingByHeight; -import org.qortal.controller.Synchronizer.SynchronizationResult; import org.qortal.controller.arbitrary.ArbitraryDataCleanupManager; import org.qortal.controller.arbitrary.ArbitraryDataManager; +import org.qortal.controller.Synchronizer.SynchronizationResult; +import org.qortal.controller.repository.PruneManager; +import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.controller.tradebot.TradeBot; import org.qortal.data.account.MintingAccountData; import org.qortal.data.account.RewardShareData; @@ -34,10 +73,7 @@ import org.qortal.gui.SysTray; import org.qortal.network.Network; import org.qortal.network.Peer; import org.qortal.network.message.*; -import org.qortal.repository.DataException; -import org.qortal.repository.Repository; -import org.qortal.repository.RepositoryFactory; -import org.qortal.repository.RepositoryManager; +import org.qortal.repository.*; import 
org.qortal.repository.hsqldb.HSQLDBRepositoryFactory; import org.qortal.settings.Settings; import org.qortal.transaction.Transaction; @@ -123,6 +159,7 @@ public class Controller extends Thread { }; private long repositoryBackupTimestamp = startTime; // ms + private long repositoryMaintenanceTimestamp = startTime; // ms private long repositoryCheckpointTimestamp = startTime; // ms private long ntpCheckTimestamp = startTime; // ms private long deleteExpiredTimestamp = startTime + DELETE_EXPIRED_INTERVAL; // ms @@ -291,6 +328,10 @@ public class Controller extends Thread { return this.buildVersion; } + public String getVersionStringWithoutPrefix() { + return this.buildVersion.replaceFirst(VERSION_PREFIX, ""); + } + /** Returns current blockchain height, or 0 if it's not available. */ public int getChainHeight() { synchronized (this.latestBlocks) { @@ -334,7 +375,7 @@ public class Controller extends Thread { return this.savedArgs; } - /* package */ public static boolean isStopping() { + public static boolean isStopping() { return isStopping; } @@ -392,6 +433,12 @@ public class Controller extends Thread { try { RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl()); RepositoryManager.setRepositoryFactory(repositoryFactory); + RepositoryManager.setRequestedCheckpoint(Boolean.TRUE); + + try (final Repository repository = RepositoryManager.getRepository()) { + RepositoryManager.archive(repository); + RepositoryManager.prune(repository); + } } catch (DataException e) { // If exception has no cause then repository is in use by some other process. if (e.getCause() == null) { @@ -405,6 +452,11 @@ public class Controller extends Thread { return; // Not System.exit() so that GUI can display error } + // Rebuild Names table and check database integrity + NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck(); + namesDatabaseIntegrityCheck.rebuildAllNames(); + namesDatabaseIntegrityCheck.runIntegrityCheck(); + LOGGER.info("Validating blockchain"); try { BlockChain.validate(); @@ -417,6 +469,12 @@ public class Controller extends Thread { return; // Not System.exit() so that GUI can display error } + // Import current trade bot states and minting accounts if they exist + Controller.importRepositoryData(); + + // Add the initial peers to the repository if we don't have any + Controller.installInitialPeers(); + LOGGER.info("Starting controller"); Controller.getInstance().start(); @@ -500,10 +558,10 @@ public class Controller extends Thread { final long repositoryBackupInterval = Settings.getInstance().getRepositoryBackupInterval(); final long repositoryCheckpointInterval = Settings.getInstance().getRepositoryCheckpointInterval(); + long repositoryMaintenanceInterval = getRandomRepositoryMaintenanceInterval(); - ExecutorService trimExecutor = Executors.newCachedThreadPool(new DaemonThreadFactory()); - trimExecutor.execute(new AtStatesTrimmer()); - trimExecutor.execute(new OnlineAccountsSignaturesTrimmer()); + // Start executor service for trimming or pruning + PruneManager.getInstance().start(); try { while (!isStopping) { @@ -562,7 +620,39 @@ public class Controller extends Thread { Translator.INSTANCE.translate("SysTray", "CREATING_BACKUP_OF_DB_FILES"), MessageType.INFO); - RepositoryManager.backup(true); + try { + // Timeout if the database isn't ready for backing up after 60 seconds + long timeout = 60 * 1000L; + RepositoryManager.backup(true, "backup", timeout); + + } catch (TimeoutException e) { + LOGGER.info("Attempt to backup repository 
failed due to timeout: {}", e.getMessage()); + } + } + + // Give repository a chance to perform maintenance (if enabled) + if (repositoryMaintenanceInterval > 0 && now >= repositoryMaintenanceTimestamp + repositoryMaintenanceInterval) { + repositoryMaintenanceTimestamp = now + repositoryMaintenanceInterval; + + if (Settings.getInstance().getShowMaintenanceNotification()) + SysTray.getInstance().showMessage(Translator.INSTANCE.translate("SysTray", "DB_MAINTENANCE"), + Translator.INSTANCE.translate("SysTray", "PERFORMING_DB_MAINTENANCE"), + MessageType.INFO); + + LOGGER.info("Starting scheduled repository maintenance. This can take a while..."); + try (final Repository repository = RepositoryManager.getRepository()) { + + // Timeout if the database isn't ready for maintenance after 60 seconds + long timeout = 60 * 1000L; + repository.performPeriodicMaintenance(timeout); + + LOGGER.info("Scheduled repository maintenance completed"); + } catch (DataException | TimeoutException e) { + LOGGER.error("Scheduled repository maintenance failed", e); + } + + // Get a new random interval + repositoryMaintenanceInterval = getRandomRepositoryMaintenanceInterval(); } // Prune stuck/slow/old peers @@ -589,13 +679,68 @@ public class Controller extends Thread { Thread.interrupted(); // Fall-through to exit } finally { - trimExecutor.shutdownNow(); + PruneManager.getInstance().stop(); + } + } + + /** + * Import current trade bot states and minting accounts. + * This is needed because the user may have bootstrapped, or there could be a database inconsistency + * if the core crashed when computing the nonce during the start of the trade process. + */ + private static void importRepositoryData() { + try (final Repository repository = RepositoryManager.getRepository()) { + + String exportPath = Settings.getInstance().getExportPath(); + try { + Path importPath = Paths.get(exportPath, "TradeBotStates.json"); + repository.importDataFromFile(importPath.toString()); + } catch (FileNotFoundException e) { + // Do nothing, as the files will only exist in certain cases + } try { - trimExecutor.awaitTermination(2L, TimeUnit.SECONDS); - } catch (InterruptedException e) { - // We tried... + Path importPath = Paths.get(exportPath, "MintingAccounts.json"); + repository.importDataFromFile(importPath.toString()); + } catch (FileNotFoundException e) { + // Do nothing, as the files will only exist in certain cases } + repository.saveChanges(); + } + catch (DataException | IOException e) { + LOGGER.info("Unable to import data into repository: {}", e.getMessage()); + } + } + + private static void installInitialPeers() { + try (final Repository repository = RepositoryManager.getRepository()) { + if (repository.getNetworkRepository().getAllPeers().isEmpty()) { + Network.installInitialPeers(repository); + } + + } catch (DataException e) { + // Fail silently as this is an optional step + } + } + + private long getRandomRepositoryMaintenanceInterval() { + final long minInterval = Settings.getInstance().getRepositoryMaintenanceMinInterval(); + final long maxInterval = Settings.getInstance().getRepositoryMaintenanceMaxInterval(); + if (maxInterval == 0) { + return 0; + } + return (new Random().nextLong() % (maxInterval - minInterval)) + minInterval; + } + + /** + * Export current trade bot states and minting accounts. 
+ */ + public void exportRepositoryData() { + try (final Repository repository = RepositoryManager.getRepository()) { + repository.exportNodeLocalData(); + + } catch (DataException e) { + // Fail silently as this is an optional step } } @@ -878,7 +1023,7 @@ public class Controller extends Thread { } } - String tooltip = String.format("%s - %d %s - %s %d", actionText, numberOfPeers, connectionsText, heightText, height) + "\n" + String.format("Build version: %s", this.buildVersion); + String tooltip = String.format("%s - %d %s - %s %d", actionText, numberOfPeers, connectionsText, heightText, height) + "\n" + String.format("%s: %s", Translator.INSTANCE.translate("SysTray", "BUILD_VERSION"), this.buildVersion); SysTray.getInstance().setToolTipText(tooltip); this.callbackExecutor.execute(() -> { @@ -951,6 +1096,10 @@ public class Controller extends Thread { } } + // Export local data + LOGGER.info("Backing up local data"); + this.exportRepositoryData(); + LOGGER.info("Shutting down networking"); Network.getInstance().shutdown(); @@ -1291,6 +1440,34 @@ public class Controller extends Thread { try (final Repository repository = RepositoryManager.getRepository()) { BlockData blockData = repository.getBlockRepository().fromSignature(signature); + if (blockData != null) { + if (PruneManager.getInstance().isBlockPruned(blockData.getHeight())) { + // If this is a pruned block, we likely only have partial data, so best not to sent it + blockData = null; + } + } + + // If we have no block data, we should check the archive in case it's there + if (blockData == null) { + if (Settings.getInstance().isArchiveEnabled()) { + byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository); + if (bytes != null) { + CachedBlockMessage blockMessage = new CachedBlockMessage(bytes); + blockMessage.setId(message.getId()); + + // This call also causes the other needed data to be pulled in from repository + if (!peer.sendMessage(blockMessage)) { + peer.disconnect("failed to send block"); + // Don't fall-through to caching because failure to send might be from failure to build message + return; + } + + // Sent successfully from archive, so nothing more to do + return; + } + } + } + if (blockData == null) { // We don't have this block this.stats.getBlockMessageStats.unknownBlocks.getAndIncrement(); @@ -1459,12 +1636,29 @@ public class Controller extends Thread { int numberRequested = Math.min(Network.MAX_BLOCK_SUMMARIES_PER_REPLY, getBlockSummariesMessage.getNumberRequested()); BlockData blockData = repository.getBlockRepository().fromReference(parentSignature); + if (blockData == null) { + // Try the archive + blockData = repository.getBlockArchiveRepository().fromReference(parentSignature); + } + + if (blockData != null) { + if (PruneManager.getInstance().isBlockPruned(blockData.getHeight())) { + // If this request contains a pruned block, we likely only have partial data, so best not to sent anything + // We always prune from the oldest first, so it's fine to just check the first block requested + blockData = null; + } + } while (blockData != null && blockSummaries.size() < numberRequested) { BlockSummaryData blockSummary = new BlockSummaryData(blockData); blockSummaries.add(blockSummary); - blockData = repository.getBlockRepository().fromReference(blockData.getSignature()); + byte[] previousSignature = blockData.getSignature(); + blockData = repository.getBlockRepository().fromReference(previousSignature); + if (blockData == null) { + // Try the archive + blockData 
= repository.getBlockArchiveRepository().fromReference(previousSignature); + } } } catch (DataException e) { LOGGER.error(String.format("Repository issue while sending block summaries after %s to peer %s", Base58.encode(parentSignature), peer), e); @@ -1513,11 +1707,20 @@ public class Controller extends Thread { try (final Repository repository = RepositoryManager.getRepository()) { int numberRequested = getSignaturesMessage.getNumberRequested(); BlockData blockData = repository.getBlockRepository().fromReference(parentSignature); + if (blockData == null) { + // Try the archive + blockData = repository.getBlockArchiveRepository().fromReference(parentSignature); + } while (blockData != null && signatures.size() < numberRequested) { signatures.add(blockData.getSignature()); - blockData = repository.getBlockRepository().fromReference(blockData.getSignature()); + byte[] previousSignature = blockData.getSignature(); + blockData = repository.getBlockRepository().fromReference(previousSignature); + if (blockData == null) { + // Try the archive + blockData = repository.getBlockArchiveRepository().fromReference(previousSignature); + } } } catch (DataException e) { LOGGER.error(String.format("Repository issue while sending V2 signatures after %s to peer %s", Base58.encode(parentSignature), peer), e); diff --git a/src/main/java/org/qortal/controller/Synchronizer.java b/src/main/java/org/qortal/controller/Synchronizer.java index 6ddbad16..fde89851 100644 --- a/src/main/java/org/qortal/controller/Synchronizer.java +++ b/src/main/java/org/qortal/controller/Synchronizer.java @@ -3,12 +3,9 @@ package org.qortal.controller; import java.math.BigInteger; import java.text.DecimalFormat; import java.text.NumberFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; +import java.util.*; import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; -import java.util.Iterator; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -71,6 +68,11 @@ public class Synchronizer { // Keep track of the size of the last re-org, so it can be logged private int lastReorgSize; + // Keep track of invalid blocks so that we don't keep trying to sync them + private Map invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>()); + public Long timeValidBlockLastReceived = null; + public Long timeInvalidBlockLastReceived = null; + private static Synchronizer instance; public enum SynchronizationResult { @@ -346,6 +348,12 @@ public class Synchronizer { } } + // Ignore this peer if it holds an invalid block + if (this.containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) { + LOGGER.debug("Ignoring peer %s because it holds an invalid block", peer); + peers.remove(peer); + } + // Reduce minChainLength if needed. 
If we don't have any blocks, this peer will be excluded from chain weight comparisons later in the process, so we shouldn't update minChainLength List peerBlockSummaries = peer.getCommonBlockData().getBlockSummariesAfterCommonBlock(); if (peerBlockSummaries != null && peerBlockSummaries.size() > 0) @@ -489,6 +497,71 @@ public class Synchronizer { } + + /* Invalid block signature tracking */ + + private void addInvalidBlockSignature(byte[] signature) { + Long now = NTP.getTime(); + if (now == null) { + return; + } + + // Add or update existing entry + String sig58 = Base58.encode(signature); + invalidBlockSignatures.put(sig58, now); + } + private void deleteOlderInvalidSignatures(Long now) { + if (now == null) { + return; + } + + // Delete signatures with older timestamps + Iterator it = invalidBlockSignatures.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry pair = (Map.Entry)it.next(); + Long lastSeen = (Long) pair.getValue(); + + // Remove signature if we haven't seen it for more than 1 hour + if (now - lastSeen > 60 * 60 * 1000L) { + it.remove(); + } + } + } + private boolean containsInvalidBlockSummary(List blockSummaries) { + if (blockSummaries == null || invalidBlockSignatures == null) { + return false; + } + + // Loop through our known invalid blocks and check each one against supplied block summaries + for (String invalidSignature58 : invalidBlockSignatures.keySet()) { + byte[] invalidSignature = Base58.decode(invalidSignature58); + for (BlockSummaryData blockSummary : blockSummaries) { + byte[] signature = blockSummary.getSignature(); + if (Arrays.equals(signature, invalidSignature)) { + return true; + } + } + } + return false; + } + private boolean containsInvalidBlockSignature(List blockSignatures) { + if (blockSignatures == null || invalidBlockSignatures == null) { + return false; + } + + // Loop through our known invalid blocks and check each one against supplied block signatures + for (String invalidSignature58 : invalidBlockSignatures.keySet()) { + byte[] invalidSignature = Base58.decode(invalidSignature58); + for (byte[] signature : blockSignatures) { + if (Arrays.equals(signature, invalidSignature)) { + return true; + } + } + } + return false; + } + + /** * Attempt to synchronize blockchain with peer. *
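The invalid-block tracking above works together with the BlockMinter change earlier in this diff: recovery minting is only enabled once no valid block has arrived for INVALID_BLOCK_RECOVERY_TIMEOUT while an invalid candidate has been seen more recently than that. A condensed, compilable restatement of that decision is sketched below; the standalone helper class is illustrative only and not part of the patch.

    // Illustrative condensation of the recovery check added to BlockMinter
    final class InvalidBlockRecoveryCheck {
        static final long INVALID_BLOCK_RECOVERY_TIMEOUT = 10 * 60 * 1000L; // ms, as in BlockMinter

        static boolean shouldRecover(long now, long timeValidBlockLastReceived, Long timeInvalidBlockLastReceived) {
            if (timeInvalidBlockLastReceived == null)
                return false; // no invalid block seen yet, so nothing to recover from

            long sinceValid = now - timeValidBlockLastReceived;
            long sinceInvalid = now - timeInvalidBlockLastReceived;

            // Chain appears stalled on an invalid candidate: no valid block for over
            // ten minutes, yet an invalid candidate arrived within that window
            return sinceValid > INVALID_BLOCK_RECOVERY_TIMEOUT
                    && sinceInvalid < INVALID_BLOCK_RECOVERY_TIMEOUT;
        }
    }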
@@ -535,6 +608,15 @@ public class Synchronizer { // Reset last re-org size as we are starting a new sync round this.lastReorgSize = 0; + // Set the initial value of timeValidBlockLastReceived if it's null + Long now = NTP.getTime(); + if (this.timeValidBlockLastReceived == null) { + this.timeValidBlockLastReceived = now; + } + + // Delete invalid signatures with older timestamps + this.deleteOlderInvalidSignatures(now); + List peerBlockSummaries = new ArrayList<>(); SynchronizationResult findCommonBlockResult = fetchSummariesFromCommonBlock(repository, peer, ourInitialHeight, force, peerBlockSummaries, true); if (findCommonBlockResult != SynchronizationResult.OK) { @@ -883,6 +965,12 @@ public class Synchronizer { break; } + // Catch a block with an invalid signature before orphaning, so that we retain our existing valid candidate + if (this.containsInvalidBlockSignature(peerBlockSignatures)) { + LOGGER.info(String.format("Peer %s sent invalid block signature: %.8s", peer, Base58.encode(latestPeerSignature))); + return SynchronizationResult.INVALID_DATA; + } + byte[] nextPeerSignature = peerBlockSignatures.get(0); int nextHeight = height + 1; @@ -985,13 +1073,20 @@ public class Synchronizer { if (Controller.isStopping()) return SynchronizationResult.SHUTTING_DOWN; + newBlock.preProcess(); + ValidationResult blockResult = newBlock.isValid(); if (blockResult != ValidationResult.OK) { LOGGER.info(String.format("Peer %s sent invalid block for height %d, sig %.8s: %s", peer, newBlock.getBlockData().getHeight(), Base58.encode(newBlock.getSignature()), blockResult.name())); + this.addInvalidBlockSignature(newBlock.getSignature()); + this.timeInvalidBlockLastReceived = NTP.getTime(); return SynchronizationResult.INVALID_DATA; } + // Block is valid + this.timeValidBlockLastReceived = NTP.getTime(); + // Save transactions attached to this block for (Transaction transaction : newBlock.getTransactions()) { TransactionData transactionData = transaction.getTransactionData(); @@ -1173,13 +1268,20 @@ public class Synchronizer { for (Transaction transaction : newBlock.getTransactions()) transaction.setInitialApprovalStatus(); + newBlock.preProcess(); + ValidationResult blockResult = newBlock.isValid(); if (blockResult != ValidationResult.OK) { LOGGER.info(String.format("Peer %s sent invalid block for height %d, sig %.8s: %s", peer, ourHeight, Base58.encode(latestPeerSignature), blockResult.name())); + this.addInvalidBlockSignature(newBlock.getSignature()); + this.timeInvalidBlockLastReceived = NTP.getTime(); return SynchronizationResult.INVALID_DATA; } + // Block is valid + this.timeValidBlockLastReceived = NTP.getTime(); + // Save transactions attached to this block for (Transaction transaction : newBlock.getTransactions()) { TransactionData transactionData = transaction.getTransactionData(); diff --git a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java new file mode 100644 index 00000000..3b92db51 --- /dev/null +++ b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java @@ -0,0 +1,109 @@ +package org.qortal.controller.repository; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; +import org.qortal.data.block.BlockData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.utils.NTP; + +public 
class AtStatesPruner implements Runnable { + + private static final Logger LOGGER = LogManager.getLogger(AtStatesPruner.class); + + @Override + public void run() { + Thread.currentThread().setName("AT States pruner"); + + boolean archiveMode = false; + if (!Settings.getInstance().isTopOnly()) { + // Top-only mode isn't enabled, but we might want to prune for the purposes of archiving + if (!Settings.getInstance().isArchiveEnabled()) { + // No pruning or archiving, so we must not prune anything + return; + } + else { + // We're allowed to prune blocks that have already been archived + archiveMode = true; + } + } + + try (final Repository repository = RepositoryManager.getRepository()) { + int pruneStartHeight = repository.getATRepository().getAtPruneHeight(); + + repository.discardChanges(); + repository.getATRepository().rebuildLatestAtStates(); + + while (!Controller.isStopping()) { + repository.discardChanges(); + + Thread.sleep(Settings.getInstance().getAtStatesPruneInterval()); + + BlockData chainTip = Controller.getInstance().getChainTip(); + if (chainTip == null || NTP.getTime() == null) + continue; + + // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages + if (Controller.getInstance().isSynchronizing()) + continue; + + // Prune AT states for all blocks up until our latest minus pruneBlockLimit + final int ourLatestHeight = chainTip.getHeight(); + int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit(); + + // In archive mode we are only allowed to trim blocks that have already been archived + if (archiveMode) { + upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1; + + // TODO: validate that the actual archived data exists before pruning it? + } + + int upperBatchHeight = pruneStartHeight + Settings.getInstance().getAtStatesPruneBatchSize(); + int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight); + + if (pruneStartHeight >= upperPruneHeight) + continue; + + LOGGER.debug(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight)); + + int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight); + repository.saveChanges(); + int numAtStateDataRowsTrimmed = repository.getATRepository().trimAtStates( + pruneStartHeight, upperPruneHeight, Settings.getInstance().getAtStatesTrimLimit()); + repository.saveChanges(); + + if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) { + final int finalPruneStartHeight = pruneStartHeight; + LOGGER.debug(() -> String.format("Pruned %d AT state%s between blocks %d and %d", + numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""), + finalPruneStartHeight, upperPruneHeight)); + } else { + // Can we move onto next batch? 
+ if (upperPrunableHeight > upperBatchHeight) { + pruneStartHeight = upperBatchHeight; + repository.getATRepository().setAtPruneHeight(pruneStartHeight); + repository.getATRepository().rebuildLatestAtStates(); + repository.saveChanges(); + + final int finalPruneStartHeight = pruneStartHeight; + LOGGER.debug(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight)); + } + else { + // We've pruned up to the upper prunable height + // Back off for a while to save CPU for syncing + repository.discardChanges(); + Thread.sleep(5*60*1000L); + } + } + } + } catch (DataException e) { + LOGGER.warn(String.format("Repository issue trying to prune AT states: %s", e.getMessage())); + } catch (InterruptedException e) { + // Time to exit + } + } + +} diff --git a/src/main/java/org/qortal/controller/AtStatesTrimmer.java b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java similarity index 92% rename from src/main/java/org/qortal/controller/AtStatesTrimmer.java rename to src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java index b452b3cc..98a1a889 100644 --- a/src/main/java/org/qortal/controller/AtStatesTrimmer.java +++ b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java @@ -1,7 +1,8 @@ -package org.qortal.controller; +package org.qortal.controller.repository; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; import org.qortal.data.block.BlockData; import org.qortal.repository.DataException; import org.qortal.repository.Repository; @@ -20,8 +21,8 @@ public class AtStatesTrimmer implements Runnable { try (final Repository repository = RepositoryManager.getRepository()) { int trimStartHeight = repository.getATRepository().getAtTrimHeight(); - repository.getATRepository().prepareForAtStateTrimming(); - repository.saveChanges(); + repository.discardChanges(); + repository.getATRepository().rebuildLatestAtStates(); while (!Controller.isStopping()) { repository.discardChanges(); @@ -62,7 +63,7 @@ public class AtStatesTrimmer implements Runnable { if (upperTrimmableHeight > upperBatchHeight) { trimStartHeight = upperBatchHeight; repository.getATRepository().setAtTrimHeight(trimStartHeight); - repository.getATRepository().prepareForAtStateTrimming(); + repository.getATRepository().rebuildLatestAtStates(); repository.saveChanges(); final int finalTrimStartHeight = trimStartHeight; diff --git a/src/main/java/org/qortal/controller/repository/BlockArchiver.java b/src/main/java/org/qortal/controller/repository/BlockArchiver.java new file mode 100644 index 00000000..2a987d97 --- /dev/null +++ b/src/main/java/org/qortal/controller/repository/BlockArchiver.java @@ -0,0 +1,113 @@ +package org.qortal.controller.repository; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; +import org.qortal.data.block.BlockData; +import org.qortal.repository.*; +import org.qortal.settings.Settings; +import org.qortal.transform.TransformationException; +import org.qortal.utils.NTP; + +import java.io.IOException; + +public class BlockArchiver implements Runnable { + + private static final Logger LOGGER = LogManager.getLogger(BlockArchiver.class); + + private static final long INITIAL_SLEEP_PERIOD = 0L; // TODO: 5 * 60 * 1000L + 1234L; // ms + + public void run() { + Thread.currentThread().setName("Block archiver"); + + if (!Settings.getInstance().isArchiveEnabled()) { + return; + } + + try (final Repository 
repository = RepositoryManager.getRepository()) { + // Don't even start building until initial rush has ended + Thread.sleep(INITIAL_SLEEP_PERIOD); + + int startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight(); + + // Don't attempt to archive if we have no ATStatesHeightIndex, as it will be too slow + boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex(); + if (!hasAtStatesHeightIndex) { + LOGGER.info("Unable to start block archiver due to missing ATStatesHeightIndex. Bootstrapping is recommended."); + repository.discardChanges(); + return; + } + + LOGGER.info("Starting block archiver..."); + + while (!Controller.isStopping()) { + repository.discardChanges(); + + Thread.sleep(Settings.getInstance().getArchiveInterval()); + + BlockData chainTip = Controller.getInstance().getChainTip(); + if (chainTip == null || NTP.getTime() == null) { + continue; + } + + // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages + if (Controller.getInstance().isSynchronizing()) { + continue; + } + + // Don't attempt to archive if we're not synced yet + final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp(); + if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) { + continue; + } + + + // Build cache of blocks + try { + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository); + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + switch (result) { + case OK: + // Increment block archive height + startHeight += writer.getWrittenCount(); + repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight); + repository.saveChanges(); + break; + + case STOPPING: + return; + + // We've reached the limit of the blocks we can archive + // Sleep for a while to allow more to become available + case NOT_ENOUGH_BLOCKS: + // We didn't reach our file size target, so that must mean that we don't have enough blocks + // yet or something went wrong. Sleep for a while and then try again. + repository.discardChanges(); + Thread.sleep(60 * 60 * 1000L); // 1 hour + break; + + case BLOCK_NOT_FOUND: + // We tried to archive a block that didn't exist. This is a major failure and likely means + // that a bootstrap or re-sync is needed. Try again every minute until then. + LOGGER.info("Error: block not found when building archive. 
If this error persists, " + + "a bootstrap or re-sync may be needed."); + repository.discardChanges(); + Thread.sleep( 60 * 1000L); // 1 minute + break; + } + + } catch (IOException | TransformationException e) { + LOGGER.info("Caught exception when creating block cache", e); + } + + } + } catch (DataException e) { + LOGGER.info("Caught exception when creating block cache", e); + } catch (InterruptedException e) { + // Do nothing + } + + } + +} diff --git a/src/main/java/org/qortal/controller/repository/BlockPruner.java b/src/main/java/org/qortal/controller/repository/BlockPruner.java new file mode 100644 index 00000000..1258ee38 --- /dev/null +++ b/src/main/java/org/qortal/controller/repository/BlockPruner.java @@ -0,0 +1,114 @@ +package org.qortal.controller.repository; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; +import org.qortal.data.block.BlockData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.utils.NTP; + +public class BlockPruner implements Runnable { + + private static final Logger LOGGER = LogManager.getLogger(BlockPruner.class); + + @Override + public void run() { + Thread.currentThread().setName("Block pruner"); + + boolean archiveMode = false; + if (!Settings.getInstance().isTopOnly()) { + // Top-only mode isn't enabled, but we might want to prune for the purposes of archiving + if (!Settings.getInstance().isArchiveEnabled()) { + // No pruning or archiving, so we must not prune anything + return; + } + else { + // We're allowed to prune blocks that have already been archived + archiveMode = true; + } + } + + try (final Repository repository = RepositoryManager.getRepository()) { + int pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight(); + + // Don't attempt to prune if we have no ATStatesHeightIndex, as it will be too slow + boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex(); + if (!hasAtStatesHeightIndex) { + LOGGER.info("Unable to start block pruner due to missing ATStatesHeightIndex. 
Bootstrapping is recommended."); + return; + } + + while (!Controller.isStopping()) { + repository.discardChanges(); + + Thread.sleep(Settings.getInstance().getBlockPruneInterval()); + + BlockData chainTip = Controller.getInstance().getChainTip(); + if (chainTip == null || NTP.getTime() == null) + continue; + + // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages + if (Controller.getInstance().isSynchronizing()) { + continue; + } + + // Don't attempt to prune if we're not synced yet + final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp(); + if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) { + continue; + } + + // Prune all blocks up until our latest minus pruneBlockLimit + final int ourLatestHeight = chainTip.getHeight(); + int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit(); + + // In archive mode we are only allowed to trim blocks that have already been archived + if (archiveMode) { + upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1; + } + + int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize(); + int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight); + + if (pruneStartHeight >= upperPruneHeight) { + continue; + } + + LOGGER.debug(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight)); + + int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight); + repository.saveChanges(); + + if (numBlocksPruned > 0) { + LOGGER.debug(String.format("Pruned %d block%s between %d and %d", + numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""), + pruneStartHeight, upperPruneHeight)); + } else { + final int nextPruneHeight = upperPruneHeight + 1; + repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight); + repository.saveChanges(); + LOGGER.debug(String.format("Bumping block base prune height to %d", pruneStartHeight)); + + // Can we move onto next batch? 
+ if (upperPrunableHeight > nextPruneHeight) { + pruneStartHeight = nextPruneHeight; + } + else { + // We've pruned up to the upper prunable height + // Back off for a while to save CPU for syncing + repository.discardChanges(); + Thread.sleep(10*60*1000L); + } + } + } + } catch (DataException e) { + LOGGER.warn(String.format("Repository issue trying to prune blocks: %s", e.getMessage())); + } catch (InterruptedException e) { + // Time to exit + } + } + +} diff --git a/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java b/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java new file mode 100644 index 00000000..f12bd14a --- /dev/null +++ b/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java @@ -0,0 +1,410 @@ +package org.qortal.controller.repository; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.account.PublicKeyAccount; +import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; +import org.qortal.data.naming.NameData; +import org.qortal.data.transaction.*; +import org.qortal.naming.Name; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.transaction.Transaction.TransactionType; +import org.qortal.utils.Unicode; + +import java.util.*; + +public class NamesDatabaseIntegrityCheck { + + private static final Logger LOGGER = LogManager.getLogger(NamesDatabaseIntegrityCheck.class); + + private static final List ALL_NAME_TX_TYPE = Arrays.asList( + TransactionType.REGISTER_NAME, + TransactionType.UPDATE_NAME, + TransactionType.BUY_NAME, + TransactionType.SELL_NAME + ); + + private List nameTransactions = new ArrayList<>(); + + public int rebuildName(String name, Repository repository) { + int modificationCount = 0; + try { + List transactions = this.fetchAllTransactionsInvolvingName(name, repository); + if (transactions.isEmpty()) { + // This name was never registered, so there's nothing to do + return modificationCount; + } + + // Loop through each past transaction and re-apply it to the Names table + for (TransactionData currentTransaction : transactions) { + + // Process REGISTER_NAME transactions + if (currentTransaction.getType() == TransactionType.REGISTER_NAME) { + RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) currentTransaction; + Name nameObj = new Name(repository, registerNameTransactionData); + nameObj.register(); + modificationCount++; + LOGGER.trace("Processed REGISTER_NAME transaction for name {}", name); + } + + // Process UPDATE_NAME transactions + if (currentTransaction.getType() == TransactionType.UPDATE_NAME) { + UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) currentTransaction; + + if (Objects.equals(updateNameTransactionData.getNewName(), name) && + !Objects.equals(updateNameTransactionData.getName(), updateNameTransactionData.getNewName())) { + // This renames an existing name, so we need to process that instead + this.rebuildName(updateNameTransactionData.getName(), repository); + } + else { + Name nameObj = new Name(repository, name); + if (nameObj != null && nameObj.getNameData() != null) { + nameObj.update(updateNameTransactionData); + modificationCount++; + LOGGER.trace("Processed UPDATE_NAME transaction for name {}", name); + } else { + // Something went wrong + throw new DataException(String.format("Name data not found for name %s", 
updateNameTransactionData.getName())); + } + } + } + + // Process SELL_NAME transactions + if (currentTransaction.getType() == TransactionType.SELL_NAME) { + SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) currentTransaction; + Name nameObj = new Name(repository, sellNameTransactionData.getName()); + if (nameObj != null && nameObj.getNameData() != null) { + nameObj.sell(sellNameTransactionData); + modificationCount++; + LOGGER.trace("Processed SELL_NAME transaction for name {}", name); + } + else { + // Something went wrong + throw new DataException(String.format("Name data not found for name %s", sellNameTransactionData.getName())); + } + } + + // Process BUY_NAME transactions + if (currentTransaction.getType() == TransactionType.BUY_NAME) { + BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) currentTransaction; + Name nameObj = new Name(repository, buyNameTransactionData.getName()); + if (nameObj != null && nameObj.getNameData() != null) { + nameObj.buy(buyNameTransactionData); + modificationCount++; + LOGGER.trace("Processed BUY_NAME transaction for name {}", name); + } + else { + // Something went wrong + throw new DataException(String.format("Name data not found for name %s", buyNameTransactionData.getName())); + } + } + } + + } catch (DataException e) { + LOGGER.info("Unable to run integrity check for name {}: {}", name, e.getMessage()); + } + + return modificationCount; + } + + public int rebuildAllNames() { + int modificationCount = 0; + try (final Repository repository = RepositoryManager.getRepository()) { + List names = this.fetchAllNames(repository); + for (String name : names) { + modificationCount += this.rebuildName(name, repository); + } + repository.saveChanges(); + } + catch (DataException e) { + LOGGER.info("Error when running integrity check for all names: {}", e.getMessage()); + } + + //LOGGER.info("modificationCount: {}", modificationCount); + return modificationCount; + } + + public void runIntegrityCheck() { + boolean integrityCheckFailed = false; + try (final Repository repository = RepositoryManager.getRepository()) { + + // Fetch all the (confirmed) REGISTER_NAME transactions + List registerNameTransactions = this.fetchRegisterNameTransactions(); + + // Loop through each REGISTER_NAME txn signature and request the full transaction data + for (RegisterNameTransactionData registerNameTransactionData : registerNameTransactions) { + String registeredName = registerNameTransactionData.getName(); + NameData nameData = repository.getNameRepository().fromName(registeredName); + + // Check to see if this name has been updated or bought at any point + TransactionData latestUpdate = this.fetchLatestModificationTransactionInvolvingName(registeredName, repository); + if (latestUpdate == null) { + // Name was never updated once registered + // We expect this name to still be registered to this transaction's creator + + if (nameData == null) { + LOGGER.info("Error: registered name {} doesn't exist in Names table. 
Adding...", registeredName); + integrityCheckFailed = true; + } + else { + LOGGER.trace("Registered name {} is correctly registered", registeredName); + } + + // Check the owner is correct + PublicKeyAccount creator = new PublicKeyAccount(repository, registerNameTransactionData.getCreatorPublicKey()); + if (!Objects.equals(creator.getAddress(), nameData.getOwner())) { + LOGGER.info("Error: registered name {} is owned by {}, but it should be {}", + registeredName, nameData.getOwner(), creator.getAddress()); + integrityCheckFailed = true; + } + else { + LOGGER.trace("Registered name {} has the correct owner", registeredName); + } + } + else { + // Check if owner is correct after update + + // Check for name updates + if (latestUpdate.getType() == TransactionType.UPDATE_NAME) { + UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) latestUpdate; + PublicKeyAccount creator = new PublicKeyAccount(repository, updateNameTransactionData.getCreatorPublicKey()); + + // When this name is the "new name", we expect the current owner to match the txn creator + if (Objects.equals(updateNameTransactionData.getNewName(), registeredName)) { + if (!Objects.equals(creator.getAddress(), nameData.getOwner())) { + LOGGER.info("Error: registered name {} is owned by {}, but it should be {}", + registeredName, nameData.getOwner(), creator.getAddress()); + integrityCheckFailed = true; + } + else { + LOGGER.trace("Registered name {} has the correct owner after being updated", registeredName); + } + } + + // When this name is the old name, we expect the "new name"'s owner to match the txn creator + // The old name will then be unregistered, or re-registered. + // FUTURE: check database integrity for names that have been updated and then the original name re-registered + else if (Objects.equals(updateNameTransactionData.getName(), registeredName)) { + NameData newNameData = repository.getNameRepository().fromName(updateNameTransactionData.getNewName()); + if (!Objects.equals(creator.getAddress(), newNameData.getOwner())) { + LOGGER.info("Error: registered name {} is owned by {}, but it should be {}", + updateNameTransactionData.getNewName(), newNameData.getOwner(), creator.getAddress()); + integrityCheckFailed = true; + } + else { + LOGGER.trace("Registered name {} has the correct owner after being updated", updateNameTransactionData.getNewName()); + } + } + + else { + LOGGER.info("Unhandled update case for name {}", registeredName); + } + } + + // Check for name buys + else if (latestUpdate.getType() == TransactionType.BUY_NAME) { + BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) latestUpdate; + PublicKeyAccount creator = new PublicKeyAccount(repository, buyNameTransactionData.getCreatorPublicKey()); + if (!Objects.equals(creator.getAddress(), nameData.getOwner())) { + LOGGER.info("Error: registered name {} is owned by {}, but it should be {}", + registeredName, nameData.getOwner(), creator.getAddress()); + integrityCheckFailed = true; + } + else { + LOGGER.trace("Registered name {} has the correct owner after being bought", registeredName); + } + } + + // Check for name sells + else if (latestUpdate.getType() == TransactionType.SELL_NAME) { + SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) latestUpdate; + PublicKeyAccount creator = new PublicKeyAccount(repository, sellNameTransactionData.getCreatorPublicKey()); + if (!Objects.equals(creator.getAddress(), nameData.getOwner())) { + LOGGER.info("Error: registered name {} is owned by {}, 
but it should be {}", + registeredName, nameData.getOwner(), creator.getAddress()); + integrityCheckFailed = true; + } + else { + LOGGER.trace("Registered name {} has the correct owner after being listed for sale", registeredName); + } + } + + else { + LOGGER.info("Unhandled case for name {}", registeredName); + } + + } + + } + + } catch (DataException e) { + LOGGER.warn(String.format("Repository issue trying to trim online accounts signatures: %s", e.getMessage())); + integrityCheckFailed = true; + } + + if (integrityCheckFailed) { + LOGGER.info("Registered names database integrity check failed. Bootstrapping is recommended."); + } else { + LOGGER.info("Registered names database integrity check passed."); + } + } + + private List fetchRegisterNameTransactions() { + List registerNameTransactions = new ArrayList<>(); + + for (TransactionData transactionData : this.nameTransactions) { + if (transactionData.getType() == TransactionType.REGISTER_NAME) { + RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) transactionData; + registerNameTransactions.add(registerNameTransactionData); + } + } + return registerNameTransactions; + } + + private List fetchUpdateNameTransactions() { + List updateNameTransactions = new ArrayList<>(); + + for (TransactionData transactionData : this.nameTransactions) { + if (transactionData.getType() == TransactionType.UPDATE_NAME) { + UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) transactionData; + updateNameTransactions.add(updateNameTransactionData); + } + } + return updateNameTransactions; + } + + private List fetchSellNameTransactions() { + List sellNameTransactions = new ArrayList<>(); + + for (TransactionData transactionData : this.nameTransactions) { + if (transactionData.getType() == TransactionType.SELL_NAME) { + SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData; + sellNameTransactions.add(sellNameTransactionData); + } + } + return sellNameTransactions; + } + + private List fetchBuyNameTransactions() { + List buyNameTransactions = new ArrayList<>(); + + for (TransactionData transactionData : this.nameTransactions) { + if (transactionData.getType() == TransactionType.BUY_NAME) { + BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData; + buyNameTransactions.add(buyNameTransactionData); + } + } + return buyNameTransactions; + } + + private void fetchAllNameTransactions(Repository repository) throws DataException { + List nameTransactions = new ArrayList<>(); + + // Fetch all the confirmed REGISTER_NAME transaction signatures + List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria( + null, null, null, ALL_NAME_TX_TYPE, null, null, + ConfirmationStatus.CONFIRMED, null, null, false); + + for (byte[] signature : signatures) { + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + nameTransactions.add(transactionData); + } + this.nameTransactions = nameTransactions; + } + + private List fetchAllTransactionsInvolvingName(String name, Repository repository) throws DataException { + List transactions = new ArrayList<>(); + String reducedName = Unicode.sanitize(name); + + // Fetch all the confirmed name-modification transactions + if (this.nameTransactions.isEmpty()) { + this.fetchAllNameTransactions(repository); + } + + for (TransactionData transactionData : this.nameTransactions) { + + if ((transactionData instanceof 
RegisterNameTransactionData)) { + RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) transactionData; + if (Objects.equals(registerNameTransactionData.getReducedName(), reducedName)) { + transactions.add(transactionData); + } + } + if ((transactionData instanceof UpdateNameTransactionData)) { + UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) transactionData; + if (Objects.equals(updateNameTransactionData.getName(), name) || + Objects.equals(updateNameTransactionData.getReducedNewName(), reducedName)) { + transactions.add(transactionData); + } + } + if ((transactionData instanceof BuyNameTransactionData)) { + BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData; + if (Objects.equals(buyNameTransactionData.getName(), name)) { + transactions.add(transactionData); + } + } + if ((transactionData instanceof SellNameTransactionData)) { + SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData; + if (Objects.equals(sellNameTransactionData.getName(), name)) { + transactions.add(transactionData); + } + } + } + return transactions; + } + + private TransactionData fetchLatestModificationTransactionInvolvingName(String registeredName, Repository repository) throws DataException { + List transactionsInvolvingName = this.fetchAllTransactionsInvolvingName(registeredName, repository); + + // Get the latest update for this name (excluding REGISTER_NAME transactions) + TransactionData latestUpdateToName = transactionsInvolvingName.stream() + .filter(txn -> txn.getType() != TransactionType.REGISTER_NAME) + .max(Comparator.comparing(TransactionData::getTimestamp)) + .orElse(null); + + return latestUpdateToName; + } + + private List fetchAllNames(Repository repository) throws DataException { + List names = new ArrayList<>(); + + // Fetch all the confirmed name transactions + if (this.nameTransactions.isEmpty()) { + this.fetchAllNameTransactions(repository); + } + + for (TransactionData transactionData : this.nameTransactions) { + + if ((transactionData instanceof RegisterNameTransactionData)) { + RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) transactionData; + if (!names.contains(registerNameTransactionData.getName())) { + names.add(registerNameTransactionData.getName()); + } + } + if ((transactionData instanceof UpdateNameTransactionData)) { + UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) transactionData; + if (!names.contains(updateNameTransactionData.getName())) { + names.add(updateNameTransactionData.getName()); + } + if (!names.contains(updateNameTransactionData.getNewName())) { + names.add(updateNameTransactionData.getNewName()); + } + } + if ((transactionData instanceof BuyNameTransactionData)) { + BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData; + if (!names.contains(buyNameTransactionData.getName())) { + names.add(buyNameTransactionData.getName()); + } + } + if ((transactionData instanceof SellNameTransactionData)) { + SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData; + if (!names.contains(sellNameTransactionData.getName())) { + names.add(sellNameTransactionData.getName()); + } + } + } + return names; + } + +} diff --git a/src/main/java/org/qortal/controller/OnlineAccountsSignaturesTrimmer.java b/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java similarity index 97% 
rename from src/main/java/org/qortal/controller/OnlineAccountsSignaturesTrimmer.java rename to src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java index b32a2b06..c7f248d5 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsSignaturesTrimmer.java +++ b/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java @@ -1,8 +1,9 @@ -package org.qortal.controller; +package org.qortal.controller.repository; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.qortal.block.BlockChain; +import org.qortal.controller.Controller; import org.qortal.data.block.BlockData; import org.qortal.repository.DataException; import org.qortal.repository.Repository; diff --git a/src/main/java/org/qortal/controller/repository/PruneManager.java b/src/main/java/org/qortal/controller/repository/PruneManager.java new file mode 100644 index 00000000..ec27456f --- /dev/null +++ b/src/main/java/org/qortal/controller/repository/PruneManager.java @@ -0,0 +1,160 @@ +package org.qortal.controller.repository; + +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; + +import org.qortal.data.block.BlockData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.settings.Settings; +import org.qortal.utils.DaemonThreadFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class PruneManager { + + private static final Logger LOGGER = LogManager.getLogger(PruneManager.class); + + private static PruneManager instance; + + private boolean isTopOnly = Settings.getInstance().isTopOnly(); + private int pruneBlockLimit = Settings.getInstance().getPruneBlockLimit(); + + private ExecutorService executorService; + + private PruneManager() { + + } + + public static synchronized PruneManager getInstance() { + if (instance == null) + instance = new PruneManager(); + + return instance; + } + + public void start() { + this.executorService = Executors.newCachedThreadPool(new DaemonThreadFactory()); + + if (Settings.getInstance().isTopOnly()) { + // Top-only-sync + this.startTopOnlySyncMode(); + } + else if (Settings.getInstance().isArchiveEnabled()) { + // Full node with block archive + this.startFullNodeWithBlockArchive(); + } + else { + // Full node with full SQL support + this.startFullSQLNode(); + } + } + + /** + * Top-only-sync + * In this mode, we delete (prune) all blocks except + * a small number of recent ones. There is no need for + * trimming or archiving, because all relevant blocks + * are deleted. + */ + private void startTopOnlySyncMode() { + this.startPruning(); + + // We don't need the block archive in top-only mode + this.deleteArchive(); + } + + /** + * Full node with block archive + * In this mode we archive trimmed blocks, and then + * prune archived blocks to keep the database small + */ + private void startFullNodeWithBlockArchive() { + this.startTrimming(); + this.startArchiving(); + this.startPruning(); + } + + /** + * Full node with full SQL support + * In this mode we trim the database but don't prune + * or archive any data, because we want to maintain + * full SQL support of old blocks. 
This mode will not + * be actively maintained but can be used by those who + * need to perform SQL analysis on older blocks. + */ + private void startFullSQLNode() { + this.startTrimming(); + } + + + private void startPruning() { + this.executorService.execute(new AtStatesPruner()); + this.executorService.execute(new BlockPruner()); + } + + private void startTrimming() { + this.executorService.execute(new AtStatesTrimmer()); + this.executorService.execute(new OnlineAccountsSignaturesTrimmer()); + } + + private void startArchiving() { + this.executorService.execute(new BlockArchiver()); + } + + private void deleteArchive() { + if (!Settings.getInstance().isTopOnly()) { + LOGGER.error("Refusing to delete archive when not in top-only mode"); + } + + try { + Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive"); + if (archivePath.toFile().exists()) { + LOGGER.info("Deleting block archive because we are in top-only mode..."); + FileUtils.deleteDirectory(archivePath.toFile()); + } + + } catch (IOException e) { + LOGGER.info("Couldn't delete archive: {}", e.getMessage()); + } + } + + public void stop() { + this.executorService.shutdownNow(); + + try { + this.executorService.awaitTermination(2L, TimeUnit.SECONDS); + } catch (InterruptedException e) { + // We tried... + } + } + + public boolean isBlockPruned(int height) throws DataException { + if (!this.isTopOnly) { + return false; + } + + BlockData chainTip = Controller.getInstance().getChainTip(); + if (chainTip == null) { + throw new DataException("Unable to determine chain tip when checking if a block is pruned"); + } + + if (height == 1) { + // We don't prune the genesis block + return false; + } + + final int ourLatestHeight = chainTip.getHeight(); + final int latestUnprunedHeight = ourLatestHeight - this.pruneBlockLimit; + + return (height < latestUnprunedHeight); + } + +} diff --git a/src/main/java/org/qortal/controller/tradebot/BitcoinACCTv1TradeBot.java b/src/main/java/org/qortal/controller/tradebot/BitcoinACCTv1TradeBot.java index 790584d3..038ecded 100644 --- a/src/main/java/org/qortal/controller/tradebot/BitcoinACCTv1TradeBot.java +++ b/src/main/java/org/qortal/controller/tradebot/BitcoinACCTv1TradeBot.java @@ -360,6 +360,7 @@ public class BitcoinACCTv1TradeBot implements AcctTradeBot { case BOB_DONE: case ALICE_REFUNDED: case BOB_REFUNDED: + case ALICE_REFUNDING_A: return true; default: diff --git a/src/main/java/org/qortal/controller/tradebot/DogecoinACCTv1TradeBot.java b/src/main/java/org/qortal/controller/tradebot/DogecoinACCTv1TradeBot.java index 516fa621..e7b60b25 100644 --- a/src/main/java/org/qortal/controller/tradebot/DogecoinACCTv1TradeBot.java +++ b/src/main/java/org/qortal/controller/tradebot/DogecoinACCTv1TradeBot.java @@ -353,6 +353,7 @@ public class DogecoinACCTv1TradeBot implements AcctTradeBot { case BOB_DONE: case ALICE_REFUNDED: case BOB_REFUNDED: + case ALICE_REFUNDING_A: return true; default: diff --git a/src/main/java/org/qortal/controller/tradebot/LitecoinACCTv1TradeBot.java b/src/main/java/org/qortal/controller/tradebot/LitecoinACCTv1TradeBot.java index 0246c199..686b675e 100644 --- a/src/main/java/org/qortal/controller/tradebot/LitecoinACCTv1TradeBot.java +++ b/src/main/java/org/qortal/controller/tradebot/LitecoinACCTv1TradeBot.java @@ -364,6 +364,7 @@ public class LitecoinACCTv1TradeBot implements AcctTradeBot { case BOB_DONE: case ALICE_REFUNDED: case BOB_REFUNDED: + case ALICE_REFUNDING_A: return true; default: diff --git 
a/src/main/java/org/qortal/controller/tradebot/TradeBot.java b/src/main/java/org/qortal/controller/tradebot/TradeBot.java index 6e9d1474..36351927 100644 --- a/src/main/java/org/qortal/controller/tradebot/TradeBot.java +++ b/src/main/java/org/qortal/controller/tradebot/TradeBot.java @@ -245,17 +245,17 @@ public class TradeBot implements Listener { } } - /*package*/ static byte[] generateTradePrivateKey() { + public static byte[] generateTradePrivateKey() { // The private key is used for both Curve25519 and secp256k1 so needs to be valid for both. // Curve25519 accepts any seed, so generate a valid secp256k1 key and use that. return new ECKey().getPrivKeyBytes(); } - /*package*/ static byte[] deriveTradeNativePublicKey(byte[] privateKey) { + public static byte[] deriveTradeNativePublicKey(byte[] privateKey) { return PrivateKeyAccount.toPublicKey(privateKey); } - /*package*/ static byte[] deriveTradeForeignPublicKey(byte[] privateKey) { + public static byte[] deriveTradeForeignPublicKey(byte[] privateKey) { return ECKey.fromPrivate(privateKey).getPubKey(); } diff --git a/src/main/java/org/qortal/crosschain/Bitcoiny.java b/src/main/java/org/qortal/crosschain/Bitcoiny.java index d4693818..3665f4ba 100644 --- a/src/main/java/org/qortal/crosschain/Bitcoiny.java +++ b/src/main/java/org/qortal/crosschain/Bitcoiny.java @@ -406,14 +406,24 @@ public abstract class Bitcoiny implements ForeignBlockchain { protected SimpleTransaction convertToSimpleTransaction(BitcoinyTransaction t, Set keySet) { long amount = 0; long total = 0L; + long totalInputAmount = 0L; + long totalOutputAmount = 0L; + List inputs = new ArrayList<>(); + List outputs = new ArrayList<>(); + for (BitcoinyTransaction.Input input : t.inputs) { try { BitcoinyTransaction t2 = getTransaction(input.outputTxHash); List senders = t2.outputs.get(input.outputVout).addresses; + long inputAmount = t2.outputs.get(input.outputVout).value; + totalInputAmount += inputAmount; for (String sender : senders) { + boolean addressInWallet = false; if (keySet.contains(sender)) { - total += t2.outputs.get(input.outputVout).value; + total += inputAmount; + addressInWallet = true; } + inputs.add(new SimpleTransaction.Input(sender, inputAmount, addressInWallet)); } } catch (ForeignBlockchainException e) { LOGGER.trace("Failed to retrieve transaction information {}", input.outputTxHash); @@ -422,17 +432,22 @@ public abstract class Bitcoiny implements ForeignBlockchain { if (t.outputs != null && !t.outputs.isEmpty()) { for (BitcoinyTransaction.Output output : t.outputs) { for (String address : output.addresses) { + boolean addressInWallet = false; if (keySet.contains(address)) { if (total > 0L) { amount -= (total - output.value); } else { amount += output.value; } + addressInWallet = true; } + outputs.add(new SimpleTransaction.Output(address, output.value, addressInWallet)); } + totalOutputAmount += output.value; } } - return new SimpleTransaction(t.txHash, t.timestamp, amount); + long fee = totalInputAmount - totalOutputAmount; + return new SimpleTransaction(t.txHash, t.timestamp, amount, fee, inputs, outputs); } /** diff --git a/src/main/java/org/qortal/crosschain/ElectrumX.java b/src/main/java/org/qortal/crosschain/ElectrumX.java index 8f41ed86..4ab7e0b1 100644 --- a/src/main/java/org/qortal/crosschain/ElectrumX.java +++ b/src/main/java/org/qortal/crosschain/ElectrumX.java @@ -653,18 +653,27 @@ public class ElectrumX extends BitcoinyBlockchainProvider { Object errorObj = responseJson.get("error"); if (errorObj != null) { - if (errorObj instanceof String) - 
throw new ForeignBlockchainException.NetworkException(String.format("Unexpected error message from ElectrumX RPC %s: %s", method, (String) errorObj), this.currentServer); + if (errorObj instanceof String) { + LOGGER.debug(String.format("Unexpected error message from ElectrumX server %s for RPC method %s: %s", this.currentServer, method, (String) errorObj)); + // Try another server + return null; + } - if (!(errorObj instanceof JSONObject)) - throw new ForeignBlockchainException.NetworkException(String.format("Unexpected error response from ElectrumX RPC %s", method), this.currentServer); + if (!(errorObj instanceof JSONObject)) { + LOGGER.debug(String.format("Unexpected error response from ElectrumX server %s for RPC method %s", this.currentServer, method)); + // Try another server + return null; + } JSONObject errorJson = (JSONObject) errorObj; Object messageObj = errorJson.get("message"); - if (!(messageObj instanceof String)) - throw new ForeignBlockchainException.NetworkException(String.format("Missing/invalid message in error response from ElectrumX RPC %s", method), this.currentServer); + if (!(messageObj instanceof String)) { + LOGGER.debug(String.format("Missing/invalid message in error response from ElectrumX server %s for RPC method %s", this.currentServer, method)); + // Try another server + return null; + } String message = (String) messageObj; diff --git a/src/main/java/org/qortal/crosschain/Litecoin.java b/src/main/java/org/qortal/crosschain/Litecoin.java index 0c04243c..42ee70de 100644 --- a/src/main/java/org/qortal/crosschain/Litecoin.java +++ b/src/main/java/org/qortal/crosschain/Litecoin.java @@ -21,6 +21,8 @@ public class Litecoin extends Bitcoiny { private static final Coin DEFAULT_FEE_PER_KB = Coin.valueOf(10000); // 0.0001 LTC per 1000 bytes + private static final long MINIMUM_ORDER_AMOUNT = 1000000; // 0.01 LTC minimum order, to avoid dust errors + // Temporary values until a dynamic fee system is written. private static final long MAINNET_FEE = 1000L; private static final long NON_MAINNET_FEE = 1000L; // enough for TESTNET3 and should be OK for REGTEST @@ -164,6 +166,11 @@ public class Litecoin extends Bitcoiny { return DEFAULT_FEE_PER_KB; } + @Override + public long getMinimumOrderAmount() { + return MINIMUM_ORDER_AMOUNT; + } + /** * Returns estimated LTC fee, in sats per 1000bytes, optionally for historic timestamp. 
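// Illustrative sketch, not part of the diff above: a hypothetical pre-flight check showing how a
// caller might use the new MINIMUM_ORDER_AMOUNT / getMinimumOrderAmount() value added to Litecoin
// to reject dust-sized orders early. The Litecoin.getInstance() accessor and the sample amounts
// are assumptions made for illustration only.
long orderAmount = 500_000L; // 0.005 LTC in sats, below the 0.01 LTC minimum added in this patch
long minimumAmount = Litecoin.getInstance().getMinimumOrderAmount(); // 1000000 sats per this patch

if (orderAmount < minimumAmount)
    throw new IllegalArgumentException(String.format(
            "Order amount %d sats is below the %d sat minimum", orderAmount, minimumAmount));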
* diff --git a/src/main/java/org/qortal/crosschain/SimpleTransaction.java b/src/main/java/org/qortal/crosschain/SimpleTransaction.java index 0fae20a5..27c9f9e3 100644 --- a/src/main/java/org/qortal/crosschain/SimpleTransaction.java +++ b/src/main/java/org/qortal/crosschain/SimpleTransaction.java @@ -2,20 +2,85 @@ package org.qortal.crosschain; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; +import java.util.List; @XmlAccessorType(XmlAccessType.FIELD) public class SimpleTransaction { private String txHash; private Integer timestamp; private long totalAmount; + private long feeAmount; + private List inputs; + private List outputs; + + + @XmlAccessorType(XmlAccessType.FIELD) + public static class Input { + private String address; + private long amount; + private boolean addressInWallet; + + public Input() { + } + + public Input(String address, long amount, boolean addressInWallet) { + this.address = address; + this.amount = amount; + this.addressInWallet = addressInWallet; + } + + public String getAddress() { + return address; + } + + public long getAmount() { + return amount; + } + + public boolean getAddressInWallet() { + return addressInWallet; + } + } + + @XmlAccessorType(XmlAccessType.FIELD) + public static class Output { + private String address; + private long amount; + private boolean addressInWallet; + + public Output() { + } + + public Output(String address, long amount, boolean addressInWallet) { + this.address = address; + this.amount = amount; + this.addressInWallet = addressInWallet; + } + + public String getAddress() { + return address; + } + + public long getAmount() { + return amount; + } + + public boolean getAddressInWallet() { + return addressInWallet; + } + } + public SimpleTransaction() { } - public SimpleTransaction(String txHash, Integer timestamp, long totalAmount) { + public SimpleTransaction(String txHash, Integer timestamp, long totalAmount, long feeAmount, List inputs, List outputs) { this.txHash = txHash; this.timestamp = timestamp; this.totalAmount = totalAmount; + this.feeAmount = feeAmount; + this.inputs = inputs; + this.outputs = outputs; } public String getTxHash() { @@ -29,4 +94,16 @@ public class SimpleTransaction { public long getTotalAmount() { return totalAmount; } -} \ No newline at end of file + + public long getFeeAmount() { + return feeAmount; + } + + public List getInputs() { + return this.inputs; + } + + public List getOutputs() { + return this.outputs; + } +} diff --git a/src/main/java/org/qortal/crypto/Crypto.java b/src/main/java/org/qortal/crypto/Crypto.java index 49cdd2fb..5d91781c 100644 --- a/src/main/java/org/qortal/crypto/Crypto.java +++ b/src/main/java/org/qortal/crypto/Crypto.java @@ -1,5 +1,8 @@ package org.qortal.crypto; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; @@ -75,12 +78,74 @@ public abstract class Crypto { return digest(digest(input)); } + /** + * Returns 32-byte SHA-256 digest of file passed in input. 
+ * + * @param file + * file in which to perform digest + * @return byte[32] digest, or null if SHA-256 algorithm can't be accessed + * + * @throws IOException if the file cannot be read + */ + public static byte[] digest(File file) throws IOException { + return Crypto.digest(file, 8192); + } + + /** + * Returns 32-byte SHA-256 digest of file passed in input, in hex format + * + * @param file + * file in which to perform digest + * @return String digest as a hexadecimal string, or null if SHA-256 algorithm can't be accessed + * + * @throws IOException if the file cannot be read + */ + public static String digestHexString(File file, int bufferSize) throws IOException { + byte[] digest = Crypto.digest(file, bufferSize); + + // Convert to hex + StringBuilder stringBuilder = new StringBuilder(); + for (byte b : digest) { + stringBuilder.append(String.format("%02x", b)); + } + return stringBuilder.toString(); + } + + /** + * Returns 32-byte SHA-256 digest of file passed in input. + * + * @param file + * file in which to perform digest + * @param bufferSize + * the number of bytes to load into memory + * @return byte[32] digest, or null if SHA-256 algorithm can't be accessed + * + * @throws IOException if the file cannot be read + */ + public static byte[] digest(File file, int bufferSize) throws IOException { + try { + MessageDigest sha256 = MessageDigest.getInstance("SHA-256"); + FileInputStream fileInputStream = new FileInputStream(file); + byte[] bytes = new byte[bufferSize]; + int count; + + while ((count = fileInputStream.read(bytes)) != -1) { + sha256.update(bytes, 0, count); + } + fileInputStream.close(); + + return sha256.digest(); + + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("SHA-256 message digest not available"); + } + } + /** * Returns 64-byte duplicated digest of message passed in input. *

* Effectively Bytes.concat(digest(input), digest(input)). - * - * @param addressVersion + * * @param input */ public static byte[] dupDigest(byte[] input) { diff --git a/src/main/java/org/qortal/data/account/MintingAccountData.java b/src/main/java/org/qortal/data/account/MintingAccountData.java index 02b4c0f8..63c6c723 100644 --- a/src/main/java/org/qortal/data/account/MintingAccountData.java +++ b/src/main/java/org/qortal/data/account/MintingAccountData.java @@ -4,10 +4,12 @@ import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlTransient; +import org.json.JSONObject; import org.qortal.crypto.Crypto; import io.swagger.v3.oas.annotations.media.Schema; import io.swagger.v3.oas.annotations.media.Schema.AccessMode; +import org.qortal.utils.Base58; // All properties to be converted to JSON via JAXB @XmlAccessorType(XmlAccessType.FIELD) @@ -61,4 +63,21 @@ public class MintingAccountData { return this.publicKey; } + + // JSON + + public JSONObject toJson() { + JSONObject jsonObject = new JSONObject(); + jsonObject.put("privateKey", Base58.encode(this.getPrivateKey())); + jsonObject.put("publicKey", Base58.encode(this.getPublicKey())); + return jsonObject; + } + + public static MintingAccountData fromJson(JSONObject json) { + return new MintingAccountData( + json.isNull("privateKey") ? null : Base58.decode(json.getString("privateKey")), + json.isNull("publicKey") ? null : Base58.decode(json.getString("publicKey")) + ); + } + } diff --git a/src/main/java/org/qortal/data/at/ATData.java b/src/main/java/org/qortal/data/at/ATData.java index 02f79f84..9e977acf 100644 --- a/src/main/java/org/qortal/data/at/ATData.java +++ b/src/main/java/org/qortal/data/at/ATData.java @@ -23,6 +23,7 @@ public class ATData { private boolean isFrozen; @XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class) private Long frozenBalance; + private Long sleepUntilMessageTimestamp; // Constructors @@ -31,7 +32,8 @@ public class ATData { } public ATData(String ATAddress, byte[] creatorPublicKey, long creation, int version, long assetId, byte[] codeBytes, byte[] codeHash, - boolean isSleeping, Integer sleepUntilHeight, boolean isFinished, boolean hadFatalError, boolean isFrozen, Long frozenBalance) { + boolean isSleeping, Integer sleepUntilHeight, boolean isFinished, boolean hadFatalError, boolean isFrozen, Long frozenBalance, + Long sleepUntilMessageTimestamp) { this.ATAddress = ATAddress; this.creatorPublicKey = creatorPublicKey; this.creation = creation; @@ -45,6 +47,7 @@ public class ATData { this.hadFatalError = hadFatalError; this.isFrozen = isFrozen; this.frozenBalance = frozenBalance; + this.sleepUntilMessageTimestamp = sleepUntilMessageTimestamp; } /** For constructing skeleton ATData with bare minimum info. 
*/ @@ -133,4 +136,12 @@ public class ATData { this.frozenBalance = frozenBalance; } + public Long getSleepUntilMessageTimestamp() { + return this.sleepUntilMessageTimestamp; + } + + public void setSleepUntilMessageTimestamp(Long sleepUntilMessageTimestamp) { + this.sleepUntilMessageTimestamp = sleepUntilMessageTimestamp; + } + } diff --git a/src/main/java/org/qortal/data/at/ATStateData.java b/src/main/java/org/qortal/data/at/ATStateData.java index e689f5ae..ddace8e3 100644 --- a/src/main/java/org/qortal/data/at/ATStateData.java +++ b/src/main/java/org/qortal/data/at/ATStateData.java @@ -10,35 +10,32 @@ public class ATStateData { private Long fees; private boolean isInitial; + // Qortal-AT-specific + private Long sleepUntilMessageTimestamp; + // Constructors /** Create new ATStateData */ - public ATStateData(String ATAddress, Integer height, byte[] stateData, byte[] stateHash, Long fees, boolean isInitial) { + public ATStateData(String ATAddress, Integer height, byte[] stateData, byte[] stateHash, Long fees, + boolean isInitial, Long sleepUntilMessageTimestamp) { this.ATAddress = ATAddress; this.height = height; this.stateData = stateData; this.stateHash = stateHash; this.fees = fees; this.isInitial = isInitial; + this.sleepUntilMessageTimestamp = sleepUntilMessageTimestamp; } /** For recreating per-block ATStateData from repository where not all info is needed */ public ATStateData(String ATAddress, int height, byte[] stateHash, Long fees, boolean isInitial) { - this(ATAddress, height, null, stateHash, fees, isInitial); - } - - /** For creating ATStateData from serialized bytes when we don't have all the info */ - public ATStateData(String ATAddress, byte[] stateHash) { - // This won't ever be initial AT state from deployment as that's never serialized over the network, - // but generated when the DeployAtTransaction is processed locally. - this(ATAddress, null, null, stateHash, null, false); + this(ATAddress, height, null, stateHash, fees, isInitial, null); } /** For creating ATStateData from serialized bytes when we don't have all the info */ public ATStateData(String ATAddress, byte[] stateHash, Long fees) { - // This won't ever be initial AT state from deployment as that's never serialized over the network, - // but generated when the DeployAtTransaction is processed locally. - this(ATAddress, null, null, stateHash, fees, false); + // This won't ever be initial AT state from deployment, as that's never serialized over the network. 
+ this(ATAddress, null, null, stateHash, fees, false, null); } // Getters / setters @@ -72,4 +69,12 @@ public class ATStateData { return this.isInitial; } + public Long getSleepUntilMessageTimestamp() { + return this.sleepUntilMessageTimestamp; + } + + public void setSleepUntilMessageTimestamp(Long sleepUntilMessageTimestamp) { + this.sleepUntilMessageTimestamp = sleepUntilMessageTimestamp; + } + } diff --git a/src/main/java/org/qortal/data/block/BlockArchiveData.java b/src/main/java/org/qortal/data/block/BlockArchiveData.java new file mode 100644 index 00000000..c9db4032 --- /dev/null +++ b/src/main/java/org/qortal/data/block/BlockArchiveData.java @@ -0,0 +1,47 @@ +package org.qortal.data.block; + +import org.qortal.block.Block; + +public class BlockArchiveData { + + // Properties + private byte[] signature; + private Integer height; + private Long timestamp; + private byte[] minterPublicKey; + + // Constructors + + public BlockArchiveData(byte[] signature, Integer height, long timestamp, byte[] minterPublicKey) { + this.signature = signature; + this.height = height; + this.timestamp = timestamp; + this.minterPublicKey = minterPublicKey; + } + + public BlockArchiveData(BlockData blockData) { + this.signature = blockData.getSignature(); + this.height = blockData.getHeight(); + this.timestamp = blockData.getTimestamp(); + this.minterPublicKey = blockData.getMinterPublicKey(); + } + + // Getters/setters + + public byte[] getSignature() { + return this.signature; + } + + public Integer getHeight() { + return this.height; + } + + public Long getTimestamp() { + return this.timestamp; + } + + public byte[] getMinterPublicKey() { + return this.minterPublicKey; + } + +} diff --git a/src/main/java/org/qortal/data/transaction/RegisterNameTransactionData.java b/src/main/java/org/qortal/data/transaction/RegisterNameTransactionData.java index d4455da1..c2b06fd2 100644 --- a/src/main/java/org/qortal/data/transaction/RegisterNameTransactionData.java +++ b/src/main/java/org/qortal/data/transaction/RegisterNameTransactionData.java @@ -26,7 +26,7 @@ public class RegisterNameTransactionData extends TransactionData { @Schema(description = "requested name", example = "my-name") private String name; - @Schema(description = "simple name-related info in JSON format", example = "{ \"age\": 30 }") + @Schema(description = "simple name-related info in JSON or text format", example = "Registered Name on the Qortal Chain") private String data; // For internal use diff --git a/src/main/java/org/qortal/data/transaction/UpdateNameTransactionData.java b/src/main/java/org/qortal/data/transaction/UpdateNameTransactionData.java index 43c8da59..b43361db 100644 --- a/src/main/java/org/qortal/data/transaction/UpdateNameTransactionData.java +++ b/src/main/java/org/qortal/data/transaction/UpdateNameTransactionData.java @@ -26,7 +26,7 @@ public class UpdateNameTransactionData extends TransactionData { @Schema(description = "new name", example = "my-new-name") private String newName; - @Schema(description = "replacement simple name-related info in JSON format", example = "{ \"age\": 30 }") + @Schema(description = "replacement simple name-related info in JSON or text format", example = "Registered Name on the Qortal Chain") private String newData; // For internal use diff --git a/src/main/java/org/qortal/gui/Gui.java b/src/main/java/org/qortal/gui/Gui.java index 118718e2..87342f6a 100644 --- a/src/main/java/org/qortal/gui/Gui.java +++ b/src/main/java/org/qortal/gui/Gui.java @@ -23,17 +23,21 @@ public class Gui { private SysTray 
sysTray = null; private Gui() { - this.isHeadless = GraphicsEnvironment.isHeadless(); + try { + this.isHeadless = GraphicsEnvironment.isHeadless(); - if (!this.isHeadless) { - try { - UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName()); - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException - | UnsupportedLookAndFeelException e) { - // Use whatever look-and-feel comes by default then + if (!this.isHeadless) { + try { + UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName()); + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | UnsupportedLookAndFeelException e) { + // Use whatever look-and-feel comes by default then + } + + showSplash(); } - - showSplash(); + } catch (Exception e) { + LOGGER.info("Unable to initialize GUI: {}", e.getMessage()); } } diff --git a/src/main/java/org/qortal/gui/SplashFrame.java b/src/main/java/org/qortal/gui/SplashFrame.java index 37d20ec5..c4ea51d0 100644 --- a/src/main/java/org/qortal/gui/SplashFrame.java +++ b/src/main/java/org/qortal/gui/SplashFrame.java @@ -6,9 +6,11 @@ import java.util.List; import java.awt.image.BufferedImage; import javax.swing.*; +import javax.swing.border.EmptyBorder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; public class SplashFrame { @@ -16,6 +18,7 @@ public class SplashFrame { private static SplashFrame instance; private JFrame splashDialog; + private SplashPanel splashPanel; @SuppressWarnings("serial") public static class SplashPanel extends JPanel { @@ -23,26 +26,53 @@ public class SplashFrame { private String defaultSplash = "Qlogo_512.png"; + private JLabel statusLabel; + public SplashPanel() { image = Gui.loadImage(defaultSplash); - setOpaque(false); - setLayout(new GridBagLayout()); - } + setOpaque(true); + setLayout(new BoxLayout(this, BoxLayout.Y_AXIS)); + setBorder(new EmptyBorder(10, 10, 10, 10)); + setBackground(Color.BLACK); - @Override - protected void paintComponent(Graphics g) { - super.paintComponent(g); - g.drawImage(image, 0, 0, getWidth(), getHeight(), this); + // Add logo + JLabel imageLabel = new JLabel(new ImageIcon(image)); + imageLabel.setSize(new Dimension(300, 300)); + add(imageLabel); + + // Add spacing + add(Box.createRigidArea(new Dimension(0, 16))); + + // Add status label + String text = String.format("Starting Qortal Core v%s...", Controller.getInstance().getVersionStringWithoutPrefix()); + statusLabel = new JLabel(text, JLabel.CENTER); + statusLabel.setMaximumSize(new Dimension(500, 50)); + statusLabel.setFont(new Font("Verdana", Font.PLAIN, 20)); + statusLabel.setBackground(Color.BLACK); + statusLabel.setForeground(new Color(255, 255, 255, 255)); + statusLabel.setOpaque(true); + statusLabel.setBorder(null); + add(statusLabel); } @Override public Dimension getPreferredSize() { - return new Dimension(500, 500); + return new Dimension(500, 580); + } + + public void updateStatus(String text) { + if (statusLabel != null) { + statusLabel.setText(text); + } } } private SplashFrame() { + if (GraphicsEnvironment.isHeadless()) { + return; + } + this.splashDialog = new JFrame(); List icons = new ArrayList<>(); @@ -55,12 +85,13 @@ public class SplashFrame { icons.add(Gui.loadImage("icons/Qlogo_128.png")); this.splashDialog.setIconImages(icons); - this.splashDialog.getContentPane().add(new SplashPanel()); + this.splashPanel = new SplashPanel(); + this.splashDialog.getContentPane().add(this.splashPanel); 
this.splashDialog.setDefaultCloseOperation(JFrame.DO_NOTHING_ON_CLOSE); this.splashDialog.setUndecorated(true); this.splashDialog.pack(); this.splashDialog.setLocationRelativeTo(null); - this.splashDialog.setBackground(new Color(0,0,0,0)); + this.splashDialog.setBackground(Color.BLACK); this.splashDialog.setVisible(true); } @@ -79,4 +110,10 @@ public class SplashFrame { this.splashDialog.dispose(); } + public void updateStatus(String text) { + if (this.splashPanel != null) { + this.splashPanel.updateStatus(text); + } + } + } diff --git a/src/main/java/org/qortal/list/ResourceList.java b/src/main/java/org/qortal/list/ResourceList.java new file mode 100644 index 00000000..c80deac3 --- /dev/null +++ b/src/main/java/org/qortal/list/ResourceList.java @@ -0,0 +1,157 @@ +package org.qortal.list; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.json.JSONArray; +import org.qortal.settings.Settings; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; + +public class ResourceList { + + private static final Logger LOGGER = LogManager.getLogger(ResourceList.class); + + private String category; + private String resourceName; + private List list; + + /** + * ResourceList + * Creates or updates a list for the purpose of tracking resources on the Qortal network + * This can be used for local blocking, or even for curating and sharing content lists + * Lists are backed off to JSON files (in the lists folder) to ease sharing between nodes and users + * + * @param category - for instance "blacklist", "whitelist", or "userlist" + * @param resourceName - for instance "address", "poll", or "group" + * @throws IOException + */ + public ResourceList(String category, String resourceName) throws IOException { + this.category = category; + this.resourceName = resourceName; + this.list = new ArrayList<>(); + this.load(); + } + + + /* Filesystem */ + + private Path getFilePath() { + String pathString = String.format("%s%s%s_%s.json", Settings.getInstance().getListsPath(), + File.separator, this.resourceName, this.category); + return Paths.get(pathString); + } + + public void save() throws IOException { + if (this.resourceName == null) { + throw new IllegalStateException("Can't save list with missing resource name"); + } + if (this.category == null) { + throw new IllegalStateException("Can't save list with missing category"); + } + String jsonString = ResourceList.listToJSONString(this.list); + Path filePath = this.getFilePath(); + + // Create parent directory if needed + try { + Files.createDirectories(filePath.getParent()); + } catch (IOException e) { + throw new IllegalStateException("Unable to create lists directory"); + } + + BufferedWriter writer = new BufferedWriter(new FileWriter(filePath.toString())); + writer.write(jsonString); + writer.close(); + } + + private boolean load() throws IOException { + Path path = this.getFilePath(); + File resourceListFile = new File(path.toString()); + if (!resourceListFile.exists()) { + return false; + } + + try { + String jsonString = new String(Files.readAllBytes(path)); + this.list = ResourceList.listFromJSONString(jsonString); + } catch (IOException e) { + throw new IOException(String.format("Couldn't read contents from file %s", path.toString())); + } + + return true; + } + + public boolean revert() { + try { + return 
this.load(); + } catch (IOException e) { + LOGGER.info("Unable to revert {} {}", this.resourceName, this.category); + } + return false; + } + + + /* List management */ + + public void add(String resource) { + if (resource == null || this.list == null) { + return; + } + if (!this.contains(resource)) { + this.list.add(resource); + } + } + + public void remove(String resource) { + if (resource == null || this.list == null) { + return; + } + this.list.remove(resource); + } + + public boolean contains(String resource) { + if (resource == null || this.list == null) { + return false; + } + return this.list.contains(resource); + } + + + /* Utils */ + + public static String listToJSONString(List list) { + if (list == null) { + return null; + } + JSONArray items = new JSONArray(); + for (String item : list) { + items.put(item); + } + return items.toString(4); + } + + private static List listFromJSONString(String jsonString) { + if (jsonString == null) { + return null; + } + JSONArray jsonList = new JSONArray(jsonString); + List resourceList = new ArrayList<>(); + for (int i=0; i 0) { for (Peer peer : peersToDisconnect) { - LOGGER.info("Forcing disconnection of peer {} because connection age ({} ms) " + + LOGGER.debug("Forcing disconnection of peer {} because connection age ({} ms) " + "has reached the maximum ({} ms)", peer, peer.getConnectionAge(), peer.getMaxConnectionAge()); peer.disconnect("Connection age too old"); } diff --git a/src/main/java/org/qortal/network/message/CachedBlockMessage.java b/src/main/java/org/qortal/network/message/CachedBlockMessage.java index 7a175810..e5029ab0 100644 --- a/src/main/java/org/qortal/network/message/CachedBlockMessage.java +++ b/src/main/java/org/qortal/network/message/CachedBlockMessage.java @@ -23,7 +23,7 @@ public class CachedBlockMessage extends Message { this.block = block; } - private CachedBlockMessage(byte[] cachedBytes) { + public CachedBlockMessage(byte[] cachedBytes) { super(MessageType.BLOCK); this.block = null; diff --git a/src/main/java/org/qortal/repository/ATRepository.java b/src/main/java/org/qortal/repository/ATRepository.java index 5516ac28..0f537ae9 100644 --- a/src/main/java/org/qortal/repository/ATRepository.java +++ b/src/main/java/org/qortal/repository/ATRepository.java @@ -1,5 +1,7 @@ package org.qortal.repository; +import java.sql.ResultSet; +import java.sql.SQLException; import java.util.List; import java.util.Set; @@ -103,7 +105,7 @@ public interface ATRepository { /** * Returns all ATStateData for a given block height. *

- * Unlike getATState, only returns ATStateData saved at the given height. + * Unlike getATState, only returns partial ATStateData saved at the given height. * * @param height * - block height @@ -112,6 +114,14 @@ public interface ATRepository { */ public List getBlockATStatesAtHeight(int height) throws DataException; + + /** Rebuild the latest AT states cache, necessary for AT state trimming/pruning. + *

+ * NOTE: performs implicit repository.saveChanges(). + */ + public void rebuildLatestAtStates() throws DataException; + + /** Returns height of first trimmable AT state. */ public int getAtTrimHeight() throws DataException; @@ -121,12 +131,27 @@ public interface ATRepository { */ public void setAtTrimHeight(int trimHeight) throws DataException; - /** Hook to allow repository to prepare/cache info for AT state trimming. */ - public void prepareForAtStateTrimming() throws DataException; - /** Trims full AT state data between passed heights. Returns number of trimmed rows. */ public int trimAtStates(int minHeight, int maxHeight, int limit) throws DataException; + + /** Returns height of first prunable AT state. */ + public int getAtPruneHeight() throws DataException; + + /** Sets new base height for AT state pruning. + *

+ * NOTE: performs implicit repository.saveChanges(). + */ + public void setAtPruneHeight(int pruneHeight) throws DataException; + + /** Prunes full AT state data between passed heights. Returns number of pruned rows. */ + public int pruneAtStates(int minHeight, int maxHeight) throws DataException; + + + /** Checks for the presence of the ATStatesHeightIndex in repository */ + public boolean hasAtStatesHeightIndex() throws DataException; + + /** * Save ATStateData into repository. *

diff --git a/src/main/java/org/qortal/repository/AccountRepository.java b/src/main/java/org/qortal/repository/AccountRepository.java index a23771f9..256f9556 100644 --- a/src/main/java/org/qortal/repository/AccountRepository.java +++ b/src/main/java/org/qortal/repository/AccountRepository.java @@ -191,6 +191,8 @@ public interface AccountRepository { public List getMintingAccounts() throws DataException; + public MintingAccountData getMintingAccount(byte[] mintingAccountKey) throws DataException; + public void save(MintingAccountData mintingAccountData) throws DataException; /** Delete minting account info, used by BlockMinter, from repository using passed public or private key. */ diff --git a/src/main/java/org/qortal/repository/BlockArchiveReader.java b/src/main/java/org/qortal/repository/BlockArchiveReader.java new file mode 100644 index 00000000..cff272a8 --- /dev/null +++ b/src/main/java/org/qortal/repository/BlockArchiveReader.java @@ -0,0 +1,284 @@ +package org.qortal.repository; + +import com.google.common.primitives.Ints; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockArchiveData; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.settings.Settings; +import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformer; +import org.qortal.utils.Triple; + +import static org.qortal.transform.Transformer.INT_LENGTH; + +import java.io.*; +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; + +public class BlockArchiveReader { + + private static BlockArchiveReader instance; + private Map> fileListCache = Collections.synchronizedMap(new HashMap<>()); + + private static final Logger LOGGER = LogManager.getLogger(BlockArchiveReader.class); + + public BlockArchiveReader() { + + } + + public static synchronized BlockArchiveReader getInstance() { + if (instance == null) { + instance = new BlockArchiveReader(); + } + + return instance; + } + + private void fetchFileList() { + Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath(); + File archiveDirFile = archivePath.toFile(); + String[] files = archiveDirFile.list(); + Map> map = new HashMap<>(); + + if (files != null) { + for (String file : files) { + Path filePath = Paths.get(file); + String filename = filePath.getFileName().toString(); + + // Parse the filename + if (filename == null || !filename.contains("-") || !filename.contains(".")) { + // Not a usable file + continue; + } + // Remove the extension and split into two parts + String[] parts = filename.substring(0, filename.lastIndexOf('.')).split("-"); + Integer startHeight = Integer.parseInt(parts[0]); + Integer endHeight = Integer.parseInt(parts[1]); + Integer range = endHeight - startHeight; + map.put(filename, new Triple(startHeight, endHeight, range)); + } + } + this.fileListCache = map; + } + + public Triple, List> fetchBlockAtHeight(int height) { + if (this.fileListCache.isEmpty()) { + this.fetchFileList(); + } + + byte[] serializedBytes = this.fetchSerializedBlockBytesForHeight(height); + if (serializedBytes == null) { + return null; + } + + ByteBuffer byteBuffer = ByteBuffer.wrap(serializedBytes); + Triple, List> blockInfo = null; + try { + blockInfo = BlockTransformer.fromByteBuffer(byteBuffer); + if (blockInfo != null && blockInfo.getA() != null) { + // Block height is 
stored outside of the main serialized bytes, so it + // won't be set automatically. + blockInfo.getA().setHeight(height); + } + } catch (TransformationException e) { + return null; + } + return blockInfo; + } + + public Triple, List> fetchBlockWithSignature( + byte[] signature, Repository repository) { + + if (this.fileListCache.isEmpty()) { + this.fetchFileList(); + } + + Integer height = this.fetchHeightForSignature(signature, repository); + if (height != null) { + return this.fetchBlockAtHeight(height); + } + return null; + } + + public List, List>> fetchBlocksFromRange( + int startHeight, int endHeight) { + + List, List>> blockInfoList = new ArrayList<>(); + + for (int height = startHeight; height <= endHeight; height++) { + Triple, List> blockInfo = this.fetchBlockAtHeight(height); + if (blockInfo == null) { + return blockInfoList; + } + blockInfoList.add(blockInfo); + } + return blockInfoList; + } + + public Integer fetchHeightForSignature(byte[] signature, Repository repository) { + // Lookup the height for the requested signature + try { + BlockArchiveData archivedBlock = repository.getBlockArchiveRepository().getBlockArchiveDataForSignature(signature); + if (archivedBlock == null) { + return null; + } + return archivedBlock.getHeight(); + + } catch (DataException e) { + return null; + } + } + + public int fetchHeightForTimestamp(long timestamp, Repository repository) { + // Lookup the height for the requested signature + try { + return repository.getBlockArchiveRepository().getHeightFromTimestamp(timestamp); + + } catch (DataException e) { + return 0; + } + } + + private String getFilenameForHeight(int height) { + Iterator it = this.fileListCache.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry pair = (Map.Entry)it.next(); + if (pair == null && pair.getKey() == null && pair.getValue() == null) { + continue; + } + Triple heightInfo = (Triple) pair.getValue(); + Integer startHeight = heightInfo.getA(); + Integer endHeight = heightInfo.getB(); + + if (height >= startHeight && height <= endHeight) { + // Found the correct file + String filename = (String) pair.getKey(); + return filename; + } + } + + return null; + } + + public byte[] fetchSerializedBlockBytesForSignature(byte[] signature, boolean includeHeightPrefix, Repository repository) { + + if (this.fileListCache.isEmpty()) { + this.fetchFileList(); + } + + Integer height = this.fetchHeightForSignature(signature, repository); + if (height != null) { + byte[] blockBytes = this.fetchSerializedBlockBytesForHeight(height); + if (blockBytes == null) { + return null; + } + + // When responding to a peer with a BLOCK message, we must prefix the byte array with the block height + // This mimics the toData() method in BlockMessage and CachedBlockMessage + if (includeHeightPrefix) { + ByteArrayOutputStream bytes = new ByteArrayOutputStream(blockBytes.length + INT_LENGTH); + try { + bytes.write(Ints.toByteArray(height)); + bytes.write(blockBytes); + return bytes.toByteArray(); + + } catch (IOException e) { + return null; + } + } + return blockBytes; + } + return null; + } + + public byte[] fetchSerializedBlockBytesForHeight(int height) { + String filename = this.getFilenameForHeight(height); + if (filename == null) { + // We don't have this block in the archive + // Invalidate the file list cache in case it is out of date + this.invalidateFileListCache(); + return null; + } + + Path filePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive", filename).toAbsolutePath(); + RandomAccessFile file = null; + try { + 
file = new RandomAccessFile(filePath.toString(), "r"); + // Get info about this file (the "fixed length header") + final int version = file.readInt(); // Do not remove or comment out, as it is moving the file pointer + final int startHeight = file.readInt(); // Do not remove or comment out, as it is moving the file pointer + final int endHeight = file.readInt(); // Do not remove or comment out, as it is moving the file pointer + file.readInt(); // Block count (unused) // Do not remove or comment out, as it is moving the file pointer + final int variableHeaderLength = file.readInt(); // Do not remove or comment out, as it is moving the file pointer + final int fixedHeaderLength = (int)file.getFilePointer(); + // End of fixed length header + + // Make sure the version is one we recognize + if (version != 1) { + LOGGER.info("Error: unknown version in file {}: {}", filename, version); + return null; + } + + // Verify that the block is within the reported range + if (height < startHeight || height > endHeight) { + LOGGER.info("Error: requested height {} but the range of file {} is {}-{}", + height, filename, startHeight, endHeight); + return null; + } + + // Seek to the location of the block index in the variable length header + final int locationOfBlockIndexInVariableHeaderSegment = (height - startHeight) * INT_LENGTH; + file.seek(fixedHeaderLength + locationOfBlockIndexInVariableHeaderSegment); + + // Read the value to obtain the index of this block in the data segment + int locationOfBlockInDataSegment = file.readInt(); + + // Now seek to the block data itself + int dataSegmentStartIndex = fixedHeaderLength + variableHeaderLength + INT_LENGTH; // Confirmed correct + file.seek(dataSegmentStartIndex + locationOfBlockInDataSegment); + + // Read the block metadata + int blockHeight = file.readInt(); // Do not remove or comment out, as it is moving the file pointer + int blockLength = file.readInt(); // Do not remove or comment out, as it is moving the file pointer + + // Ensure the block height matches the one requested + if (blockHeight != height) { + LOGGER.info("Error: height {} does not match requested: {}", blockHeight, height); + return null; + } + + // Now retrieve the block's serialized bytes + byte[] blockBytes = new byte[blockLength]; + file.read(blockBytes); + + return blockBytes; + + } catch (FileNotFoundException e) { + LOGGER.info("File {} not found: {}", filename, e.getMessage()); + return null; + } catch (IOException e) { + LOGGER.info("Unable to read block {} from archive: {}", height, e.getMessage()); + return null; + } + finally { + // Close the file + if (file != null) { + try { + file.close(); + } catch (IOException e) { + // Failed to close, but no need to handle this + } + } + } + } + + public void invalidateFileListCache() { + this.fileListCache.clear(); + } + +} diff --git a/src/main/java/org/qortal/repository/BlockArchiveRepository.java b/src/main/java/org/qortal/repository/BlockArchiveRepository.java new file mode 100644 index 00000000..45465e93 --- /dev/null +++ b/src/main/java/org/qortal/repository/BlockArchiveRepository.java @@ -0,0 +1,130 @@ +package org.qortal.repository; + +import org.qortal.api.model.BlockSignerSummary; +import org.qortal.data.block.BlockArchiveData; +import org.qortal.data.block.BlockData; +import org.qortal.data.block.BlockSummaryData; + +import java.util.List; + +public interface BlockArchiveRepository { + + /** + * Returns BlockData from archive using block signature. 
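// Summary of the version-1 block archive file layout (e.g. a hypothetical 2-10000.dat), as read
// by BlockArchiveReader above and written by BlockArchiveWriter later in this diff. All integer
// fields are 4-byte big-endian; this is only a restatement of the code, not a separate spec.
//
//   Fixed-length header:
//     int version                  // currently 1
//     int startHeight              // height of first block in the file
//     int endHeight                // height of last block in the file
//     int blockCount               // number of blocks written (unused by the reader)
//     int variableHeaderLength     // length of the block-index segment below
//   Variable-length header:
//     int blockIndex[blockCount]   // offset of each block within the data segment
//   int dataSegmentLength
//   Data segment, one entry per block:
//     int blockHeight
//     int blockLength
//     byte[blockLength]            // serialized block bytes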
+ * + * @param signature + * @return block data, or null if not found in archive. + * @throws DataException + */ + public BlockData fromSignature(byte[] signature) throws DataException; + + /** + * Return height of block in archive using block's signature. + * + * @param signature + * @return height, or 0 if not found in blockchain. + * @throws DataException + */ + public int getHeightFromSignature(byte[] signature) throws DataException; + + /** + * Returns BlockData from archive using block height. + * + * @param height + * @return block data, or null if not found in blockchain. + * @throws DataException + */ + public BlockData fromHeight(int height) throws DataException; + + /** + * Returns a list of BlockData objects from archive using + * block height range. + * + * @param startHeight + * @return a list of BlockData objects, or an empty list if + * not found in blockchain. It is not guaranteed that all + * requested blocks will be returned. + * @throws DataException + */ + public List fromRange(int startHeight, int endHeight) throws DataException; + + /** + * Returns BlockData from archive using block reference. + * Currently relies on a child block being the one block + * higher than its parent. This limitation can be removed + * by storing the reference in the BlockArchive table, but + * this has been avoided to reduce space. + * + * @param reference + * @return block data, or null if either parent or child + * not found in the archive. + * @throws DataException + */ + public BlockData fromReference(byte[] reference) throws DataException; + + /** + * Return height of block with timestamp just before passed timestamp. + * + * @param timestamp + * @return height, or 0 if not found in blockchain. + * @throws DataException + */ + public int getHeightFromTimestamp(long timestamp) throws DataException; + + /** + * Returns block summaries for blocks signed by passed public key, or reward-share with minter with passed public key. + */ + public List getBlockSummariesBySigner(byte[] signerPublicKey, Integer limit, Integer offset, Boolean reverse) throws DataException; + + /** + * Returns summaries of block signers, optionally limited to passed addresses. + * This combines both the BlockArchive and the Blocks data into a single result set. + */ + public List getBlockSigners(List addresses, Integer limit, Integer offset, Boolean reverse) throws DataException; + + + /** Returns height of first unarchived block. */ + public int getBlockArchiveHeight() throws DataException; + + /** Sets new height for block archiving. + *

+ * NOTE: performs implicit repository.saveChanges(). + */ + public void setBlockArchiveHeight(int archiveHeight) throws DataException; + + + /** + * Returns the block archive data for a given signature, from the block archive. + *

+ * This method will return null if no block archive has been built for the + * requested signature. In those cases, the height (and other data) can be + * looked up using the Blocks table. This allows a block to be located in + * the archive when we only know its signature. + *

+ * + * @param signature + * @throws DataException + */ + public BlockArchiveData getBlockArchiveDataForSignature(byte[] signature) throws DataException; + + /** + * Saves a block archive entry into the repository. + *

+ * This can be used to find the height of a block by its signature, without + * having access to the block data itself. + *

+ * + * @param blockArchiveData + * @throws DataException + */ + public void save(BlockArchiveData blockArchiveData) throws DataException; + + /** + * Deletes a block archive entry from the repository. + * + * @param blockArchiveData + * @throws DataException + */ + public void delete(BlockArchiveData blockArchiveData) throws DataException; + +} diff --git a/src/main/java/org/qortal/repository/BlockArchiveWriter.java b/src/main/java/org/qortal/repository/BlockArchiveWriter.java new file mode 100644 index 00000000..39c28fd6 --- /dev/null +++ b/src/main/java/org/qortal/repository/BlockArchiveWriter.java @@ -0,0 +1,201 @@ +package org.qortal.repository; + +import com.google.common.primitives.Ints; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.block.Block; +import org.qortal.controller.Controller; +import org.qortal.data.block.BlockArchiveData; +import org.qortal.data.block.BlockData; +import org.qortal.settings.Settings; +import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformer; + +import java.io.ByteArrayOutputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +public class BlockArchiveWriter { + + public enum BlockArchiveWriteResult { + OK, + STOPPING, + NOT_ENOUGH_BLOCKS, + BLOCK_NOT_FOUND + } + + private static final Logger LOGGER = LogManager.getLogger(BlockArchiveWriter.class); + + public static final long DEFAULT_FILE_SIZE_TARGET = 100 * 1024 * 1024; // 100MiB + + private int startHeight; + private final int endHeight; + private final Repository repository; + + private long fileSizeTarget = DEFAULT_FILE_SIZE_TARGET; + private boolean shouldEnforceFileSizeTarget = true; + + private int writtenCount; + private int lastWrittenHeight; + private Path outputPath; + + public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) { + this.startHeight = startHeight; + this.endHeight = endHeight; + this.repository = repository; + } + + public static int getMaxArchiveHeight(Repository repository) throws DataException { + // We must only archive trimmed blocks, or the archive will grow far too large + final int accountSignaturesTrimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight(); + final int atTrimStartHeight = repository.getATRepository().getAtTrimHeight(); + final int trimStartHeight = Math.min(accountSignaturesTrimStartHeight, atTrimStartHeight); + return trimStartHeight - 1; // subtract 1 because these values represent the first _untrimmed_ block + } + + public static boolean isArchiverUpToDate(Repository repository) throws DataException { + final int maxArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + final int actualArchiveHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight(); + final float progress = (float)actualArchiveHeight / (float) maxArchiveHeight; + LOGGER.debug(String.format("maxArchiveHeight: %d, actualArchiveHeight: %d, progress: %f", + maxArchiveHeight, actualArchiveHeight, progress)); + + // If archiver is within 95% of the maximum, treat it as up to date + // We need several percent as an allowance because the archiver will only + // save files when they reach the target size + return (progress >= 0.95); + } + + public BlockArchiveWriteResult write() throws DataException, IOException, TransformationException, InterruptedException { + // Create the archive folder if it 
doesn't exist + // This is a subfolder of the db directory, to make bootstrapping easier + Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath(); + try { + Files.createDirectories(archivePath); + } catch (IOException e) { + LOGGER.info("Unable to create archive folder"); + throw new DataException("Unable to create archive folder"); + } + + // Determine start height of blocks to fetch + if (startHeight <= 2) { + // Skip genesis block, as it's not designed to be transmitted, and we can build that from blockchain.json + // TODO: include genesis block if we can + startHeight = 2; + } + + // Header bytes will store the block indexes + ByteArrayOutputStream headerBytes = new ByteArrayOutputStream(); + // Bytes will store the actual block data + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + LOGGER.info(String.format("Fetching blocks from height %d...", startHeight)); + int i = 0; + while (headerBytes.size() + bytes.size() < this.fileSizeTarget + || this.shouldEnforceFileSizeTarget == false) { + + if (Controller.isStopping()) { + return BlockArchiveWriteResult.STOPPING; + } + if (Controller.getInstance().isSynchronizing()) { + continue; + } + + int currentHeight = startHeight + i; + if (currentHeight > endHeight) { + break; + } + + //LOGGER.info("Fetching block {}...", currentHeight); + + BlockData blockData = repository.getBlockRepository().fromHeight(currentHeight); + if (blockData == null) { + return BlockArchiveWriteResult.BLOCK_NOT_FOUND; + } + + // Write the signature and height into the BlockArchive table + BlockArchiveData blockArchiveData = new BlockArchiveData(blockData); + repository.getBlockArchiveRepository().save(blockArchiveData); + repository.saveChanges(); + + // Write the block data to some byte buffers + Block block = new Block(repository, blockData); + int blockIndex = bytes.size(); + // Write block index to header + headerBytes.write(Ints.toByteArray(blockIndex)); + // Write block height + bytes.write(Ints.toByteArray(block.getBlockData().getHeight())); + byte[] blockBytes = BlockTransformer.toBytes(block); + // Write block length + bytes.write(Ints.toByteArray(blockBytes.length)); + // Write block bytes + bytes.write(blockBytes); + i++; + + } + int totalLength = headerBytes.size() + bytes.size(); + LOGGER.info(String.format("Total length of %d blocks is %d bytes", i, totalLength)); + + // Validate file size, in case something went wrong + if (totalLength < fileSizeTarget && this.shouldEnforceFileSizeTarget) { + return BlockArchiveWriteResult.NOT_ENOUGH_BLOCKS; + } + + // We have enough blocks to create a new file + int endHeight = startHeight + i - 1; + int version = 1; + String filePath = String.format("%s/%d-%d.dat", archivePath.toString(), startHeight, endHeight); + FileOutputStream fileOutputStream = new FileOutputStream(filePath); + // Write version number + fileOutputStream.write(Ints.toByteArray(version)); + // Write start height + fileOutputStream.write(Ints.toByteArray(startHeight)); + // Write end height + fileOutputStream.write(Ints.toByteArray(endHeight)); + // Write total count + fileOutputStream.write(Ints.toByteArray(i)); + // Write dynamic header (block indexes) segment length + fileOutputStream.write(Ints.toByteArray(headerBytes.size())); + // Write dynamic header (block indexes) data + headerBytes.writeTo(fileOutputStream); + // Write data segment (block data) length + fileOutputStream.write(Ints.toByteArray(bytes.size())); + // Write data + bytes.writeTo(fileOutputStream); + // Close the file + 
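// A minimal reading-side sketch (the helper below is illustrative and not part of this
// change), assuming the layout produced by the writes above: five big-endian ints
// (version, start height, end height, block count, header length), then the block-index
// segment, then an int data length followed by the per-block data (height, length, bytes).
static int[] readArchiveHeader(java.io.File archiveFile) throws java.io.IOException {
    try (java.io.DataInputStream in = new java.io.DataInputStream(
            new java.io.BufferedInputStream(new java.io.FileInputStream(archiveFile)))) {
        int version = in.readInt();      // always 1 for files written by this class
        int startHeight = in.readInt();  // first block height held in this file
        int endHeight = in.readInt();    // last block height held in this file
        int blockCount = in.readInt();   // number of blocks in the data segment
        int headerLength = in.readInt(); // length in bytes of the block-index segment
        return new int[] { version, startHeight, endHeight, blockCount, headerLength };
    }
}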
fileOutputStream.close(); + + // Invalidate cache so that the rest of the app picks up the new file + BlockArchiveReader.getInstance().invalidateFileListCache(); + + this.writtenCount = i; + this.lastWrittenHeight = endHeight; + this.outputPath = Paths.get(filePath); + return BlockArchiveWriteResult.OK; + } + + public int getWrittenCount() { + return this.writtenCount; + } + + public int getLastWrittenHeight() { + return this.lastWrittenHeight; + } + + public Path getOutputPath() { + return this.outputPath; + } + + public void setFileSizeTarget(long fileSizeTarget) { + this.fileSizeTarget = fileSizeTarget; + } + + // For testing, to avoid having to pre-calculate file sizes + public void setShouldEnforceFileSizeTarget(boolean shouldEnforceFileSizeTarget) { + this.shouldEnforceFileSizeTarget = shouldEnforceFileSizeTarget; + } + +} diff --git a/src/main/java/org/qortal/repository/BlockRepository.java b/src/main/java/org/qortal/repository/BlockRepository.java index 78eba399..76891c36 100644 --- a/src/main/java/org/qortal/repository/BlockRepository.java +++ b/src/main/java/org/qortal/repository/BlockRepository.java @@ -137,11 +137,6 @@ public interface BlockRepository { */ public List getBlockSummaries(int firstBlockHeight, int lastBlockHeight) throws DataException; - /** - * Returns block summaries for the passed height range, for API use. - */ - public List getBlockSummaries(Integer startHeight, Integer endHeight, Integer count) throws DataException; - /** Returns height of first trimmable online accounts signatures. */ public int getOnlineAccountsSignaturesTrimHeight() throws DataException; @@ -166,6 +161,20 @@ public interface BlockRepository { */ public BlockData getDetachedBlockSignature(int startHeight) throws DataException; + + /** Returns height of first prunable block. */ + public int getBlockPruneHeight() throws DataException; + + /** Sets new base height for block pruning. + *
+ * NOTE: performs implicit repository.saveChanges(). + */ + public void setBlockPruneHeight(int pruneHeight) throws DataException; + + /** Prunes full block data between passed heights. Returns number of pruned rows. */ + public int pruneBlocks(int minHeight, int maxHeight) throws DataException; + + /** * Saves block into repository. * diff --git a/src/main/java/org/qortal/repository/Bootstrap.java b/src/main/java/org/qortal/repository/Bootstrap.java new file mode 100644 index 00000000..6e72067e --- /dev/null +++ b/src/main/java/org/qortal/repository/Bootstrap.java @@ -0,0 +1,509 @@ +package org.qortal.repository; + +import org.apache.commons.io.FileUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.block.BlockChain; +import org.qortal.controller.Controller; +import org.qortal.crypto.Crypto; +import org.qortal.data.account.MintingAccountData; +import org.qortal.data.block.BlockData; +import org.qortal.data.crosschain.TradeBotData; +import org.qortal.gui.SplashFrame; +import org.qortal.repository.hsqldb.HSQLDBImportExport; +import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory; +import org.qortal.settings.Settings; +import org.qortal.utils.NTP; +import org.qortal.utils.SevenZ; + +import java.io.BufferedInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.file.*; +import java.security.SecureRandom; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.ReentrantLock; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + + +public class Bootstrap { + + private Repository repository; + + private int retryMinutes = 1; + + private static final Logger LOGGER = LogManager.getLogger(Bootstrap.class); + + /** The maximum number of untrimmed blocks allowed to be included in a bootstrap, beyond the trim threshold */ + private static final int MAXIMUM_UNTRIMMED_BLOCKS = 100; + + /** The maximum number of unpruned blocks allowed to be included in a bootstrap, beyond the prune threshold */ + private static final int MAXIMUM_UNPRUNED_BLOCKS = 100; + + + public Bootstrap() { + } + + public Bootstrap(Repository repository) { + this.repository = repository; + } + + /** + * canCreateBootstrap() + * Performs basic initial checks to ensure everything is in order + * @return true if ready for bootstrap creation, or an exception if not + * All failure reasons are logged and included in the exception + * @throws DataException + */ + public boolean checkRepositoryState() throws DataException { + LOGGER.info("Checking repository state..."); + + final boolean isTopOnly = Settings.getInstance().isTopOnly(); + final boolean archiveEnabled = Settings.getInstance().isArchiveEnabled(); + + // Make sure we have a repository instance + if (repository == null) { + throw new DataException("Repository instance required to check if we can create a bootstrap."); + } + + // Require that a block archive has been built + if (!isTopOnly && !archiveEnabled) { + throw new DataException("Unable to create bootstrap because the block archive isn't enabled. 
" + + "Set {\"archivedEnabled\": true} in settings.json to fix."); + } + + // Make sure that the block archiver is up to date + boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository); + if (!upToDate) { + throw new DataException("Unable to create bootstrap because the block archive isn't fully built yet."); + } + + // Ensure that this database contains the ATStatesHeightIndex which was missing in some cases + boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex(); + if (!hasAtStatesHeightIndex) { + throw new DataException("Unable to create bootstrap due to missing ATStatesHeightIndex. A re-sync from genesis is needed."); + } + + // Ensure we have synced NTP time + if (NTP.getTime() == null) { + throw new DataException("Unable to create bootstrap because the node hasn't synced its time yet."); + } + + // Ensure the chain is synced + final BlockData chainTip = Controller.getInstance().getChainTip(); + final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp(); + if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) { + throw new DataException("Unable to create bootstrap because the blockchain isn't fully synced."); + } + + // FUTURE: ensure trim and prune settings are using default values + + if (!isTopOnly) { + // We don't trim in top-only mode because we prune the blocks instead + // If we're not in top-only mode we should make sure that trimming is up to date + + // Ensure that the online account signatures have been fully trimmed + final int accountsTrimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight(); + final long accountsUpperTrimmableTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime(); + final int accountsUpperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(accountsUpperTrimmableTimestamp); + final int accountsBlocksRemaining = accountsUpperTrimmableHeight - accountsTrimStartHeight; + if (accountsBlocksRemaining > MAXIMUM_UNTRIMMED_BLOCKS) { + throw new DataException(String.format("Blockchain is not fully trimmed. Please allow the node to run for longer, " + + "then try again. Blocks remaining (online accounts signatures): %d", accountsBlocksRemaining)); + } + + // Ensure that the AT states data has been fully trimmed + final int atTrimStartHeight = repository.getATRepository().getAtTrimHeight(); + final long atUpperTrimmableTimestamp = chainTip.getTimestamp() - Settings.getInstance().getAtStatesMaxLifetime(); + final int atUpperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(atUpperTrimmableTimestamp); + final int atBlocksRemaining = atUpperTrimmableHeight - atTrimStartHeight; + if (atBlocksRemaining > MAXIMUM_UNTRIMMED_BLOCKS) { + throw new DataException(String.format("Blockchain is not fully trimmed. Please allow the node to run " + + "for longer, then try again. 
Blocks remaining (AT states): %d", atBlocksRemaining)); + } + } + + // Ensure that blocks have been fully pruned + final int blockPruneStartHeight = repository.getBlockRepository().getBlockPruneHeight(); + int blockUpperPrunableHeight = chainTip.getHeight() - Settings.getInstance().getPruneBlockLimit(); + if (archiveEnabled) { + blockUpperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1; + } + final int blocksPruneRemaining = blockUpperPrunableHeight - blockPruneStartHeight; + if (blocksPruneRemaining > MAXIMUM_UNPRUNED_BLOCKS) { + throw new DataException(String.format("Blockchain is not fully pruned. Please allow the node to run " + + "for longer, then try again. Blocks remaining: %d", blocksPruneRemaining)); + } + + // Ensure that AT states have been fully pruned + final int atPruneStartHeight = repository.getATRepository().getAtPruneHeight(); + int atUpperPrunableHeight = chainTip.getHeight() - Settings.getInstance().getPruneBlockLimit(); + if (archiveEnabled) { + atUpperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1; + } + final int atPruneRemaining = atUpperPrunableHeight - atPruneStartHeight; + if (atPruneRemaining > MAXIMUM_UNPRUNED_BLOCKS) { + throw new DataException(String.format("Blockchain is not fully pruned. Please allow the node to run " + + "for longer, then try again. Blocks remaining (AT states): %d", atPruneRemaining)); + } + + LOGGER.info("Repository state checks passed"); + return true; + } + + /** + * validateBlockchain + * Performs quick validation of recent blocks in blockchain, prior to creating a bootstrap + * @return true if valid, an exception if not + * @throws DataException + */ + public boolean validateBlockchain() throws DataException { + LOGGER.info("Validating blockchain..."); + + try { + BlockChain.validate(); + + LOGGER.info("Blockchain is valid"); + + return true; + } catch (DataException e) { + throw new DataException(String.format("Blockchain validation failed: %s", e.getMessage())); + } + } + + /** + * validateCompleteBlockchain + * Performs intensive validation of all blocks in blockchain + * @return true if valid, false if not + */ + public boolean validateCompleteBlockchain() { + LOGGER.info("Validating blockchain..."); + + try { + // Perform basic startup validation + BlockChain.validate(); + + // Perform more intensive full-chain validation + BlockChain.validateAllBlocks(); + + LOGGER.info("Blockchain is valid"); + + return true; + } catch (DataException e) { + LOGGER.info("Blockchain validation failed: {}", e.getMessage()); + return false; + } + } + + public String create() throws DataException, InterruptedException, IOException { + + // Make sure we have a repository instance + if (repository == null) { + throw new DataException("Repository instance required in order to create a boostrap"); + } + + LOGGER.info("Deleting temp directory if it exists..."); + this.deleteAllTempDirectories(); + + LOGGER.info("Acquiring blockchain lock..."); + ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); + blockchainLock.lockInterruptibly(); + + Path inputPath = null; + Path outputPath = null; + + try { + + LOGGER.info("Exporting local data..."); + repository.exportNodeLocalData(); + + LOGGER.info("Deleting trade bot states..."); + List allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData(); + for (TradeBotData tradeBotData : allTradeBotData) { + repository.getCrossChainRepository().delete(tradeBotData.getTradePrivateKey()); + } + + 
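// The trade-bot states deleted above and the minting accounts deleted just below are not
// lost: they were exported to JSON by exportNodeLocalData() earlier in this method and are
// re-imported in the finally block, so only the bootstrap itself ships without them.
//
// A minimal verification sketch for the ".sha256" file written further down in this method
// (the helper name and the use of java.security.MessageDigest are assumptions, not part of
// this change; it assumes the file holds the hex-encoded SHA-256 digest of the compressed
// bootstrap):
static boolean verifyBootstrapChecksum(java.nio.file.Path archive, java.nio.file.Path checksumFile)
        throws java.io.IOException, java.security.NoSuchAlgorithmException {
    java.security.MessageDigest sha256 = java.security.MessageDigest.getInstance("SHA-256");
    try (java.io.InputStream in = java.nio.file.Files.newInputStream(archive)) {
        byte[] buffer = new byte[1024 * 1024];
        int read;
        while ((read = in.read(buffer)) != -1) {
            sha256.update(buffer, 0, read);
        }
    }
    StringBuilder hex = new StringBuilder();
    for (byte b : sha256.digest()) {
        hex.append(String.format("%02x", b));
    }
    String expected = java.nio.file.Files.readString(checksumFile).trim();
    return expected.equalsIgnoreCase(hex.toString());
}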
LOGGER.info("Deleting minting accounts..."); + List mintingAccounts = repository.getAccountRepository().getMintingAccounts(); + for (MintingAccountData mintingAccount : mintingAccounts) { + repository.getAccountRepository().delete(mintingAccount.getPrivateKey()); + } + + repository.saveChanges(); + + LOGGER.info("Deleting peers list..."); + repository.getNetworkRepository().deleteAllPeers(); + repository.saveChanges(); + + LOGGER.info("Creating bootstrap..."); + // Timeout if the database isn't ready for backing up after 10 seconds + long timeout = 10 * 1000L; + repository.backup(false, "bootstrap", timeout); + + LOGGER.info("Moving files to output directory..."); + inputPath = Paths.get(Settings.getInstance().getRepositoryPath(), "bootstrap"); + outputPath = Paths.get(this.createTempDirectory().toString(), "bootstrap"); + + + // Move the db backup to a "bootstrap" folder in the root directory + Files.move(inputPath, outputPath, REPLACE_EXISTING); + + // If in archive mode, copy the archive folder to inside the bootstrap folder + if (!Settings.getInstance().isTopOnly() && Settings.getInstance().isArchiveEnabled()) { + FileUtils.copyDirectory( + Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toFile(), + Paths.get(outputPath.toString(), "archive").toFile() + ); + } + + LOGGER.info("Preparing output path..."); + Path compressedOutputPath = this.getBootstrapOutputPath(); + try { + Files.delete(compressedOutputPath); + } catch (NoSuchFileException e) { + // Doesn't exist, so no need to delete + } + + LOGGER.info("Compressing..."); + SevenZ.compress(compressedOutputPath.toString(), outputPath.toFile()); + + LOGGER.info("Generating checksum file..."); + String checksum = Crypto.digestHexString(compressedOutputPath.toFile(), 1024*1024); + Path checksumPath = Paths.get(String.format("%s.sha256", compressedOutputPath.toString())); + Files.writeString(checksumPath, checksum, StandardOpenOption.CREATE); + + // Return the path to the compressed bootstrap file + LOGGER.info("Bootstrap creation complete. Output file: {}", compressedOutputPath.toAbsolutePath().toString()); + return compressedOutputPath.toAbsolutePath().toString(); + + } + catch (TimeoutException e) { + throw new DataException(String.format("Unable to create bootstrap due to timeout: %s", e.getMessage())); + } + finally { + try { + LOGGER.info("Re-importing local data..."); + Path exportPath = HSQLDBImportExport.getExportDirectory(false); + repository.importDataFromFile(Paths.get(exportPath.toString(), "TradeBotStates.json").toString()); + repository.importDataFromFile(Paths.get(exportPath.toString(), "MintingAccounts.json").toString()); + repository.saveChanges(); + + } catch (IOException e) { + LOGGER.info("Unable to re-import local data, but created bootstrap is still valid. {}", e); + } + + LOGGER.info("Unlocking blockchain..."); + blockchainLock.unlock(); + + // Cleanup + LOGGER.info("Cleaning up..."); + Thread.sleep(5000L); + this.deleteAllTempDirectories(); + } + } + + public void startImport() throws InterruptedException { + while (!Controller.isStopping()) { + try (final Repository repository = RepositoryManager.getRepository()) { + this.repository = repository; + + this.updateStatus("Starting import of bootstrap..."); + + this.doImport(); + break; + + } catch (DataException e) { + LOGGER.info("Bootstrap import failed", e); + this.updateStatus(String.format("Bootstrapping failed. 
Retrying in %d minutes...", retryMinutes)); + Thread.sleep(retryMinutes * 60 * 1000L); + retryMinutes *= 2; + } + } + } + + private void doImport() throws DataException { + Path path = null; + try { + Path tempDir = this.createTempDirectory(); + String filename = String.format("%s%s", Settings.getInstance().getBootstrapFilenamePrefix(), this.getFilename()); + path = Paths.get(tempDir.toString(), filename); + + this.downloadToPath(path); + this.importFromPath(path); + + } catch (InterruptedException | DataException | IOException e) { + throw new DataException("Unable to import bootstrap", e); + } + finally { + if (path != null) { + try { + Files.delete(path); + + } catch (IOException e) { + // Temp folder will be cleaned up below, so ignore this failure + } + } + this.deleteAllTempDirectories(); + } + } + + private String getFilename() { + boolean isTopOnly = Settings.getInstance().isTopOnly(); + boolean archiveEnabled = Settings.getInstance().isArchiveEnabled(); + boolean isTestnet = Settings.getInstance().isTestNet(); + String prefix = isTestnet ? "testnet-" : ""; + + if (isTopOnly) { + return prefix.concat("bootstrap-toponly.7z"); + } + else if (archiveEnabled) { + return prefix.concat("bootstrap-archive.7z"); + } + else { + return prefix.concat("bootstrap-full.7z"); + } + } + + private void downloadToPath(Path path) throws DataException { + String bootstrapHost = this.getRandomHost(); + String bootstrapFilename = this.getFilename(); + String bootstrapUrl = String.format("%s/%s", bootstrapHost, bootstrapFilename); + String type = Settings.getInstance().isTopOnly() ? "top-only" : "full node"; + + SplashFrame.getInstance().updateStatus(String.format("Downloading %s bootstrap...", type)); + LOGGER.info(String.format("Downloading %s bootstrap from %s ...", type, bootstrapUrl)); + + // Delete an existing file if it exists + try { + Files.delete(path); + } catch (IOException e) { + // No need to do anything + } + + // Get the total file size + URL url; + long fileSize; + try { + url = new URL(bootstrapUrl); + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); + connection.setRequestMethod("HEAD"); + connection.connect(); + fileSize = connection.getContentLengthLong(); + connection.disconnect(); + + } catch (MalformedURLException e) { + throw new DataException(String.format("Malformed URL when downloading bootstrap: %s", e.getMessage())); + } catch (IOException e) { + throw new DataException(String.format("Unable to get bootstrap file size from %s. " + + "Please check your internet connection.", e.getMessage())); + } + + // Download the file and update the status with progress + try (BufferedInputStream in = new BufferedInputStream(url.openStream()); + FileOutputStream fileOutputStream = new FileOutputStream(path.toFile())) { + byte[] buffer = new byte[1024 * 1024]; + long downloaded = 0; + int bytesRead; + while ((bytesRead = in.read(buffer, 0, 1024)) != -1) { + fileOutputStream.write(buffer, 0, bytesRead); + downloaded += bytesRead; + + if (fileSize > 0) { + int progress = (int)((double)downloaded / (double)fileSize * 100); + SplashFrame.getInstance().updateStatus(String.format("Downloading %s bootstrap... 
(%d%%)", type, progress)); + } + } + + } catch (IOException e) { + throw new DataException(String.format("Unable to download bootstrap: %s", e.getMessage())); + } + } + + public String getRandomHost() { + // Select a random host from bootstrapHosts + String[] hosts = Settings.getInstance().getBootstrapHosts(); + int index = new SecureRandom().nextInt(hosts.length); + String bootstrapHost = hosts[index]; + return bootstrapHost; + } + + public void importFromPath(Path path) throws InterruptedException, DataException, IOException { + + ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); + blockchainLock.lockInterruptibly(); + + try { + this.updateStatus("Stopping repository..."); + // Close the repository while we are still able to + // Otherwise, the caller will run into difficulties when it tries to close it + repository.discardChanges(); + repository.close(); + // Now close the repository factory so that we can swap out the database files + RepositoryManager.closeRepositoryFactory(); + + this.updateStatus("Deleting existing repository..."); + Path input = path.toAbsolutePath(); + Path output = path.toAbsolutePath().getParent().toAbsolutePath(); + Path inputPath = Paths.get(output.toString(), "bootstrap"); + Path outputPath = Paths.get(Settings.getInstance().getRepositoryPath()); + FileUtils.deleteDirectory(outputPath.toFile()); + + this.updateStatus("Extracting bootstrap..."); + SevenZ.decompress(input.toString(), output.toFile()); + + if (!inputPath.toFile().exists()) { + throw new DataException("Extracted bootstrap doesn't exist"); + } + + // Move the "bootstrap" folder in place of the "db" folder + this.updateStatus("Moving files to output directory..."); + Files.move(inputPath, outputPath); + + this.updateStatus("Starting repository from bootstrap..."); + } + finally { + RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(Controller.getRepositoryUrl()); + RepositoryManager.setRepositoryFactory(repositoryFactory); + + blockchainLock.unlock(); + } + } + + private Path createTempDirectory() throws IOException { + Path initialPath = Paths.get(Settings.getInstance().getRepositoryPath()).toAbsolutePath().getParent(); + String baseDir = Paths.get(initialPath.toString(), "tmp").toFile().getCanonicalPath(); + String identifier = UUID.randomUUID().toString(); + Path tempDir = Paths.get(baseDir, identifier); + Files.createDirectories(tempDir); + return tempDir; + } + + private void deleteAllTempDirectories() { + Path initialPath = Paths.get(Settings.getInstance().getRepositoryPath()).toAbsolutePath().getParent(); + Path path = Paths.get(initialPath.toString(), "tmp"); + try { + FileUtils.deleteDirectory(path.toFile()); + } catch (IOException e) { + LOGGER.info("Unable to delete temp directory path: {}", path.toString(), e); + } + } + + public Path getBootstrapOutputPath() { + Path initialPath = Paths.get(Settings.getInstance().getRepositoryPath()).toAbsolutePath().getParent(); + String compressedFilename = String.format("%s%s", Settings.getInstance().getBootstrapFilenamePrefix(), this.getFilename()); + Path compressedOutputPath = Paths.get(initialPath.toString(), compressedFilename); + return compressedOutputPath; + } + + private void updateStatus(String text) { + LOGGER.info(text); + SplashFrame.getInstance().updateStatus(text); + } + +} diff --git a/src/main/java/org/qortal/repository/Repository.java b/src/main/java/org/qortal/repository/Repository.java index 656e6e1e..c0bdb0d9 100644 --- a/src/main/java/org/qortal/repository/Repository.java +++ 
b/src/main/java/org/qortal/repository/Repository.java @@ -1,5 +1,8 @@ package org.qortal.repository; +import java.io.IOException; +import java.util.concurrent.TimeoutException; + public interface Repository extends AutoCloseable { public ATRepository getATRepository(); @@ -12,6 +15,8 @@ public interface Repository extends AutoCloseable { public BlockRepository getBlockRepository(); + public BlockArchiveRepository getBlockArchiveRepository(); + public ChatRepository getChatRepository(); public CrossChainRepository getCrossChainRepository(); @@ -45,14 +50,16 @@ public interface Repository extends AutoCloseable { public void setDebug(boolean debugState); - public void backup(boolean quick) throws DataException; + public void backup(boolean quick, String name, Long timeout) throws DataException, TimeoutException; - public void performPeriodicMaintenance() throws DataException; + public void performPeriodicMaintenance(Long timeout) throws DataException, TimeoutException; public void exportNodeLocalData() throws DataException; - public void importDataFromFile(String filename) throws DataException; + public void importDataFromFile(String filename) throws DataException, IOException; public void checkConsistency() throws DataException; + public static void attemptRecovery(String connectionUrl, String name) throws DataException {} + } diff --git a/src/main/java/org/qortal/repository/RepositoryManager.java b/src/main/java/org/qortal/repository/RepositoryManager.java index df578888..714ada28 100644 --- a/src/main/java/org/qortal/repository/RepositoryManager.java +++ b/src/main/java/org/qortal/repository/RepositoryManager.java @@ -1,8 +1,18 @@ package org.qortal.repository; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.gui.SplashFrame; +import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving; +import org.qortal.repository.hsqldb.HSQLDBDatabasePruning; +import org.qortal.repository.hsqldb.HSQLDBRepository; +import org.qortal.settings.Settings; + import java.sql.SQLException; +import java.util.concurrent.TimeoutException; public abstract class RepositoryManager { + private static final Logger LOGGER = LogManager.getLogger(RepositoryManager.class); private static RepositoryFactory repositoryFactory = null; @@ -43,14 +53,60 @@ public abstract class RepositoryManager { repositoryFactory = null; } - public static void backup(boolean quick) { + public static void backup(boolean quick, String name, Long timeout) throws TimeoutException { try (final Repository repository = getRepository()) { - repository.backup(quick); + repository.backup(quick, name, timeout); } catch (DataException e) { // Backup is best-effort so don't complain } } + public static boolean archive(Repository repository) { + // Bulk archive the database the first time we use archive mode + if (Settings.getInstance().isArchiveEnabled()) { + if (RepositoryManager.canArchiveOrPrune()) { + try { + return HSQLDBDatabaseArchiving.buildBlockArchive(repository, BlockArchiveWriter.DEFAULT_FILE_SIZE_TARGET); + + } catch (DataException e) { + LOGGER.info("Unable to build block archive. The database may have been left in an inconsistent state."); + } + } + else { + LOGGER.info("Unable to build block archive due to missing ATStatesHeightIndex. Bootstrapping is recommended."); + LOGGER.info("To bootstrap, stop the core and delete the db folder, then start the core again."); + SplashFrame.getInstance().updateStatus("Missing index. 
Bootstrapping is recommended."); + } + } + return false; + } + + public static boolean prune(Repository repository) { + // Bulk prune the database the first time we use top-only or block archive mode + if (Settings.getInstance().isTopOnly() || + Settings.getInstance().isArchiveEnabled()) { + if (RepositoryManager.canArchiveOrPrune()) { + try { + boolean prunedATStates = HSQLDBDatabasePruning.pruneATStates((HSQLDBRepository) repository); + boolean prunedBlocks = HSQLDBDatabasePruning.pruneBlocks((HSQLDBRepository) repository); + + // Perform repository maintenance to shrink the db size down + if (prunedATStates && prunedBlocks) { + HSQLDBDatabasePruning.performMaintenance(repository); + return true; + } + + } catch (SQLException | DataException e) { + LOGGER.info("Unable to bulk prune AT states. The database may have been left in an inconsistent state."); + } + } + else { + LOGGER.info("Unable to prune blocks due to missing ATStatesHeightIndex. Bootstrapping is recommended."); + } + } + return false; + } + public static void setRequestedCheckpoint(Boolean quick) { quickCheckpointRequested = quick; } @@ -77,4 +133,12 @@ public abstract class RepositoryManager { return SQLException.class.isInstance(cause) && repositoryFactory.isDeadlockException((SQLException) cause); } + public static boolean canArchiveOrPrune() { + try (final Repository repository = getRepository()) { + return repository.getATRepository().hasAtStatesHeightIndex(); + } catch (DataException e) { + return false; + } + } + } diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBATRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBATRepository.java index c21dbf8c..04823925 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBATRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBATRepository.java @@ -8,6 +8,7 @@ import java.util.Set; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; import org.qortal.data.at.ATData; import org.qortal.data.at.ATStateData; import org.qortal.repository.ATRepository; @@ -32,7 +33,7 @@ public class HSQLDBATRepository implements ATRepository { public ATData fromATAddress(String atAddress) throws DataException { String sql = "SELECT creator, created_when, version, asset_id, code_bytes, code_hash, " + "is_sleeping, sleep_until_height, is_finished, had_fatal_error, " - + "is_frozen, frozen_balance " + + "is_frozen, frozen_balance, sleep_until_message_timestamp " + "FROM ATs " + "WHERE AT_address = ? 
LIMIT 1"; @@ -60,8 +61,13 @@ public class HSQLDBATRepository implements ATRepository { if (frozenBalance == 0 && resultSet.wasNull()) frozenBalance = null; + Long sleepUntilMessageTimestamp = resultSet.getLong(13); + if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull()) + sleepUntilMessageTimestamp = null; + return new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash, - isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance); + isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance, + sleepUntilMessageTimestamp); } catch (SQLException e) { throw new DataException("Unable to fetch AT from repository", e); } @@ -94,7 +100,7 @@ public class HSQLDBATRepository implements ATRepository { public List getAllExecutableATs() throws DataException { String sql = "SELECT AT_address, creator, created_when, version, asset_id, code_bytes, code_hash, " + "is_sleeping, sleep_until_height, had_fatal_error, " - + "is_frozen, frozen_balance " + + "is_frozen, frozen_balance, sleep_until_message_timestamp " + "FROM ATs " + "WHERE is_finished = false " + "ORDER BY created_when ASC"; @@ -128,8 +134,13 @@ public class HSQLDBATRepository implements ATRepository { if (frozenBalance == 0 && resultSet.wasNull()) frozenBalance = null; + Long sleepUntilMessageTimestamp = resultSet.getLong(13); + if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull()) + sleepUntilMessageTimestamp = null; + ATData atData = new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash, - isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance); + isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance, + sleepUntilMessageTimestamp); executableATs.add(atData); } while (resultSet.next()); @@ -147,7 +158,7 @@ public class HSQLDBATRepository implements ATRepository { sql.append("SELECT AT_address, creator, created_when, version, asset_id, code_bytes, ") .append("is_sleeping, sleep_until_height, is_finished, had_fatal_error, ") - .append("is_frozen, frozen_balance ") + .append("is_frozen, frozen_balance, sleep_until_message_timestamp ") .append("FROM ATs ") .append("WHERE code_hash = ? "); bindParams.add(codeHash); @@ -191,8 +202,13 @@ public class HSQLDBATRepository implements ATRepository { if (frozenBalance == 0 && resultSet.wasNull()) frozenBalance = null; + Long sleepUntilMessageTimestamp = resultSet.getLong(13); + if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull()) + sleepUntilMessageTimestamp = null; + ATData atData = new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash, - isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance); + isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance, + sleepUntilMessageTimestamp); matchingATs.add(atData); } while (resultSet.next()); @@ -210,7 +226,7 @@ public class HSQLDBATRepository implements ATRepository { sql.append("SELECT AT_address, creator, created_when, version, asset_id, code_bytes, ") .append("is_sleeping, sleep_until_height, is_finished, had_fatal_error, ") - .append("is_frozen, frozen_balance, code_hash ") + .append("is_frozen, frozen_balance, code_hash, sleep_until_message_timestamp ") .append("FROM "); // (VALUES (?), (?), ...) 
AS ATCodeHashes (code_hash) @@ -264,9 +280,10 @@ public class HSQLDBATRepository implements ATRepository { frozenBalance = null; byte[] codeHash = resultSet.getBytes(13); + Long sleepUntilMessageTimestamp = resultSet.getLong(14); ATData atData = new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash, - isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance); + isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance, sleepUntilMessageTimestamp); matchingATs.add(atData); } while (resultSet.next()); @@ -305,7 +322,7 @@ public class HSQLDBATRepository implements ATRepository { .bind("code_bytes", atData.getCodeBytes()).bind("code_hash", atData.getCodeHash()) .bind("is_sleeping", atData.getIsSleeping()).bind("sleep_until_height", atData.getSleepUntilHeight()) .bind("is_finished", atData.getIsFinished()).bind("had_fatal_error", atData.getHadFatalError()).bind("is_frozen", atData.getIsFrozen()) - .bind("frozen_balance", atData.getFrozenBalance()); + .bind("frozen_balance", atData.getFrozenBalance()).bind("sleep_until_message_timestamp", atData.getSleepUntilMessageTimestamp()); try { saveHelper.execute(this.repository); @@ -328,7 +345,7 @@ public class HSQLDBATRepository implements ATRepository { @Override public ATStateData getATStateAtHeight(String atAddress, int height) throws DataException { - String sql = "SELECT state_data, state_hash, fees, is_initial " + String sql = "SELECT state_data, state_hash, fees, is_initial, sleep_until_message_timestamp " + "FROM ATStates " + "LEFT OUTER JOIN ATStatesData USING (AT_address, height) " + "WHERE ATStates.AT_address = ? AND ATStates.height = ? " @@ -343,7 +360,11 @@ public class HSQLDBATRepository implements ATRepository { long fees = resultSet.getLong(3); boolean isInitial = resultSet.getBoolean(4); - return new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial); + Long sleepUntilMessageTimestamp = resultSet.getLong(5); + if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull()) + sleepUntilMessageTimestamp = null; + + return new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial, sleepUntilMessageTimestamp); } catch (SQLException e) { throw new DataException("Unable to fetch AT state from repository", e); } @@ -351,7 +372,7 @@ public class HSQLDBATRepository implements ATRepository { @Override public ATStateData getLatestATState(String atAddress) throws DataException { - String sql = "SELECT height, state_data, state_hash, fees, is_initial " + String sql = "SELECT height, state_data, state_hash, fees, is_initial, sleep_until_message_timestamp " + "FROM ATStates " + "JOIN ATStatesData USING (AT_address, height) " + "WHERE ATStates.AT_address = ? 
" @@ -370,7 +391,11 @@ public class HSQLDBATRepository implements ATRepository { long fees = resultSet.getLong(4); boolean isInitial = resultSet.getBoolean(5); - return new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial); + Long sleepUntilMessageTimestamp = resultSet.getLong(6); + if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull()) + sleepUntilMessageTimestamp = null; + + return new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial, sleepUntilMessageTimestamp); } catch (SQLException e) { throw new DataException("Unable to fetch latest AT state from repository", e); } @@ -383,10 +408,10 @@ public class HSQLDBATRepository implements ATRepository { StringBuilder sql = new StringBuilder(1024); List bindParams = new ArrayList<>(); - sql.append("SELECT AT_address, height, state_data, state_hash, fees, is_initial " + sql.append("SELECT AT_address, height, state_data, state_hash, fees, is_initial, FinalATStates.sleep_until_message_timestamp " + "FROM ATs " + "CROSS JOIN LATERAL(" - + "SELECT height, state_data, state_hash, fees, is_initial " + + "SELECT height, state_data, state_hash, fees, is_initial, sleep_until_message_timestamp " + "FROM ATStates " + "JOIN ATStatesData USING (AT_address, height) " + "WHERE ATStates.AT_address = ATs.AT_address "); @@ -440,7 +465,11 @@ public class HSQLDBATRepository implements ATRepository { long fees = resultSet.getLong(5); boolean isInitial = resultSet.getBoolean(6); - ATStateData atStateData = new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial); + Long sleepUntilMessageTimestamp = resultSet.getLong(7); + if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull()) + sleepUntilMessageTimestamp = null; + + ATStateData atStateData = new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial, sleepUntilMessageTimestamp); atStates.add(atStateData); } while (resultSet.next()); @@ -471,7 +500,7 @@ public class HSQLDBATRepository implements ATRepository { StringBuilder sql = new StringBuilder(1024); List bindParams = new ArrayList<>(); - sql.append("SELECT AT_address, height, state_data, state_hash, fees, is_initial " + sql.append("SELECT AT_address, height, state_data, state_hash, fees, is_initial, sleep_until_message_timestamp " + "FROM ATs " + "CROSS JOIN LATERAL(" + "SELECT height, state_data, state_hash, fees, is_initial " @@ -526,8 +555,10 @@ public class HSQLDBATRepository implements ATRepository { byte[] stateHash = resultSet.getBytes(4); long fees = resultSet.getLong(5); boolean isInitial = resultSet.getBoolean(6); + Long sleepUntilMessageTimestamp = resultSet.getLong(7); - ATStateData atStateData = new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial); + ATStateData atStateData = new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial, + sleepUntilMessageTimestamp); atStates.add(atStateData); } while (resultSet.next()); @@ -570,6 +601,44 @@ public class HSQLDBATRepository implements ATRepository { return atStates; } + + @Override + public void rebuildLatestAtStates() throws DataException { + // latestATStatesLock is to prevent concurrent updates on LatestATStates + // that could result in one process using a partial or empty dataset + // because it was in the process of being rebuilt by another thread + synchronized (this.repository.latestATStatesLock) { + LOGGER.trace("Rebuilding latest AT states..."); + + // Rebuild cache of latest AT states that we can't trim + String deleteSql = "DELETE FROM LatestATStates"; + try { + 
this.repository.executeCheckedUpdate(deleteSql); + } catch (SQLException e) { + repository.examineException(e); + throw new DataException("Unable to delete temporary latest AT states cache from repository", e); + } + + String insertSql = "INSERT INTO LatestATStates (" + + "SELECT AT_address, height FROM ATs " + + "CROSS JOIN LATERAL(" + + "SELECT height FROM ATStates " + + "WHERE ATStates.AT_address = ATs.AT_address " + + "ORDER BY AT_address DESC, height DESC LIMIT 1" + + ") " + + ")"; + try { + this.repository.executeCheckedUpdate(insertSql); + } catch (SQLException e) { + repository.examineException(e); + throw new DataException("Unable to populate temporary latest AT states cache in repository", e); + } + this.repository.saveChanges(); + LOGGER.trace("Rebuilt latest AT states"); + } + } + + @Override public int getAtTrimHeight() throws DataException { String sql = "SELECT AT_trim_height FROM DatabaseInfo"; @@ -595,63 +664,153 @@ public class HSQLDBATRepository implements ATRepository { this.repository.executeCheckedUpdate(updateSql, trimHeight); this.repository.saveChanges(); } catch (SQLException e) { - repository.examineException(e); + this.repository.examineException(e); throw new DataException("Unable to set AT state trim height in repository", e); } } } - @Override - public void prepareForAtStateTrimming() throws DataException { - // Rebuild cache of latest AT states that we can't trim - String deleteSql = "DELETE FROM LatestATStates"; - try { - this.repository.executeCheckedUpdate(deleteSql); - } catch (SQLException e) { - repository.examineException(e); - throw new DataException("Unable to delete temporary latest AT states cache from repository", e); - } - - String insertSql = "INSERT INTO LatestATStates (" - + "SELECT AT_address, height FROM ATs " - + "CROSS JOIN LATERAL(" - + "SELECT height FROM ATStates " - + "WHERE ATStates.AT_address = ATs.AT_address " - + "ORDER BY AT_address DESC, height DESC LIMIT 1" - + ") " - + ")"; - try { - this.repository.executeCheckedUpdate(insertSql); - } catch (SQLException e) { - repository.examineException(e); - throw new DataException("Unable to populate temporary latest AT states cache in repository", e); - } - } - @Override public int trimAtStates(int minHeight, int maxHeight, int limit) throws DataException { if (minHeight >= maxHeight) return 0; - // We're often called so no need to trim all states in one go. - // Limit updates to reduce CPU and memory load. - String sql = "DELETE FROM ATStatesData " - + "WHERE height BETWEEN ? AND ? " - + "AND NOT EXISTS(" + // latestATStatesLock is to prevent concurrent updates on LatestATStates + // that could result in one process using a partial or empty dataset + // because it was in the process of being rebuilt by another thread + synchronized (this.repository.latestATStatesLock) { + + // We're often called so no need to trim all states in one go. + // Limit updates to reduce CPU and memory load. + String sql = "DELETE FROM ATStatesData " + + "WHERE height BETWEEN ? AND ? 
" + + "AND NOT EXISTS(" + "SELECT TRUE FROM LatestATStates " + "WHERE LatestATStates.AT_address = ATStatesData.AT_address " + "AND LatestATStates.height = ATStatesData.height" - + ") " - + "LIMIT ?"; + + ") " + + "LIMIT ?"; - try { - return this.repository.executeCheckedUpdate(sql, minHeight, maxHeight, limit); - } catch (SQLException e) { - repository.examineException(e); - throw new DataException("Unable to trim AT states in repository", e); + try { + int modifiedRows = this.repository.executeCheckedUpdate(sql, minHeight, maxHeight, limit); + this.repository.saveChanges(); + return modifiedRows; + + } catch (SQLException e) { + repository.examineException(e); + throw new DataException("Unable to trim AT states in repository", e); + } } } + + @Override + public int getAtPruneHeight() throws DataException { + String sql = "SELECT AT_prune_height FROM DatabaseInfo"; + + try (ResultSet resultSet = this.repository.checkedExecute(sql)) { + if (resultSet == null) + return 0; + + return resultSet.getInt(1); + } catch (SQLException e) { + throw new DataException("Unable to fetch AT state prune height from repository", e); + } + } + + @Override + public void setAtPruneHeight(int pruneHeight) throws DataException { + // trimHeightsLock is to prevent concurrent update on DatabaseInfo + // that could result in "transaction rollback: serialization failure" + synchronized (this.repository.trimHeightsLock) { + String updateSql = "UPDATE DatabaseInfo SET AT_prune_height = ?"; + + try { + this.repository.executeCheckedUpdate(updateSql, pruneHeight); + this.repository.saveChanges(); + } catch (SQLException e) { + repository.examineException(e); + throw new DataException("Unable to set AT state prune height in repository", e); + } + } + } + + @Override + public int pruneAtStates(int minHeight, int maxHeight) throws DataException { + // latestATStatesLock is to prevent concurrent updates on LatestATStates + // that could result in one process using a partial or empty dataset + // because it was in the process of being rebuilt by another thread + synchronized (this.repository.latestATStatesLock) { + + int deletedCount = 0; + + for (int height = minHeight; height <= maxHeight; height++) { + + // Give up if we're stopping + if (Controller.isStopping()) { + return deletedCount; + } + + // Get latest AT states for this height + List atAddresses = new ArrayList<>(); + String updateSql = "SELECT AT_address FROM LatestATStates WHERE height = ?"; + try (ResultSet resultSet = this.repository.checkedExecute(updateSql, height)) { + if (resultSet != null) { + do { + String atAddress = resultSet.getString(1); + atAddresses.add(atAddress); + + } while (resultSet.next()); + } + } catch (SQLException e) { + throw new DataException("Unable to fetch latest AT states from repository", e); + } + + List atStates = this.getBlockATStatesAtHeight(height); + for (ATStateData atState : atStates) { + //LOGGER.info("Found atState {} at height {}", atState.getATAddress(), atState.getHeight()); + + // Give up if we're stopping + if (Controller.isStopping()) { + return deletedCount; + } + + if (atAddresses.contains(atState.getATAddress())) { + // We don't want to delete this AT state because it is still active + LOGGER.trace("Skipping atState {} at height {}", atState.getATAddress(), atState.getHeight()); + continue; + } + + // Safe to delete everything else for this height + try { + this.repository.delete("ATStates", "AT_address = ? 
AND height = ?", + atState.getATAddress(), atState.getHeight()); + deletedCount++; + } catch (SQLException e) { + throw new DataException("Unable to delete AT state data from repository", e); + } + } + } + this.repository.saveChanges(); + + return deletedCount; + } + } + + + @Override + public boolean hasAtStatesHeightIndex() throws DataException { + String sql = "SELECT INDEX_NAME FROM INFORMATION_SCHEMA.SYSTEM_INDEXINFO where INDEX_NAME='ATSTATESHEIGHTINDEX'"; + + try (ResultSet resultSet = this.repository.checkedExecute(sql)) { + return resultSet != null; + + } catch (SQLException e) { + throw new DataException("Unable to check for ATStatesHeightIndex in repository", e); + } + } + + @Override public void save(ATStateData atStateData) throws DataException { // We shouldn't ever save partial ATStateData @@ -662,7 +821,8 @@ public class HSQLDBATRepository implements ATRepository { atStatesSaver.bind("AT_address", atStateData.getATAddress()).bind("height", atStateData.getHeight()) .bind("state_hash", atStateData.getStateHash()) - .bind("fees", atStateData.getFees()).bind("is_initial", atStateData.isInitial()); + .bind("fees", atStateData.getFees()).bind("is_initial", atStateData.isInitial()) + .bind("sleep_until_message_timestamp", atStateData.getSleepUntilMessageTimestamp()); try { atStatesSaver.execute(this.repository); diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBAccountRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBAccountRepository.java index 0dca46eb..b28a224c 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBAccountRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBAccountRepository.java @@ -904,6 +904,25 @@ public class HSQLDBAccountRepository implements AccountRepository { } } + @Override + public MintingAccountData getMintingAccount(byte[] mintingAccountKey) throws DataException { + try (ResultSet resultSet = this.repository.checkedExecute("SELECT minter_private_key, minter_public_key " + + "FROM MintingAccounts WHERE minter_private_key = ? 
OR minter_public_key = ?", + mintingAccountKey, mintingAccountKey)) { + + if (resultSet == null) + return null; + + byte[] minterPrivateKey = resultSet.getBytes(1); + byte[] minterPublicKey = resultSet.getBytes(2); + + return new MintingAccountData(minterPrivateKey, minterPublicKey); + + } catch (SQLException e) { + throw new DataException("Unable to fetch minting accounts from repository", e); + } + } + @Override public void save(MintingAccountData mintingAccountData) throws DataException { HSQLDBSaver saveHelper = new HSQLDBSaver("MintingAccounts"); diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java new file mode 100644 index 00000000..46008c25 --- /dev/null +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java @@ -0,0 +1,296 @@ +package org.qortal.repository.hsqldb; + +import org.qortal.api.ApiError; +import org.qortal.api.ApiExceptionFactory; +import org.qortal.api.model.BlockSignerSummary; +import org.qortal.block.Block; +import org.qortal.data.block.BlockArchiveData; +import org.qortal.data.block.BlockData; +import org.qortal.data.block.BlockSummaryData; +import org.qortal.repository.BlockArchiveReader; +import org.qortal.repository.BlockArchiveRepository; +import org.qortal.repository.DataException; +import org.qortal.utils.Triple; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository { + + protected HSQLDBRepository repository; + + public HSQLDBBlockArchiveRepository(HSQLDBRepository repository) { + this.repository = repository; + } + + + @Override + public BlockData fromSignature(byte[] signature) throws DataException { + Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockWithSignature(signature, this.repository); + if (blockInfo != null) { + return (BlockData) blockInfo.getA(); + } + return null; + } + + @Override + public int getHeightFromSignature(byte[] signature) throws DataException { + Integer height = BlockArchiveReader.getInstance().fetchHeightForSignature(signature, this.repository); + if (height == null || height == 0) { + return 0; + } + return height; + } + + @Override + public BlockData fromHeight(int height) throws DataException { + Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height); + if (blockInfo != null) { + return (BlockData) blockInfo.getA(); + } + return null; + } + + @Override + public List fromRange(int startHeight, int endHeight) throws DataException { + List blocks = new ArrayList<>(); + + for (int height = startHeight; height < endHeight; height++) { + BlockData blockData = this.fromHeight(height); + if (blockData == null) { + return blocks; + } + blocks.add(blockData); + } + return blocks; + } + + @Override + public BlockData fromReference(byte[] reference) throws DataException { + BlockData referenceBlock = this.repository.getBlockArchiveRepository().fromSignature(reference); + if (referenceBlock == null) { + // Try the main block repository. Needed for genesis block. 
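// (The genesis block is never written to the archive, since BlockArchiveWriter starts at
// height 2, so a reference pointing at it can only be resolved from the Blocks table.)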
+ referenceBlock = this.repository.getBlockRepository().fromSignature(reference); + } + if (referenceBlock != null) { + int height = referenceBlock.getHeight(); + if (height > 0) { + // Request the block at height + 1 + Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height + 1); + if (blockInfo != null) { + return (BlockData) blockInfo.getA(); + } + } + } + return null; + } + + @Override + public int getHeightFromTimestamp(long timestamp) throws DataException { + String sql = "SELECT height FROM BlockArchive WHERE minted_when <= ? ORDER BY minted_when DESC, height DESC LIMIT 1"; + + try (ResultSet resultSet = this.repository.checkedExecute(sql, timestamp)) { + if (resultSet == null) { + return 0; + } + return resultSet.getInt(1); + + } catch (SQLException e) { + throw new DataException("Error fetching height from BlockArchive repository", e); + } + } + + @Override + public List getBlockSummariesBySigner(byte[] signerPublicKey, Integer limit, Integer offset, Boolean reverse) throws DataException { + StringBuilder sql = new StringBuilder(512); + sql.append("SELECT signature, height, BlockArchive.minter FROM "); + + // List of minter account's public key and reward-share public keys with minter's public key + sql.append("(SELECT * FROM (VALUES (CAST(? AS QortalPublicKey))) UNION (SELECT reward_share_public_key FROM RewardShares WHERE minter_public_key = ?)) AS PublicKeys (public_key) "); + + // Match BlockArchive blocks signed with public key from above list + sql.append("JOIN BlockArchive ON BlockArchive.minter = public_key "); + + sql.append("ORDER BY BlockArchive.height "); + if (reverse != null && reverse) + sql.append("DESC "); + + HSQLDBRepository.limitOffsetSql(sql, limit, offset); + + List blockSummaries = new ArrayList<>(); + + try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), signerPublicKey, signerPublicKey)) { + if (resultSet == null) + return blockSummaries; + + do { + byte[] signature = resultSet.getBytes(1); + int height = resultSet.getInt(2); + byte[] blockMinterPublicKey = resultSet.getBytes(3); + + // Fetch additional info from the archive itself + int onlineAccountsCount = 0; + BlockData blockData = this.fromSignature(signature); + if (blockData != null) { + onlineAccountsCount = blockData.getOnlineAccountsCount(); + } + + BlockSummaryData blockSummary = new BlockSummaryData(height, signature, blockMinterPublicKey, onlineAccountsCount); + blockSummaries.add(blockSummary); + } while (resultSet.next()); + + return blockSummaries; + } catch (SQLException e) { + throw new DataException("Unable to fetch minter's block summaries from repository", e); + } + } + + @Override + public List getBlockSigners(List addresses, Integer limit, Integer offset, Boolean reverse) throws DataException { + String subquerySql = "SELECT minter, COUNT(signature) FROM (" + + "(SELECT minter, signature FROM Blocks) UNION ALL (SELECT minter, signature FROM BlockArchive)" + + ") GROUP BY minter"; + + StringBuilder sql = new StringBuilder(1024); + sql.append("SELECT DISTINCT block_minter, n_blocks, minter_public_key, minter, recipient FROM ("); + sql.append(subquerySql); + sql.append(") AS Minters (block_minter, n_blocks) LEFT OUTER JOIN RewardShares ON reward_share_public_key = block_minter "); + + if (addresses != null && !addresses.isEmpty()) { + sql.append(" LEFT OUTER JOIN Accounts AS BlockMinterAccounts ON BlockMinterAccounts.public_key = block_minter "); + sql.append(" LEFT OUTER JOIN Accounts AS RewardShareMinterAccounts ON 
RewardShareMinterAccounts.public_key = minter_public_key "); + sql.append(" JOIN (VALUES "); + + final int addressesSize = addresses.size(); + for (int ai = 0; ai < addressesSize; ++ai) { + if (ai != 0) + sql.append(", "); + + sql.append("(?)"); + } + + sql.append(") AS FilterAccounts (account) "); + sql.append(" ON FilterAccounts.account IN (recipient, BlockMinterAccounts.account, RewardShareMinterAccounts.account) "); + } else { + addresses = Collections.emptyList(); + } + + sql.append("ORDER BY n_blocks "); + if (reverse != null && reverse) + sql.append("DESC "); + + HSQLDBRepository.limitOffsetSql(sql, limit, offset); + + List summaries = new ArrayList<>(); + + try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), addresses.toArray())) { + if (resultSet == null) + return summaries; + + do { + byte[] blockMinterPublicKey = resultSet.getBytes(1); + int nBlocks = resultSet.getInt(2); + + // May not be present if no reward-share: + byte[] mintingAccountPublicKey = resultSet.getBytes(3); + String minterAccount = resultSet.getString(4); + String recipientAccount = resultSet.getString(5); + + BlockSignerSummary blockSignerSummary; + if (recipientAccount == null) + blockSignerSummary = new BlockSignerSummary(blockMinterPublicKey, nBlocks); + else + blockSignerSummary = new BlockSignerSummary(blockMinterPublicKey, nBlocks, mintingAccountPublicKey, minterAccount, recipientAccount); + + summaries.add(blockSignerSummary); + } while (resultSet.next()); + + return summaries; + } catch (SQLException e) { + throw new DataException("Unable to fetch block minters from repository", e); + } + } + + + @Override + public int getBlockArchiveHeight() throws DataException { + String sql = "SELECT block_archive_height FROM DatabaseInfo"; + + try (ResultSet resultSet = this.repository.checkedExecute(sql)) { + if (resultSet == null) + return 0; + + return resultSet.getInt(1); + } catch (SQLException e) { + throw new DataException("Unable to fetch block archive height from repository", e); + } + } + + @Override + public void setBlockArchiveHeight(int archiveHeight) throws DataException { + // trimHeightsLock is to prevent concurrent update on DatabaseInfo + // that could result in "transaction rollback: serialization failure" + synchronized (this.repository.trimHeightsLock) { + String updateSql = "UPDATE DatabaseInfo SET block_archive_height = ?"; + + try { + this.repository.executeCheckedUpdate(updateSql, archiveHeight); + this.repository.saveChanges(); + } catch (SQLException e) { + repository.examineException(e); + throw new DataException("Unable to set block archive height in repository", e); + } + } + } + + + @Override + public BlockArchiveData getBlockArchiveDataForSignature(byte[] signature) throws DataException { + String sql = "SELECT height, signature, minted_when, minter FROM BlockArchive WHERE signature = ? 
LIMIT 1"; + + try (ResultSet resultSet = this.repository.checkedExecute(sql, signature)) { + if (resultSet == null) { + return null; + } + int height = resultSet.getInt(1); + byte[] sig = resultSet.getBytes(2); + long timestamp = resultSet.getLong(3); + byte[] minterPublicKey = resultSet.getBytes(4); + return new BlockArchiveData(sig, height, timestamp, minterPublicKey); + + } catch (SQLException e) { + throw new DataException("Error fetching height from BlockArchive repository", e); + } + } + + + @Override + public void save(BlockArchiveData blockArchiveData) throws DataException { + HSQLDBSaver saveHelper = new HSQLDBSaver("BlockArchive"); + + saveHelper.bind("signature", blockArchiveData.getSignature()) + .bind("height", blockArchiveData.getHeight()) + .bind("minted_when", blockArchiveData.getTimestamp()) + .bind("minter", blockArchiveData.getMinterPublicKey()); + + try { + saveHelper.execute(this.repository); + } catch (SQLException e) { + throw new DataException("Unable to save SimpleBlockData into BlockArchive repository", e); + } + } + + @Override + public void delete(BlockArchiveData blockArchiveData) throws DataException { + try { + this.repository.delete("BlockArchive", + "block_signature = ?", blockArchiveData.getSignature()); + } catch (SQLException e) { + throw new DataException("Unable to delete SimpleBlockData from BlockArchive repository", e); + } + } + +} diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockRepository.java index b486e6a0..b8238085 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockRepository.java @@ -10,6 +10,7 @@ import org.qortal.api.model.BlockSignerSummary; import org.qortal.data.block.BlockData; import org.qortal.data.block.BlockSummaryData; import org.qortal.data.block.BlockTransactionData; +import org.qortal.data.block.BlockArchiveData; import org.qortal.data.transaction.TransactionData; import org.qortal.repository.BlockRepository; import org.qortal.repository.DataException; @@ -382,86 +383,6 @@ public class HSQLDBBlockRepository implements BlockRepository { } } - @Override - public List getBlockSummaries(Integer startHeight, Integer endHeight, Integer count) throws DataException { - StringBuilder sql = new StringBuilder(512); - List bindParams = new ArrayList<>(); - - sql.append("SELECT signature, height, minter, online_accounts_count, minted_when, transaction_count "); - - /* - * start end count result - * 10 40 null blocks 10 to 39 (excludes end block, ignore count) - * - * null null null blocks 1 to 50 (assume count=50, maybe start=1) - * 30 null null blocks 30 to 79 (assume count=50) - * 30 null 10 blocks 30 to 39 - * - * null null 50 last 50 blocks? so if max(blocks.height) is 200, then blocks 151 to 200 - * null 200 null blocks 150 to 199 (excludes end block, assume count=50) - * null 200 10 blocks 190 to 199 (excludes end block) - */ - - if (startHeight != null && endHeight != null) { - sql.append("FROM Blocks "); - sql.append("WHERE height BETWEEN ? 
AND ?"); - bindParams.add(startHeight); - bindParams.add(Integer.valueOf(endHeight - 1)); - } else if (endHeight != null || (startHeight == null && count != null)) { - // we are going to return blocks from the end of the chain - if (count == null) - count = 50; - - if (endHeight == null) { - sql.append("FROM (SELECT height FROM Blocks ORDER BY height DESC LIMIT 1) AS MaxHeights (max_height) "); - sql.append("JOIN Blocks ON height BETWEEN (max_height - ? + 1) AND max_height "); - bindParams.add(count); - } else { - sql.append("FROM Blocks "); - sql.append("WHERE height BETWEEN ? AND ?"); - bindParams.add(Integer.valueOf(endHeight - count)); - bindParams.add(Integer.valueOf(endHeight - 1)); - } - - } else { - // we are going to return blocks from the start of the chain - if (startHeight == null) - startHeight = 1; - - if (count == null) - count = 50; - - sql.append("FROM Blocks "); - sql.append("WHERE height BETWEEN ? AND ?"); - bindParams.add(startHeight); - bindParams.add(Integer.valueOf(startHeight + count - 1)); - } - - List blockSummaries = new ArrayList<>(); - - try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), bindParams.toArray())) { - if (resultSet == null) - return blockSummaries; - - do { - byte[] signature = resultSet.getBytes(1); - int height = resultSet.getInt(2); - byte[] minterPublicKey = resultSet.getBytes(3); - int onlineAccountsCount = resultSet.getInt(4); - long timestamp = resultSet.getLong(5); - int transactionCount = resultSet.getInt(6); - - BlockSummaryData blockSummary = new BlockSummaryData(height, signature, minterPublicKey, onlineAccountsCount, - timestamp, transactionCount); - blockSummaries.add(blockSummary); - } while (resultSet.next()); - - return blockSummaries; - } catch (SQLException e) { - throw new DataException("Unable to fetch height-ranged block summaries from repository", e); - } - } - @Override public int getOnlineAccountsSignaturesTrimHeight() throws DataException { String sql = "SELECT online_signatures_trim_height FROM DatabaseInfo"; @@ -509,6 +430,53 @@ public class HSQLDBBlockRepository implements BlockRepository { } } + + @Override + public int getBlockPruneHeight() throws DataException { + String sql = "SELECT block_prune_height FROM DatabaseInfo"; + + try (ResultSet resultSet = this.repository.checkedExecute(sql)) { + if (resultSet == null) + return 0; + + return resultSet.getInt(1); + } catch (SQLException e) { + throw new DataException("Unable to fetch block prune height from repository", e); + } + } + + @Override + public void setBlockPruneHeight(int pruneHeight) throws DataException { + // trimHeightsLock is to prevent concurrent update on DatabaseInfo + // that could result in "transaction rollback: serialization failure" + synchronized (this.repository.trimHeightsLock) { + String updateSql = "UPDATE DatabaseInfo SET block_prune_height = ?"; + + try { + this.repository.executeCheckedUpdate(updateSql, pruneHeight); + this.repository.saveChanges(); + } catch (SQLException e) { + repository.examineException(e); + throw new DataException("Unable to set block prune height in repository", e); + } + } + } + + @Override + public int pruneBlocks(int minHeight, int maxHeight) throws DataException { + // Don't prune the genesis block + if (minHeight <= 1) { + minHeight = 2; + } + + try { + return this.repository.delete("Blocks", "height BETWEEN ? 
AND ?", minHeight, maxHeight); + } catch (SQLException e) { + throw new DataException("Unable to prune blocks from repository", e); + } + } + + @Override public BlockData getDetachedBlockSignature(int startHeight) throws DataException { String sql = "SELECT " + BLOCK_DB_COLUMNS + " FROM Blocks " diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java new file mode 100644 index 00000000..77136ab9 --- /dev/null +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java @@ -0,0 +1,88 @@ +package org.qortal.repository.hsqldb; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; +import org.qortal.gui.SplashFrame; +import org.qortal.repository.BlockArchiveWriter; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.transform.TransformationException; + +import java.io.IOException; + +/** + * + * When switching to an archiving node, we need to archive most of the database contents. + * This involves copying its data into flat files. + * If we do this entirely as a background process, it is very slow and can interfere with syncing. + * However, if we take the approach of doing this in bulk, before starting up the rest of the + * processes, this makes it much faster and less invasive. + * + * From that point, the original background archiving process will run, but can be dialled right down + * so not to interfere with syncing. + * + */ + + +public class HSQLDBDatabaseArchiving { + + private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabaseArchiving.class); + + + public static boolean buildBlockArchive(Repository repository, long fileSizeTarget) throws DataException { + + // Only build the archive if we haven't already got one that is up to date + boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository); + if (upToDate) { + // Already archived + return false; + } + + LOGGER.info("Building block archive - this process could take a while... (approx. 15 mins on high spec)"); + SplashFrame.getInstance().updateStatus("Building block archive (takes 60+ mins)..."); + + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + int startHeight = 0; + + while (!Controller.isStopping()) { + try { + BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository); + writer.setFileSizeTarget(fileSizeTarget); + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + switch (result) { + case OK: + // Increment block archive height + startHeight = writer.getLastWrittenHeight() + 1; + repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight); + repository.saveChanges(); + break; + + case STOPPING: + return false; + + case NOT_ENOUGH_BLOCKS: + // We've reached the limit of the blocks we can archive + // Return from the whole method + return true; + + case BLOCK_NOT_FOUND: + // We tried to archive a block that didn't exist. This is a major failure and likely means + // that a bootstrap or re-sync is needed. Return rom the method + LOGGER.info("Error: block not found when building archive. 
If this error persists, " + + "a bootstrap or re-sync may be needed."); + return false; + } + + } catch (IOException | TransformationException | InterruptedException e) { + LOGGER.info("Caught exception when creating block cache", e); + return false; + } + } + + // If we got this far then something went wrong (most likely the app is stopping) + return false; + } + +} diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java new file mode 100644 index 00000000..978ba25e --- /dev/null +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java @@ -0,0 +1,332 @@ +package org.qortal.repository.hsqldb; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; +import org.qortal.data.block.BlockData; +import org.qortal.gui.SplashFrame; +import org.qortal.repository.BlockArchiveWriter; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.settings.Settings; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.concurrent.TimeoutException; + +/** + * + * When switching from a full node to a pruning node, we need to delete most of the database contents. + * If we do this entirely as a background process, it is very slow and can interfere with syncing. + * However, if we take the approach of transferring only the necessary rows to a new table and then + * deleting the original table, this makes the process much faster. It was taking several days to + * delete the AT states in the background, but only a couple of minutes to copy them to a new table. + * + * The trade off is that we have to go through a form of "reshape" when starting the app for the first + * time after enabling pruning mode. But given that this is an opt-in mode, I don't think it will be + * a problem. + * + * Once the pruning is complete, it automatically performs a CHECKPOINT DEFRAG in order to + * shrink the database file size down to a fraction of what it was before. + * + * From this point, the original background process will run, but can be dialled right down so not + * to interfere with syncing. + * + */ + + +public class HSQLDBDatabasePruning { + + private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabasePruning.class); + + + public static boolean pruneATStates(HSQLDBRepository repository) throws SQLException, DataException { + + // Only bulk prune AT states if we have never done so before + int pruneHeight = repository.getATRepository().getAtPruneHeight(); + if (pruneHeight > 0) { + // Already pruned AT states + return false; + } + + if (Settings.getInstance().isArchiveEnabled()) { + // Only proceed if we can see that the archiver has already finished + // This way, if the archiver failed for any reason, we can prune once it has had + // some opportunities to try again + boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository); + if (!upToDate) { + return false; + } + } + + LOGGER.info("Starting bulk prune of AT states - this process could take a while... " + + "(approx. 
2 mins on high spec, or upwards of 30 mins in some cases)"); + SplashFrame.getInstance().updateStatus("Pruning database (takes up to 30 mins)..."); + + // Create new AT-states table to hold smaller dataset + repository.executeCheckedUpdate("DROP TABLE IF EXISTS ATStatesNew"); + repository.executeCheckedUpdate("CREATE TABLE ATStatesNew (" + + "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, " + + "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, " + + "PRIMARY KEY (AT_address, height), " + + "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)"); + repository.executeCheckedUpdate("SET TABLE ATStatesNew NEW SPACE"); + repository.executeCheckedUpdate("CHECKPOINT"); + + // Add a height index + LOGGER.info("Adding index to AT states table..."); + repository.executeCheckedUpdate("CREATE INDEX IF NOT EXISTS ATStatesNewHeightIndex ON ATStatesNew (height)"); + repository.executeCheckedUpdate("CHECKPOINT"); + + + // Find our latest block + BlockData latestBlock = repository.getBlockRepository().getLastBlock(); + if (latestBlock == null) { + LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning"); + return false; + } + + // Calculate some constants for later use + final int blockchainHeight = latestBlock.getHeight(); + int maximumBlockToTrim = blockchainHeight - Settings.getInstance().getPruneBlockLimit(); + if (Settings.getInstance().isArchiveEnabled()) { + // Archive mode - don't prune anything that hasn't been archived yet + maximumBlockToTrim = Math.min(maximumBlockToTrim, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1); + } + final int endHeight = blockchainHeight; + final int blockStep = 10000; + + + // It's essential that we rebuild the latest AT states here, as we are using this data in the next query. + // Failing to do this will result in important AT states being deleted, rendering the database unusable. + repository.getATRepository().rebuildLatestAtStates(); + + + // Loop through all the LatestATStates and copy them to the new table + LOGGER.info("Copying AT states..."); + for (int height = 0; height < endHeight; height += blockStep) { + final int batchEndHeight = height + blockStep - 1; + //LOGGER.info(String.format("Copying AT states between %d and %d...", height, batchEndHeight)); + + String sql = "SELECT height, AT_address FROM LatestATStates WHERE height BETWEEN ? AND ?"; + try (ResultSet latestAtStatesResultSet = repository.checkedExecute(sql, height, batchEndHeight)) { + if (latestAtStatesResultSet != null) { + do { + int latestAtHeight = latestAtStatesResultSet.getInt(1); + String latestAtAddress = latestAtStatesResultSet.getString(2); + + // Copy this latest ATState to the new table + //LOGGER.info(String.format("Copying AT %s at height %d...", latestAtAddress, latestAtHeight)); + try { + String updateSql = "INSERT INTO ATStatesNew (" + + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp " + + "FROM ATStates " + + "WHERE height = ? AND AT_address = ?)"; + repository.executeCheckedUpdate(updateSql, latestAtHeight, latestAtAddress); + } catch (SQLException e) { + repository.examineException(e); + throw new DataException("Unable to copy ATStates", e); + } + + // If this batch includes blocks after the maximum block to trim, we will need to copy + // each of its AT states above maximumBlockToTrim as they are considered "recent". 
We + // need to do this for _all_ AT states in these blocks, regardless of their latest state. + if (batchEndHeight >= maximumBlockToTrim) { + // Now copy this AT's states for each recent block they are present in + for (int i = maximumBlockToTrim; i < endHeight; i++) { + if (latestAtHeight < i) { + // This AT finished before this block so there is nothing to copy + continue; + } + + //LOGGER.info(String.format("Copying recent AT %s at height %d...", latestAtAddress, i)); + try { + // Copy each LatestATState to the new table + String updateSql = "INSERT IGNORE INTO ATStatesNew (" + + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp " + + "FROM ATStates " + + "WHERE height = ? AND AT_address = ?)"; + repository.executeCheckedUpdate(updateSql, i, latestAtAddress); + } catch (SQLException e) { + repository.examineException(e); + throw new DataException("Unable to copy ATStates", e); + } + } + } + repository.saveChanges(); + + } while (latestAtStatesResultSet.next()); + } + } catch (SQLException e) { + throw new DataException("Unable to copy AT states", e); + } + } + + + // Finally, drop the original table and rename + LOGGER.info("Deleting old AT states..."); + repository.executeCheckedUpdate("DROP TABLE ATStates"); + repository.executeCheckedUpdate("ALTER TABLE ATStatesNew RENAME TO ATStates"); + repository.executeCheckedUpdate("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex"); + repository.executeCheckedUpdate("CHECKPOINT"); + + // Update the prune height + int nextPruneHeight = maximumBlockToTrim + 1; + repository.getATRepository().setAtPruneHeight(nextPruneHeight); + repository.saveChanges(); + + repository.executeCheckedUpdate("CHECKPOINT"); + + // Now prune/trim the ATStatesData, as this currently goes back over a month + return HSQLDBDatabasePruning.pruneATStateData(repository); + } + + /* + * Bulk prune ATStatesData to catch up with the now pruned ATStates table + * This uses the existing AT States trimming code but with a much higher end block + */ + private static boolean pruneATStateData(Repository repository) throws DataException { + + if (Settings.getInstance().isArchiveEnabled()) { + // Don't prune ATStatesData in archive mode + return true; + } + + BlockData latestBlock = repository.getBlockRepository().getLastBlock(); + if (latestBlock == null) { + LOGGER.info("Unable to determine blockchain height, necessary for bulk ATStatesData pruning"); + return false; + } + final int blockchainHeight = latestBlock.getHeight(); + int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit(); + // ATStateData is already trimmed - so carry on from where we left off in the past + int pruneStartHeight = repository.getATRepository().getAtTrimHeight(); + + LOGGER.info("Starting bulk prune of AT states data - this process could take a while... (approx. 
3 mins on high spec)"); + + while (pruneStartHeight < upperPrunableHeight) { + // Prune all AT state data up until our latest minus pruneBlockLimit (or our archive height) + + if (Controller.isStopping()) { + return false; + } + + // Override batch size in the settings because this is a one-off process + final int batchSize = 1000; + final int rowLimitPerBatch = 50000; + int upperBatchHeight = pruneStartHeight + batchSize; + int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight); + + LOGGER.trace(String.format("Pruning AT states data between %d and %d...", pruneStartHeight, upperPruneHeight)); + + int numATStatesPruned = repository.getATRepository().trimAtStates(pruneStartHeight, upperPruneHeight, rowLimitPerBatch); + repository.saveChanges(); + + if (numATStatesPruned > 0) { + LOGGER.trace(String.format("Pruned %d AT states data rows between blocks %d and %d", + numATStatesPruned, pruneStartHeight, upperPruneHeight)); + } else { + repository.getATRepository().setAtTrimHeight(upperBatchHeight); + // No need to rebuild the latest AT states as we aren't currently synchronizing + repository.saveChanges(); + LOGGER.debug(String.format("Bumping AT states trim height to %d", upperBatchHeight)); + + // Can we move onto next batch? + if (upperPrunableHeight > upperBatchHeight) { + pruneStartHeight = upperBatchHeight; + } + else { + // We've finished pruning + break; + } + } + } + + return true; + } + + public static boolean pruneBlocks(Repository repository) throws SQLException, DataException { + + // Only bulk prune AT states if we have never done so before + int pruneHeight = repository.getBlockRepository().getBlockPruneHeight(); + if (pruneHeight > 0) { + // Already pruned blocks + return false; + } + + if (Settings.getInstance().isArchiveEnabled()) { + // Only proceed if we can see that the archiver has already finished + // This way, if the archiver failed for any reason, we can prune once it has had + // some opportunities to try again + boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository); + if (!upToDate) { + return false; + } + } + + BlockData latestBlock = repository.getBlockRepository().getLastBlock(); + if (latestBlock == null) { + LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning"); + return false; + } + final int blockchainHeight = latestBlock.getHeight(); + int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit(); + int pruneStartHeight = 0; + + if (Settings.getInstance().isArchiveEnabled()) { + // Archive mode - don't prune anything that hasn't been archived yet + upperPrunableHeight = Math.min(upperPrunableHeight, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1); + } + + LOGGER.info("Starting bulk prune of blocks - this process could take a while... (approx. 5 mins on high spec)"); + + while (pruneStartHeight < upperPrunableHeight) { + // Prune all blocks up until our latest minus pruneBlockLimit + + int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize(); + int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight); + + LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight)); + + int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight); + repository.saveChanges(); + + if (numBlocksPruned > 0) { + LOGGER.info(String.format("Pruned %d block%s between %d and %d", + numBlocksPruned, (numBlocksPruned != 1 ? 
"s" : ""), + pruneStartHeight, upperPruneHeight)); + } else { + final int nextPruneHeight = upperPruneHeight + 1; + repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight); + repository.saveChanges(); + LOGGER.debug(String.format("Bumping block base prune height to %d", nextPruneHeight)); + + // Can we move onto next batch? + if (upperPrunableHeight > nextPruneHeight) { + pruneStartHeight = nextPruneHeight; + } + else { + // We've finished pruning + break; + } + } + } + + return true; + } + + public static void performMaintenance(Repository repository) throws SQLException, DataException { + try { + SplashFrame.getInstance().updateStatus("Performing maintenance..."); + + // Timeout if the database isn't ready for backing up after 5 minutes + // Nothing else should be using the db at this point, so a timeout shouldn't happen + long timeout = 5 * 60 * 1000L; + repository.performPeriodicMaintenance(timeout); + + } catch (TimeoutException e) { + LOGGER.info("Attempt to perform maintenance failed due to timeout: {}", e.getMessage()); + } + } + +} diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java index 757efe34..535cac6f 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java @@ -9,7 +9,9 @@ import java.util.stream.Collectors; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.qortal.controller.Controller; import org.qortal.controller.tradebot.BitcoinACCTv1TradeBot; +import org.qortal.gui.SplashFrame; public class HSQLDBDatabaseUpdates { @@ -27,9 +29,14 @@ public class HSQLDBDatabaseUpdates { public static boolean updateDatabase(Connection connection) throws SQLException { final boolean wasPristine = fetchDatabaseVersion(connection) == 0; + SplashFrame.getInstance().updateStatus("Upgrading database, please wait..."); + while (databaseUpdating(connection, wasPristine)) incrementDatabaseVersion(connection); + String text = String.format("Starting Qortal Core v%s...", Controller.getInstance().getVersionStringWithoutPrefix()); + SplashFrame.getInstance().updateStatus(text); + return wasPristine; } @@ -698,7 +705,7 @@ public class HSQLDBDatabaseUpdates { stmt.execute("CHECKPOINT"); break; - case 30: + case 30: { // Split AT state data off to new table for better performance/management. if (!wasPristine && !"mem".equals(HSQLDBRepository.getDbPathname(connection.getMetaData().getURL()))) { @@ -773,6 +780,7 @@ public class HSQLDBDatabaseUpdates { stmt.execute("ALTER TABLE ATStatesNew RENAME TO ATStates"); stmt.execute("CHECKPOINT"); break; + } case 31: // Fix latest AT state cache which was previous created as TEMPORARY @@ -844,6 +852,75 @@ public class HSQLDBDatabaseUpdates { stmt.execute("ALTER TABLE ArbitraryTransactions ADD compression INTEGER NOT NULL DEFAULT 0"); break; + case 34: { + // AT sleep-until-message support + LOGGER.info("Altering AT table in repository - this might take a while... (approx. 
20 seconds on high-spec)"); + stmt.execute("ALTER TABLE ATs ADD sleep_until_message_timestamp BIGINT"); + + // Create new AT-states table with new column + stmt.execute("CREATE TABLE ATStatesNew (" + + "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, " + + "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, " + + "PRIMARY KEY (AT_address, height), " + + "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)"); + stmt.execute("SET TABLE ATStatesNew NEW SPACE"); + stmt.execute("CHECKPOINT"); + + // Add the height index + LOGGER.info("Adding index to AT states table..."); + stmt.execute("CREATE INDEX ATStatesNewHeightIndex ON ATStatesNew (height)"); + stmt.execute("CHECKPOINT"); + + ResultSet resultSet = stmt.executeQuery("SELECT height FROM Blocks ORDER BY height DESC LIMIT 1"); + final int blockchainHeight = resultSet.next() ? resultSet.getInt(1) : 0; + final int heightStep = 100; + + LOGGER.info("Altering AT states table in repository - this might take a while... (approx. 3 mins on high-spec)"); + for (int minHeight = 1; minHeight < blockchainHeight; minHeight += heightStep) { + stmt.execute("INSERT INTO ATStatesNew (" + + "SELECT AT_address, height, state_hash, fees, is_initial, NULL " + + "FROM ATStates " + + "WHERE height BETWEEN " + minHeight + " AND " + (minHeight + heightStep - 1) + + ")"); + stmt.execute("COMMIT"); + + int processed = Math.min(minHeight + heightStep - 1, blockchainHeight); + double percentage = (double)processed / (double)blockchainHeight * 100.0f; + LOGGER.info(String.format("Processed %d of %d blocks (%.1f%%)", processed, blockchainHeight, percentage)); + } + stmt.execute("CHECKPOINT"); + + stmt.execute("DROP TABLE ATStates"); + stmt.execute("ALTER TABLE ATStatesNew RENAME TO ATStates"); + stmt.execute("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex"); + stmt.execute("CHECKPOINT"); + break; + } + case 35: + // Support for pruning + stmt.execute("ALTER TABLE DatabaseInfo ADD AT_prune_height INT NOT NULL DEFAULT 0"); + stmt.execute("ALTER TABLE DatabaseInfo ADD block_prune_height INT NOT NULL DEFAULT 0"); + break; + + case 36: + // Block archive support + stmt.execute("ALTER TABLE DatabaseInfo ADD block_archive_height INT NOT NULL DEFAULT 0"); + + // Block archive (lookup table to map signature to height) + // Actual data is stored in archive files outside of the database + stmt.execute("CREATE TABLE BlockArchive (signature BlockSignature, height INTEGER NOT NULL, " + + "minted_when EpochMillis NOT NULL, minter QortalPublicKey NOT NULL, " + + "PRIMARY KEY (signature))"); + // For finding blocks by height. + stmt.execute("CREATE INDEX BlockArchiveHeightIndex ON BlockArchive (height)"); + // For finding blocks by the account that minted them. + stmt.execute("CREATE INDEX BlockArchiveMinterIndex ON BlockArchive (minter)"); + // For finding blocks by timestamp or finding height of latest block immediately before timestamp, etc. + stmt.execute("CREATE INDEX BlockArchiveTimestampHeightIndex ON BlockArchive (minted_when, height)"); + // Use a separate table space as this table will be very large. 
+ stmt.execute("SET TABLE BlockArchive NEW SPACE"); + break; + default: // nothing to do return false; diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBImportExport.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBImportExport.java new file mode 100644 index 00000000..c5881c01 --- /dev/null +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBImportExport.java @@ -0,0 +1,298 @@ +package org.qortal.repository.hsqldb; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; +import org.qortal.data.account.MintingAccountData; +import org.qortal.data.crosschain.TradeBotData; +import org.qortal.repository.Bootstrap; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.settings.Settings; +import org.qortal.utils.Base58; +import org.qortal.utils.Triple; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Iterator; +import java.util.List; + +public class HSQLDBImportExport { + + private static final Logger LOGGER = LogManager.getLogger(Bootstrap.class); + + public static void backupTradeBotStates(Repository repository) throws DataException { + HSQLDBImportExport.backupCurrentTradeBotStates(repository); + HSQLDBImportExport.backupArchivedTradeBotStates(repository); + + LOGGER.info("Exported sensitive/node-local data: trade bot states"); + } + + public static void backupMintingAccounts(Repository repository) throws DataException { + HSQLDBImportExport.backupCurrentMintingAccounts(repository); + + LOGGER.info("Exported sensitive/node-local data: minting accounts"); + } + + + /* Trade bot states */ + + /** + * Backs up the trade bot states currently in the repository, without combining them with past ones + * @param repository + * @throws DataException + */ + private static void backupCurrentTradeBotStates(Repository repository) throws DataException { + try { + Path backupDirectory = HSQLDBImportExport.getExportDirectory(true); + + // Load current trade bot data + List allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData(); + JSONArray currentTradeBotDataJson = new JSONArray(); + for (TradeBotData tradeBotData : allTradeBotData) { + JSONObject tradeBotDataJson = tradeBotData.toJson(); + currentTradeBotDataJson.put(tradeBotDataJson); + } + + // Wrap current trade bot data in an object to indicate the type + JSONObject currentTradeBotDataJsonWrapper = new JSONObject(); + currentTradeBotDataJsonWrapper.put("type", "tradeBotStates"); + currentTradeBotDataJsonWrapper.put("dataset", "current"); + currentTradeBotDataJsonWrapper.put("data", currentTradeBotDataJson); + + // Write current trade bot data (just the ones currently in the database) + String fileName = Paths.get(backupDirectory.toString(), "TradeBotStates.json").toString(); + FileWriter writer = new FileWriter(fileName); + writer.write(currentTradeBotDataJsonWrapper.toString(2)); + writer.close(); + + } catch (DataException | IOException e) { + throw new DataException("Unable to export trade bot states from repository"); + } + } + + /** + * Backs up the trade bot states currently in the repository to a separate "archive" file, + * making sure to combine them with any unique states already present in the archive. 
+ * @param repository + * @throws DataException + */ + private static void backupArchivedTradeBotStates(Repository repository) throws DataException { + try { + Path backupDirectory = HSQLDBImportExport.getExportDirectory(true); + + // Load current trade bot data + List allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData(); + JSONArray allTradeBotDataJson = new JSONArray(); + for (TradeBotData tradeBotData : allTradeBotData) { + JSONObject tradeBotDataJson = tradeBotData.toJson(); + allTradeBotDataJson.put(tradeBotDataJson); + } + + // We need to combine existing archived TradeBotStates data before overwriting + String fileName = Paths.get(backupDirectory.toString(), "TradeBotStatesArchive.json").toString(); + File tradeBotStatesBackupFile = new File(fileName); + if (tradeBotStatesBackupFile.exists()) { + + String jsonString = new String(Files.readAllBytes(Paths.get(fileName))); + Triple parsedJSON = HSQLDBImportExport.parseJSONString(jsonString); + if (parsedJSON.getA() == null || parsedJSON.getC() == null) { + throw new DataException("Missing data when exporting archived trade bot states"); + } + String type = parsedJSON.getA(); + String dataset = parsedJSON.getB(); + JSONArray data = parsedJSON.getC(); + + if (!type.equals("tradeBotStates") || !dataset.equals("archive")) { + throw new DataException("Format mismatch when exporting archived trade bot states"); + } + + Iterator iterator = data.iterator(); + while(iterator.hasNext()) { + JSONObject existingTradeBotDataItem = (JSONObject)iterator.next(); + String existingTradePrivateKey = (String) existingTradeBotDataItem.get("tradePrivateKey"); + // Check if we already have an entry for this trade + boolean found = allTradeBotData.stream().anyMatch(tradeBotData -> Base58.encode(tradeBotData.getTradePrivateKey()).equals(existingTradePrivateKey)); + if (found == false) + // Add the data from the backup file to our "allTradeBotDataJson" array as it's not currently in the db + allTradeBotDataJson.put(existingTradeBotDataItem); + } + } + + // Wrap all trade bot data in an object to indicate the type + JSONObject allTradeBotDataJsonWrapper = new JSONObject(); + allTradeBotDataJsonWrapper.put("type", "tradeBotStates"); + allTradeBotDataJsonWrapper.put("dataset", "archive"); + allTradeBotDataJsonWrapper.put("data", allTradeBotDataJson); + + // Write ALL trade bot data to archive (current plus states that are no longer in the database) + FileWriter writer = new FileWriter(fileName); + writer.write(allTradeBotDataJsonWrapper.toString(2)); + writer.close(); + + } catch (DataException | IOException e) { + throw new DataException("Unable to export trade bot states from repository"); + } + } + + + /* Minting accounts */ + + /** + * Backs up the minting accounts currently in the repository, without combining them with past ones + * @param repository + * @throws DataException + */ + private static void backupCurrentMintingAccounts(Repository repository) throws DataException { + try { + Path backupDirectory = HSQLDBImportExport.getExportDirectory(true); + + // Load current trade bot data + List allMintingAccountData = repository.getAccountRepository().getMintingAccounts(); + JSONArray currentMintingAccountJson = new JSONArray(); + for (MintingAccountData mintingAccountData : allMintingAccountData) { + JSONObject mintingAccountDataJson = mintingAccountData.toJson(); + currentMintingAccountJson.put(mintingAccountDataJson); + } + + // Wrap current trade bot data in an object to indicate the type + JSONObject 
currentMintingAccountDataJsonWrapper = new JSONObject(); + currentMintingAccountDataJsonWrapper.put("type", "mintingAccounts"); + currentMintingAccountDataJsonWrapper.put("dataset", "current"); + currentMintingAccountDataJsonWrapper.put("data", currentMintingAccountJson); + + // Write current trade bot data (just the ones currently in the database) + String fileName = Paths.get(backupDirectory.toString(), "MintingAccounts.json").toString(); + FileWriter writer = new FileWriter(fileName); + writer.write(currentMintingAccountDataJsonWrapper.toString(2)); + writer.close(); + + } catch (DataException | IOException e) { + throw new DataException("Unable to export minting accounts from repository"); + } + } + + + /* Utils */ + + /** + * Imports data from supplied file + * Data type is loaded from the file itself, and if missing, TradeBotStates is assumed + * + * @param filename + * @param repository + * @throws DataException + * @throws IOException + */ + public static void importDataFromFile(String filename, Repository repository) throws DataException, IOException { + Path path = Paths.get(filename); + if (!path.toFile().exists()) { + throw new FileNotFoundException(String.format("File doesn't exist: %s", filename)); + } + byte[] fileContents = Files.readAllBytes(path); + if (fileContents == null) { + throw new FileNotFoundException(String.format("Unable to read file contents: %s", filename)); + } + + LOGGER.info(String.format("Importing %s into repository ...", filename)); + + String jsonString = new String(fileContents); + Triple parsedJSON = HSQLDBImportExport.parseJSONString(jsonString); + if (parsedJSON.getA() == null || parsedJSON.getC() == null) { + throw new DataException(String.format("Missing data when importing %s into repository", filename)); + } + String type = parsedJSON.getA(); + JSONArray data = parsedJSON.getC(); + + Iterator iterator = data.iterator(); + while(iterator.hasNext()) { + JSONObject dataJsonObject = (JSONObject)iterator.next(); + + if (type.equals("tradeBotStates")) { + HSQLDBImportExport.importTradeBotDataJSON(dataJsonObject, repository); + } + else if (type.equals("mintingAccounts")) { + HSQLDBImportExport.importMintingAccountDataJSON(dataJsonObject, repository); + } + else { + throw new DataException(String.format("Unrecognized data type when importing %s into repository", filename)); + } + + } + LOGGER.info(String.format("Imported %s into repository from %s", type, filename)); + } + + private static void importTradeBotDataJSON(JSONObject tradeBotDataJson, Repository repository) throws DataException { + TradeBotData tradeBotData = TradeBotData.fromJson(tradeBotDataJson); + repository.getCrossChainRepository().save(tradeBotData); + } + + private static void importMintingAccountDataJSON(JSONObject mintingAccountDataJson, Repository repository) throws DataException { + MintingAccountData mintingAccountData = MintingAccountData.fromJson(mintingAccountDataJson); + repository.getAccountRepository().save(mintingAccountData); + } + + public static Path getExportDirectory(boolean createIfNotExists) throws DataException { + Path backupPath = Paths.get(Settings.getInstance().getExportPath()); + + if (createIfNotExists) { + // Create the qortal-backup folder if it doesn't exist + try { + Files.createDirectories(backupPath); + } catch (IOException e) { + LOGGER.info(String.format("Unable to create %s folder", backupPath.toString())); + throw new DataException(String.format("Unable to create %s folder", backupPath.toString())); + } + } + + return backupPath; + } + + /** + * 
Parses a JSON string and returns "data", "type", and "dataset" fields. + * In the case of legacy JSON files with no type, they are assumed to be TradeBotStates archives, + * as we had never implemented this for any other types. + * + * @param jsonString + * @return Triple (type, dataset, data) + */ + private static Triple parseJSONString(String jsonString) throws DataException { + String type = null; + String dataset = null; + JSONArray data = null; + + try { + // Firstly try importing the new format + JSONObject jsonData = new JSONObject(jsonString); + if (jsonData != null && jsonData.getString("type") != null) { + + type = jsonData.getString("type"); + dataset = jsonData.getString("dataset"); + data = jsonData.getJSONArray("data"); + } + + } catch (JSONException e) { + // Could be a legacy format which didn't contain a type or any other outer keys, so try importing that + // Treat these as TradeBotStates archives, given that this was the only type previously implemented + try { + type = "tradeBotStates"; + dataset = "archive"; + data = new JSONArray(jsonString); + + } catch (JSONException e2) { + // Still failed, so give up + throw new DataException("Couldn't import JSON file"); + } + } + + return new Triple(type, dataset, data); + } + +} diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java index 4d8e5043..1c025ae2 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java @@ -2,7 +2,6 @@ package org.qortal.repository.hsqldb; import java.awt.TrayIcon.MessageType; import java.io.File; -import java.io.FileWriter; import java.io.IOException; import java.math.BigDecimal; import java.nio.file.Files; @@ -17,39 +16,20 @@ import java.sql.SQLException; import java.sql.Savepoint; import java.sql.Statement; import java.util.*; +import java.util.concurrent.TimeoutException; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Stream; -import org.json.JSONArray; -import org.json.JSONObject; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.qortal.account.PrivateKeyAccount; import org.qortal.crypto.Crypto; -import org.qortal.data.crosschain.TradeBotData; import org.qortal.globalization.Translator; import org.qortal.gui.SysTray; -import org.qortal.repository.ATRepository; -import org.qortal.repository.AccountRepository; -import org.qortal.repository.ArbitraryRepository; -import org.qortal.repository.AssetRepository; -import org.qortal.repository.BlockRepository; -import org.qortal.repository.ChatRepository; -import org.qortal.repository.CrossChainRepository; -import org.qortal.repository.DataException; -import org.qortal.repository.GroupRepository; -import org.qortal.repository.MessageRepository; -import org.qortal.repository.NameRepository; -import org.qortal.repository.NetworkRepository; -import org.qortal.repository.Repository; -import org.qortal.repository.RepositoryManager; -import org.qortal.repository.TransactionRepository; -import org.qortal.repository.VotingRepository; +import org.qortal.repository.*; import org.qortal.repository.hsqldb.transaction.HSQLDBTransactionRepository; import org.qortal.settings.Settings; -import org.qortal.utils.Base58; public class HSQLDBRepository implements Repository { @@ -69,12 +49,14 @@ public class HSQLDBRepository implements Repository { protected final Map preparedStatementCache = new HashMap<>(); // 
We want the same object corresponding to the actual DB protected final Object trimHeightsLock = RepositoryManager.getRepositoryFactory(); + protected final Object latestATStatesLock = RepositoryManager.getRepositoryFactory(); private final ATRepository atRepository = new HSQLDBATRepository(this); private final AccountRepository accountRepository = new HSQLDBAccountRepository(this); private final ArbitraryRepository arbitraryRepository = new HSQLDBArbitraryRepository(this); private final AssetRepository assetRepository = new HSQLDBAssetRepository(this); private final BlockRepository blockRepository = new HSQLDBBlockRepository(this); + private final BlockArchiveRepository blockArchiveRepository = new HSQLDBBlockArchiveRepository(this); private final ChatRepository chatRepository = new HSQLDBChatRepository(this); private final CrossChainRepository crossChainRepository = new HSQLDBCrossChainRepository(this); private final GroupRepository groupRepository = new HSQLDBGroupRepository(this); @@ -142,6 +124,11 @@ public class HSQLDBRepository implements Repository { return this.blockRepository; } + @Override + public BlockArchiveRepository getBlockArchiveRepository() { + return this.blockArchiveRepository; + } + @Override public ChatRepository getChatRepository() { return this.chatRepository; @@ -281,7 +268,7 @@ public class HSQLDBRepository implements Repository { public void close() throws DataException { // Already closed? No need to do anything but maybe report double-call if (this.connection == null) { - LOGGER.warn("HSQLDBRepository.close() called when repository already closed", new Exception("Repository already closed")); + LOGGER.warn("HSQLDBRepository.close() called when repository already closed. This is expected when bootstrapping."); return; } @@ -393,133 +380,104 @@ public class HSQLDBRepository implements Repository { } @Override - public void backup(boolean quick) throws DataException { - if (!quick) - // First perform a CHECKPOINT + public void backup(boolean quick, String name, Long timeout) throws DataException, TimeoutException { + synchronized (CHECKPOINT_LOCK) { + + // We can only perform a CHECKPOINT if no other HSQLDB session is mid-transaction, + // otherwise the CHECKPOINT blocks for COMMITs and other threads can't open HSQLDB sessions + // due to HSQLDB blocking until CHECKPOINT finishes - i.e. deadlock. + // Since we don't want to give up too easily, it's best to wait until the other transaction + // count reaches zero, and then continue. + this.blockUntilNoOtherTransactions(timeout); + + if (!quick) + // First perform a CHECKPOINT + try (Statement stmt = this.connection.createStatement()) { + LOGGER.info("Performing maintenance - this will take a while..."); + stmt.execute("CHECKPOINT"); + stmt.execute("CHECKPOINT DEFRAG"); + LOGGER.info("Maintenance completed"); + } catch (SQLException e) { + throw new DataException("Unable to prepare repository for backup"); + } + + // Clean out any previous backup + try { + String connectionUrl = this.connection.getMetaData().getURL(); + String dbPathname = getDbPathname(connectionUrl); + if (dbPathname == null) + throw new DataException("Unable to locate repository for backup?"); + + // Doesn't really make sense to backup an in-memory database... 
+ if (dbPathname.equals("mem")) { + LOGGER.debug("Ignoring request to backup in-memory repository!"); + return; + } + + String backupUrl = buildBackupUrl(dbPathname, name); + String backupPathname = getDbPathname(backupUrl); + if (backupPathname == null) + throw new DataException("Unable to determine location for repository backup?"); + + Path backupDirPath = Paths.get(backupPathname).getParent(); + String backupDirPathname = backupDirPath.toString(); + + try (Stream paths = Files.walk(backupDirPath)) { + paths.sorted(Comparator.reverseOrder()) + .map(Path::toFile) + .filter(file -> file.getPath().startsWith(backupDirPathname)) + .forEach(File::delete); + } + } catch (NoSuchFileException e) { + // Nothing to remove + } catch (SQLException | IOException e) { + throw new DataException("Unable to remove previous repository backup"); + } + + // Actually create backup try (Statement stmt = this.connection.createStatement()) { - stmt.execute("CHECKPOINT DEFRAG"); + LOGGER.info("Backing up repository..."); + stmt.execute(String.format("BACKUP DATABASE TO '%s/' BLOCKING AS FILES", name)); + LOGGER.info("Backup completed"); } catch (SQLException e) { - throw new DataException("Unable to prepare repository for backup"); + throw new DataException("Unable to backup repository"); } - // Clean out any previous backup - try { - String connectionUrl = this.connection.getMetaData().getURL(); - String dbPathname = getDbPathname(connectionUrl); - if (dbPathname == null) - throw new DataException("Unable to locate repository for backup?"); - - // Doesn't really make sense to backup an in-memory database... - if (dbPathname.equals("mem")) { - LOGGER.debug("Ignoring request to backup in-memory repository!"); - return; - } - - String backupUrl = buildBackupUrl(dbPathname); - String backupPathname = getDbPathname(backupUrl); - if (backupPathname == null) - throw new DataException("Unable to determine location for repository backup?"); - - Path backupDirPath = Paths.get(backupPathname).getParent(); - String backupDirPathname = backupDirPath.toString(); - - try (Stream paths = Files.walk(backupDirPath)) { - paths.sorted(Comparator.reverseOrder()) - .map(Path::toFile) - .filter(file -> file.getPath().startsWith(backupDirPathname)) - .forEach(File::delete); - } - } catch (NoSuchFileException e) { - // Nothing to remove - } catch (SQLException | IOException e) { - throw new DataException("Unable to remove previous repository backup"); - } - - // Actually create backup - try (Statement stmt = this.connection.createStatement()) { - stmt.execute("BACKUP DATABASE TO 'backup/' BLOCKING AS FILES"); - } catch (SQLException e) { - throw new DataException("Unable to backup repository"); } } @Override - public void performPeriodicMaintenance() throws DataException { - // Defrag DB - takes a while! - try (Statement stmt = this.connection.createStatement()) { - LOGGER.info("performing maintenance - this will take a while"); - stmt.execute("CHECKPOINT"); - stmt.execute("CHECKPOINT DEFRAG"); - LOGGER.info("maintenance completed"); - } catch (SQLException e) { - throw new DataException("Unable to defrag repository"); + public void performPeriodicMaintenance(Long timeout) throws DataException, TimeoutException { + synchronized (CHECKPOINT_LOCK) { + + // We can only perform a CHECKPOINT if no other HSQLDB session is mid-transaction, + // otherwise the CHECKPOINT blocks for COMMITs and other threads can't open HSQLDB sessions + // due to HSQLDB blocking until CHECKPOINT finishes - i.e. deadlock. 
+ // Since we don't want to give up too easily, it's best to wait until the other transaction + // count reaches zero, and then continue. + this.blockUntilNoOtherTransactions(timeout); + + // Defrag DB - takes a while! + try (Statement stmt = this.connection.createStatement()) { + LOGGER.info("performing maintenance - this will take a while"); + stmt.execute("CHECKPOINT"); + stmt.execute("CHECKPOINT DEFRAG"); + LOGGER.info("maintenance completed"); + } catch (SQLException e) { + throw new DataException("Unable to defrag repository"); + } } } @Override public void exportNodeLocalData() throws DataException { - // Create the qortal-backup folder if it doesn't exist - Path backupPath = Paths.get("qortal-backup"); - try { - Files.createDirectories(backupPath); - } catch (IOException e) { - LOGGER.info("Unable to create backup folder"); - throw new DataException("Unable to create backup folder"); - } - - try { - // Load trade bot data - List allTradeBotData = this.getCrossChainRepository().getAllTradeBotData(); - JSONArray allTradeBotDataJson = new JSONArray(); - for (TradeBotData tradeBotData : allTradeBotData) { - JSONObject tradeBotDataJson = tradeBotData.toJson(); - allTradeBotDataJson.put(tradeBotDataJson); - } - - // We need to combine existing TradeBotStates data before overwriting - String fileName = "qortal-backup/TradeBotStates.json"; - File tradeBotStatesBackupFile = new File(fileName); - if (tradeBotStatesBackupFile.exists()) { - String jsonString = new String(Files.readAllBytes(Paths.get(fileName))); - JSONArray allExistingTradeBotData = new JSONArray(jsonString); - Iterator iterator = allExistingTradeBotData.iterator(); - while(iterator.hasNext()) { - JSONObject existingTradeBotData = (JSONObject)iterator.next(); - String existingTradePrivateKey = (String) existingTradeBotData.get("tradePrivateKey"); - // Check if we already have an entry for this trade - boolean found = allTradeBotData.stream().anyMatch(tradeBotData -> Base58.encode(tradeBotData.getTradePrivateKey()).equals(existingTradePrivateKey)); - if (found == false) - // We need to add this to our list - allTradeBotDataJson.put(existingTradeBotData); - } - } - - FileWriter writer = new FileWriter(fileName); - writer.write(allTradeBotDataJson.toString()); - writer.close(); - LOGGER.info("Exported sensitive/node-local data: trade bot states"); - - } catch (DataException | IOException e) { - throw new DataException("Unable to export trade bot states from repository"); - } + HSQLDBImportExport.backupTradeBotStates(this); + HSQLDBImportExport.backupMintingAccounts(this); } @Override - public void importDataFromFile(String filename) throws DataException { - LOGGER.info(() -> String.format("Importing data into repository from %s", filename)); - try { - String jsonString = new String(Files.readAllBytes(Paths.get(filename))); - JSONArray tradeBotDataToImport = new JSONArray(jsonString); - Iterator iterator = tradeBotDataToImport.iterator(); - while(iterator.hasNext()) { - JSONObject tradeBotDataJson = (JSONObject)iterator.next(); - TradeBotData tradeBotData = TradeBotData.fromJson(tradeBotDataJson); - this.getCrossChainRepository().save(tradeBotData); - } - } catch (IOException e) { - throw new DataException("Unable to import sensitive/node-local trade bot states to repository: " + e.getMessage()); - } - LOGGER.info(() -> String.format("Imported trade bot states into repository from %s", filename)); + public void importDataFromFile(String filename) throws DataException, IOException { + HSQLDBImportExport.importDataFromFile(filename, 
this); } @Override @@ -541,22 +499,22 @@ public class HSQLDBRepository implements Repository { return matcher.group(2); } - private static String buildBackupUrl(String dbPathname) { + private static String buildBackupUrl(String dbPathname, String backupName) { Path oldRepoPath = Paths.get(dbPathname); Path oldRepoDirPath = oldRepoPath.getParent(); Path oldRepoFilePath = oldRepoPath.getFileName(); // Try to open backup. We need to remove "create=true" and insert "backup" dir before final filename. - String backupUrlTemplate = "jdbc:hsqldb:file:%s%sbackup%s%s;create=false;hsqldb.full_log_replay=true"; - return String.format(backupUrlTemplate, oldRepoDirPath.toString(), File.separator, File.separator, oldRepoFilePath.toString()); + String backupUrlTemplate = "jdbc:hsqldb:file:%s%s%s%s%s;create=false;hsqldb.full_log_replay=true"; + return String.format(backupUrlTemplate, oldRepoDirPath.toString(), File.separator, backupName, File.separator, oldRepoFilePath.toString()); } - /* package */ static void attemptRecovery(String connectionUrl) throws DataException { + /* package */ static void attemptRecovery(String connectionUrl, String name) throws DataException { String dbPathname = getDbPathname(connectionUrl); if (dbPathname == null) throw new DataException("Unable to locate repository for backup?"); - String backupUrl = buildBackupUrl(dbPathname); + String backupUrl = buildBackupUrl(dbPathname, name); Path oldRepoDirPath = Paths.get(dbPathname).getParent(); // Attempt connection to backup to see if it is viable @@ -1059,4 +1017,51 @@ public class HSQLDBRepository implements Repository { return DEADLOCK_ERROR_CODE.equals(e.getErrorCode()); } + private int otherTransactionsCount() throws DataException { + // We can only perform a CHECKPOINT if no other HSQLDB session is mid-transaction, + // otherwise the CHECKPOINT blocks for COMMITs and other threads can't open HSQLDB sessions + // due to HSQLDB blocking until CHECKPOINT finishes - i.e. deadlock + String sql = "SELECT COUNT(*) " + + "FROM Information_schema.system_sessions " + + "WHERE transaction = TRUE AND session_id != ?"; + try { + PreparedStatement pstmt = this.cachePreparedStatement(sql); + pstmt.setLong(1, this.sessionId); + + if (!pstmt.execute()) + throw new DataException("Unable to check repository session status"); + + try (ResultSet resultSet = pstmt.getResultSet()) { + if (resultSet == null || !resultSet.next()) + // Failed to even find HSQLDB session info! 
+ throw new DataException("No results when checking repository session status"); + + int transactionCount = resultSet.getInt(1); + + return transactionCount; + } + } catch (SQLException e) { + throw new DataException("Unable to check repository session status", e); + } + } + + private void blockUntilNoOtherTransactions(Long timeout) throws DataException, TimeoutException { + try { + long startTime = System.currentTimeMillis(); + while (this.otherTransactionsCount() > 0) { + // Wait and try again + LOGGER.debug("Waiting for repository..."); + Thread.sleep(1000L); + + if (timeout != null) { + if (System.currentTimeMillis() - startTime >= timeout) { + throw new TimeoutException("Timed out waiting for repository to become available"); + } + } + } + } catch (InterruptedException e) { + throw new DataException("Interrupted before repository became available"); + } + } + } diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepositoryFactory.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepositoryFactory.java index be9c09eb..64f6be8c 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepositoryFactory.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepositoryFactory.java @@ -54,7 +54,7 @@ public class HSQLDBRepositoryFactory implements RepositoryFactory { throw new DataException("Unable to read repository: " + e.getMessage(), e); // Attempt recovery? - HSQLDBRepository.attemptRecovery(connectionUrl); + HSQLDBRepository.attemptRecovery(connectionUrl, "backup"); } this.connectionPool = new HSQLDBPool(Settings.getInstance().getRepositoryConnectionPoolSize()); diff --git a/src/main/java/org/qortal/settings/Settings.java b/src/main/java/org/qortal/settings/Settings.java index 230ead8a..fa186078 100644 --- a/src/main/java/org/qortal/settings/Settings.java +++ b/src/main/java/org/qortal/settings/Settings.java @@ -74,6 +74,9 @@ public class Settings { }; private Boolean apiRestricted; private String apiKey = null; + /** Whether to disable API key or loopback address checking + * IMPORTANT: do not disable for shared nodes or low-security local networks */ + private boolean apiKeyDisabled = false; private boolean apiLoggingEnabled = false; private boolean apiDocumentationEnabled = false; // Both of these need to be set for API to use SSL @@ -98,6 +101,12 @@ public class Settings { private long repositoryBackupInterval = 0; // ms /** Whether to show a notification when we backup repository. */ private boolean showBackupNotification = false; + /** Minimum time between repository maintenance attempts (ms) */ + private long repositoryMaintenanceMinInterval = 7 * 24 * 60 * 60 * 1000L; // 7 days (ms) default + /** Maximum time between repository maintenance attempts (ms) (0 if disabled). */ + private long repositoryMaintenanceMaxInterval = 30 * 24 * 60 * 60 * 1000L; // 30 days (ms) default + /** Whether to show a notification when we run scheduled maintenance. */ + private boolean showMaintenanceNotification = false; /** How long between repository checkpoints (ms). */ private long repositoryCheckpointInterval = 60 * 60 * 1000L; // 1 hour (ms) default /** Whether to show a notification when we perform repository 'checkpoint'. */ @@ -106,7 +115,7 @@ public class Settings { private int blockCacheSize = 10; /** How long to keep old, full, AT state data (ms). */ - private long atStatesMaxLifetime = 2 * 7 * 24 * 60 * 60 * 1000L; // milliseconds + private long atStatesMaxLifetime = 5 * 24 * 60 * 60 * 1000L; // milliseconds /** How often to attempt AT state trimming (ms). 
*/ private long atStatesTrimInterval = 5678L; // milliseconds /** Block height range to scan for trimmable AT states.
@@ -121,6 +130,36 @@ public class Settings { * This has a significant effect on execution time. */ private int onlineSignaturesTrimBatchSize = 100; // blocks + + /** Whether we should prune old data to reduce database size + * This prevents the node from being able to serve older blocks */ + private boolean topOnly = false; + /** The amount of recent blocks we should keep when pruning */ + private int pruneBlockLimit = 1450; + + /** How often to attempt AT state pruning (ms). */ + private long atStatesPruneInterval = 3219L; // milliseconds + /** Block height range to scan for prunable AT states.
+ * This has a significant effect on execution time. */ + private int atStatesPruneBatchSize = 25; // blocks + + /** How often to attempt block pruning (ms). */ + private long blockPruneInterval = 3219L; // milliseconds + /** Block height range to scan for prunable blocks.
+ * This has a significant effect on execution time. */ + private int blockPruneBatchSize = 10000; // blocks + + + /** Whether we should archive old data to reduce the database size */ + private boolean archiveEnabled = true; + /** How often to attempt archiving (ms). */ + private long archiveInterval = 7171L; // milliseconds + + + /** Whether to automatically bootstrap instead of syncing from genesis */ + private boolean bootstrap = true; + + // Peer-to-peer related private boolean isTestNet = false; /** Port number for inbound peer-to-peer connections. */ @@ -179,12 +218,27 @@ public class Settings { private int repositoryConnectionPoolSize = 100; private List fixedNetwork; + // Export/import + private String exportPath = "qortal-backup"; + + // Bootstrap + private String bootstrapFilenamePrefix = ""; + + // Bootstrap sources + private String[] bootstrapHosts = new String[] { + "http://bootstrap.qortal.org", + "http://cinfu1.crowetic.com" + }; + // Auto-update sources private String[] autoUpdateRepos = new String[] { "https://github.com/Qortal/qortal/raw/%s/qortal.update", "https://raw.githubusercontent.com@151.101.16.133/Qortal/qortal/%s/qortal.update" }; + // Lists + private String listsPath = "lists"; + /** Array of NTP server hostnames. */ private String[] ntpServers = new String[] { "pool.ntp.org", @@ -412,6 +466,10 @@ public class Settings { return this.apiKey; } + public boolean isApiKeyDisabled() { + return this.apiKeyDisabled; + } + public boolean isApiLoggingEnabled() { return this.apiLoggingEnabled; } @@ -552,6 +610,14 @@ public class Settings { return this.repositoryConnectionPoolSize; } + public String getExportPath() { + return this.exportPath; + } + + public String getBootstrapFilenamePrefix() { + return this.bootstrapFilenamePrefix; + } + public boolean isFastSyncEnabled() { return this.fastSyncEnabled; } @@ -574,6 +640,14 @@ public class Settings { return this.autoUpdateRepos; } + public String[] getBootstrapHosts() { + return this.bootstrapHosts; + } + + public String getListsPath() { + return this.listsPath; + } + public String[] getNtpServers() { return this.ntpServers; } @@ -590,6 +664,18 @@ public class Settings { return this.showBackupNotification; } + public long getRepositoryMaintenanceMinInterval() { + return this.repositoryMaintenanceMinInterval; + } + + public long getRepositoryMaintenanceMaxInterval() { + return this.repositoryMaintenanceMaxInterval; + } + + public boolean getShowMaintenanceNotification() { + return this.showMaintenanceNotification; + } + public long getRepositoryCheckpointInterval() { return this.repositoryCheckpointInterval; } @@ -598,6 +684,10 @@ public class Settings { return this.showCheckpointNotification; } + public List getFixedNetwork() { + return fixedNetwork; + } + public long getAtStatesMaxLifetime() { return this.atStatesMaxLifetime; } @@ -622,10 +712,48 @@ public class Settings { return this.onlineSignaturesTrimBatchSize; } - public List getFixedNetwork() { - return fixedNetwork; + public boolean isTopOnly() { + return this.topOnly; } + public int getPruneBlockLimit() { + return this.pruneBlockLimit; + } + + public long getAtStatesPruneInterval() { + return this.atStatesPruneInterval; + } + + public int getAtStatesPruneBatchSize() { + return this.atStatesPruneBatchSize; + } + + public long getBlockPruneInterval() { + return this.blockPruneInterval; + } + + public int getBlockPruneBatchSize() { + return this.blockPruneBatchSize; + } + + + public boolean isArchiveEnabled() { + if (this.topOnly) { + return false; + } + return 
this.archiveEnabled; + } + + public long getArchiveInterval() { + return this.archiveInterval; + } + + + public boolean getBootstrap() { + return this.bootstrap; + } + + public String getDataPath() { return this.dataPath; } diff --git a/src/main/java/org/qortal/transaction/AccountFlagsTransaction.java b/src/main/java/org/qortal/transaction/AccountFlagsTransaction.java index 355340b6..4362b1a9 100644 --- a/src/main/java/org/qortal/transaction/AccountFlagsTransaction.java +++ b/src/main/java/org/qortal/transaction/AccountFlagsTransaction.java @@ -48,6 +48,11 @@ public class AccountFlagsTransaction extends Transaction { return ValidationResult.NO_FLAG_PERMISSION; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { Account target = this.getTarget(); diff --git a/src/main/java/org/qortal/transaction/AccountLevelTransaction.java b/src/main/java/org/qortal/transaction/AccountLevelTransaction.java index da986344..18324c34 100644 --- a/src/main/java/org/qortal/transaction/AccountLevelTransaction.java +++ b/src/main/java/org/qortal/transaction/AccountLevelTransaction.java @@ -49,6 +49,11 @@ public class AccountLevelTransaction extends Transaction { return ValidationResult.NO_FLAG_PERMISSION; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { Account target = getTarget(); diff --git a/src/main/java/org/qortal/transaction/AddGroupAdminTransaction.java b/src/main/java/org/qortal/transaction/AddGroupAdminTransaction.java index d62bd451..15dc51bf 100644 --- a/src/main/java/org/qortal/transaction/AddGroupAdminTransaction.java +++ b/src/main/java/org/qortal/transaction/AddGroupAdminTransaction.java @@ -84,6 +84,11 @@ public class AddGroupAdminTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group adminship @@ -98,4 +103,4 @@ public class AddGroupAdminTransaction extends Transaction { group.unpromoteToAdmin(this.addGroupAdminTransactionData); } -} \ No newline at end of file +} diff --git a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java index aaa5bd48..3fc8356b 100644 --- a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java +++ b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java @@ -195,6 +195,11 @@ public class ArbitraryTransaction extends Transaction { arbitraryTransactionData.getFee()); } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Wrap and delegate payment processing to Payment class. 
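The Settings additions above introduce a scheduled-maintenance window (repositoryMaintenanceMinInterval / repositoryMaintenanceMaxInterval) alongside the new pruning, archiving and bootstrap options. Below is a minimal sketch of how a scheduler might consult that window; only the two Settings getters come from this diff, while the class name and the lastRunTimestamp bookkeeping are assumptions for illustration.

import org.qortal.settings.Settings;

// Hypothetical sketch (not part of this diff): deciding whether scheduled repository
// maintenance is due, using only the two getters added above. Everything else here,
// including the class name and the lastRunTimestamp bookkeeping, is illustrative.
public class MaintenanceWindowSketch {

    /** Returns true once at least the minimum interval has elapsed since the last run. */
    public static boolean maintenanceDue(long lastRunTimestamp, long now) {
        Settings settings = Settings.getInstance();

        // A maximum interval of 0 is documented above as "disabled"
        if (settings.getRepositoryMaintenanceMaxInterval() <= 0)
            return false;

        long elapsed = now - lastRunTimestamp;
        return elapsed >= settings.getRepositoryMaintenanceMinInterval();
    }
}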
diff --git a/src/main/java/org/qortal/transaction/AtTransaction.java b/src/main/java/org/qortal/transaction/AtTransaction.java index a7e72b2a..c570bb65 100644 --- a/src/main/java/org/qortal/transaction/AtTransaction.java +++ b/src/main/java/org/qortal/transaction/AtTransaction.java @@ -80,6 +80,11 @@ public class AtTransaction extends Transaction { return Arrays.equals(atAccount.getLastReference(), atTransactionData.getReference()); } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public ValidationResult isValid() throws DataException { // Check recipient address is valid diff --git a/src/main/java/org/qortal/transaction/BuyNameTransaction.java b/src/main/java/org/qortal/transaction/BuyNameTransaction.java index ad3e0c8d..c4e5f29c 100644 --- a/src/main/java/org/qortal/transaction/BuyNameTransaction.java +++ b/src/main/java/org/qortal/transaction/BuyNameTransaction.java @@ -6,6 +6,7 @@ import java.util.List; import org.qortal.account.Account; import org.qortal.asset.Asset; import org.qortal.block.BlockChain; +import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.crypto.Crypto; import org.qortal.data.naming.NameData; import org.qortal.data.transaction.BuyNameTransactionData; @@ -98,6 +99,17 @@ public class BuyNameTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData; + + // Rebuild this name in the Names table from the transaction history + // This is necessary because in some rare cases names can be missing from the Names table after registration + // but we have been unable to reproduce the issue and track down the root cause + NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck(); + namesDatabaseIntegrityCheck.rebuildName(buyNameTransactionData.getName(), this.repository); + } + @Override public void process() throws DataException { // Buy Name diff --git a/src/main/java/org/qortal/transaction/CancelAssetOrderTransaction.java b/src/main/java/org/qortal/transaction/CancelAssetOrderTransaction.java index b8b70dde..955f62f4 100644 --- a/src/main/java/org/qortal/transaction/CancelAssetOrderTransaction.java +++ b/src/main/java/org/qortal/transaction/CancelAssetOrderTransaction.java @@ -62,6 +62,11 @@ public class CancelAssetOrderTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Mark Order as completed so no more trades can happen diff --git a/src/main/java/org/qortal/transaction/CancelGroupBanTransaction.java b/src/main/java/org/qortal/transaction/CancelGroupBanTransaction.java index e01be7be..483dfc6f 100644 --- a/src/main/java/org/qortal/transaction/CancelGroupBanTransaction.java +++ b/src/main/java/org/qortal/transaction/CancelGroupBanTransaction.java @@ -83,6 +83,11 @@ public class CancelGroupBanTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group Membership diff --git a/src/main/java/org/qortal/transaction/CancelGroupInviteTransaction.java b/src/main/java/org/qortal/transaction/CancelGroupInviteTransaction.java index ea228215..800f2444 100644 --- 
a/src/main/java/org/qortal/transaction/CancelGroupInviteTransaction.java +++ b/src/main/java/org/qortal/transaction/CancelGroupInviteTransaction.java @@ -83,6 +83,11 @@ public class CancelGroupInviteTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group Membership diff --git a/src/main/java/org/qortal/transaction/CancelSellNameTransaction.java b/src/main/java/org/qortal/transaction/CancelSellNameTransaction.java index f241db47..788492a9 100644 --- a/src/main/java/org/qortal/transaction/CancelSellNameTransaction.java +++ b/src/main/java/org/qortal/transaction/CancelSellNameTransaction.java @@ -79,6 +79,11 @@ public class CancelSellNameTransaction extends Transaction { } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Name diff --git a/src/main/java/org/qortal/transaction/ChatTransaction.java b/src/main/java/org/qortal/transaction/ChatTransaction.java index ccef1f37..2202d44a 100644 --- a/src/main/java/org/qortal/transaction/ChatTransaction.java +++ b/src/main/java/org/qortal/transaction/ChatTransaction.java @@ -11,6 +11,7 @@ import org.qortal.crypto.MemoryPoW; import org.qortal.data.transaction.ChatTransactionData; import org.qortal.data.transaction.TransactionData; import org.qortal.group.Group; +import org.qortal.list.ResourceListManager; import org.qortal.repository.DataException; import org.qortal.repository.GroupRepository; import org.qortal.repository.Repository; @@ -134,10 +135,21 @@ public class ChatTransaction extends Transaction { return true; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public ValidationResult isValid() throws DataException { // Nonce checking is done via isSignatureValid() as that method is only called once per import + // Check for blacklisted author by address + ResourceListManager listManager = ResourceListManager.getInstance(); + if (listManager.isAddressInBlacklist(this.chatTransactionData.getSender())) { + return ValidationResult.ADDRESS_IN_BLACKLIST; + } + // If we exist in the repository then we've been imported as unconfirmed, // but we don't want to make it into a block, so return fake non-OK result. 
if (this.repository.getTransactionRepository().exists(this.chatTransactionData.getSignature())) diff --git a/src/main/java/org/qortal/transaction/CreateAssetOrderTransaction.java b/src/main/java/org/qortal/transaction/CreateAssetOrderTransaction.java index 36cccf42..24e57a4e 100644 --- a/src/main/java/org/qortal/transaction/CreateAssetOrderTransaction.java +++ b/src/main/java/org/qortal/transaction/CreateAssetOrderTransaction.java @@ -135,6 +135,11 @@ public class CreateAssetOrderTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Order Id is transaction's signature diff --git a/src/main/java/org/qortal/transaction/CreateGroupTransaction.java b/src/main/java/org/qortal/transaction/CreateGroupTransaction.java index 7ed61684..6f4a3634 100644 --- a/src/main/java/org/qortal/transaction/CreateGroupTransaction.java +++ b/src/main/java/org/qortal/transaction/CreateGroupTransaction.java @@ -92,6 +92,11 @@ public class CreateGroupTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Create Group diff --git a/src/main/java/org/qortal/transaction/CreatePollTransaction.java b/src/main/java/org/qortal/transaction/CreatePollTransaction.java index 4c4b3a0a..a56322a7 100644 --- a/src/main/java/org/qortal/transaction/CreatePollTransaction.java +++ b/src/main/java/org/qortal/transaction/CreatePollTransaction.java @@ -106,6 +106,11 @@ public class CreatePollTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Publish poll to allow voting diff --git a/src/main/java/org/qortal/transaction/DeployAtTransaction.java b/src/main/java/org/qortal/transaction/DeployAtTransaction.java index 86e04d56..f3024b57 100644 --- a/src/main/java/org/qortal/transaction/DeployAtTransaction.java +++ b/src/main/java/org/qortal/transaction/DeployAtTransaction.java @@ -203,6 +203,11 @@ public class DeployAtTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { ensureATAddress(this.deployAtTransactionData); diff --git a/src/main/java/org/qortal/transaction/GenesisTransaction.java b/src/main/java/org/qortal/transaction/GenesisTransaction.java index 067ff183..74a84a7d 100644 --- a/src/main/java/org/qortal/transaction/GenesisTransaction.java +++ b/src/main/java/org/qortal/transaction/GenesisTransaction.java @@ -100,6 +100,11 @@ public class GenesisTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { Account recipient = new Account(repository, this.genesisTransactionData.getRecipient()); diff --git a/src/main/java/org/qortal/transaction/GroupApprovalTransaction.java b/src/main/java/org/qortal/transaction/GroupApprovalTransaction.java index d5cf66f7..1c8bb709 100644 --- a/src/main/java/org/qortal/transaction/GroupApprovalTransaction.java +++ b/src/main/java/org/qortal/transaction/GroupApprovalTransaction.java @@ -66,6 +66,11 @@ public class GroupApprovalTransaction extends 
Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Find previous approval decision (if any) by this admin for pending transaction diff --git a/src/main/java/org/qortal/transaction/GroupBanTransaction.java b/src/main/java/org/qortal/transaction/GroupBanTransaction.java index d3458ebe..c9a6c307 100644 --- a/src/main/java/org/qortal/transaction/GroupBanTransaction.java +++ b/src/main/java/org/qortal/transaction/GroupBanTransaction.java @@ -87,6 +87,11 @@ public class GroupBanTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group Membership diff --git a/src/main/java/org/qortal/transaction/GroupInviteTransaction.java b/src/main/java/org/qortal/transaction/GroupInviteTransaction.java index a66f7584..f3b08f59 100644 --- a/src/main/java/org/qortal/transaction/GroupInviteTransaction.java +++ b/src/main/java/org/qortal/transaction/GroupInviteTransaction.java @@ -88,6 +88,11 @@ public class GroupInviteTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group Membership diff --git a/src/main/java/org/qortal/transaction/GroupKickTransaction.java b/src/main/java/org/qortal/transaction/GroupKickTransaction.java index d9be8161..84de3a59 100644 --- a/src/main/java/org/qortal/transaction/GroupKickTransaction.java +++ b/src/main/java/org/qortal/transaction/GroupKickTransaction.java @@ -89,6 +89,11 @@ public class GroupKickTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group Membership diff --git a/src/main/java/org/qortal/transaction/IssueAssetTransaction.java b/src/main/java/org/qortal/transaction/IssueAssetTransaction.java index e9422dcd..52428963 100644 --- a/src/main/java/org/qortal/transaction/IssueAssetTransaction.java +++ b/src/main/java/org/qortal/transaction/IssueAssetTransaction.java @@ -92,6 +92,11 @@ public class IssueAssetTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Issue asset diff --git a/src/main/java/org/qortal/transaction/JoinGroupTransaction.java b/src/main/java/org/qortal/transaction/JoinGroupTransaction.java index ed69ed4e..bc62c629 100644 --- a/src/main/java/org/qortal/transaction/JoinGroupTransaction.java +++ b/src/main/java/org/qortal/transaction/JoinGroupTransaction.java @@ -67,6 +67,11 @@ public class JoinGroupTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group Membership diff --git a/src/main/java/org/qortal/transaction/LeaveGroupTransaction.java b/src/main/java/org/qortal/transaction/LeaveGroupTransaction.java index ad31e565..1e8f8c6c 100644 --- a/src/main/java/org/qortal/transaction/LeaveGroupTransaction.java +++ b/src/main/java/org/qortal/transaction/LeaveGroupTransaction.java @@ -67,6 +67,11 @@ public class 
LeaveGroupTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group Membership diff --git a/src/main/java/org/qortal/transaction/MessageTransaction.java b/src/main/java/org/qortal/transaction/MessageTransaction.java index ef6e6c76..d02b6fdd 100644 --- a/src/main/java/org/qortal/transaction/MessageTransaction.java +++ b/src/main/java/org/qortal/transaction/MessageTransaction.java @@ -239,6 +239,11 @@ public class MessageTransaction extends Transaction { getPaymentData(), this.messageTransactionData.getFee(), true); } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // If we have no amount then there's nothing to do diff --git a/src/main/java/org/qortal/transaction/MultiPaymentTransaction.java b/src/main/java/org/qortal/transaction/MultiPaymentTransaction.java index 4c3f75dc..34cd0147 100644 --- a/src/main/java/org/qortal/transaction/MultiPaymentTransaction.java +++ b/src/main/java/org/qortal/transaction/MultiPaymentTransaction.java @@ -67,6 +67,11 @@ public class MultiPaymentTransaction extends Transaction { return new Payment(this.repository).isProcessable(this.multiPaymentTransactionData.getSenderPublicKey(), payments, this.multiPaymentTransactionData.getFee()); } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Wrap and delegate payment processing to Payment class. diff --git a/src/main/java/org/qortal/transaction/PaymentTransaction.java b/src/main/java/org/qortal/transaction/PaymentTransaction.java index f6caaef5..4869db76 100644 --- a/src/main/java/org/qortal/transaction/PaymentTransaction.java +++ b/src/main/java/org/qortal/transaction/PaymentTransaction.java @@ -61,6 +61,11 @@ public class PaymentTransaction extends Transaction { return new Payment(this.repository).isProcessable(this.paymentTransactionData.getSenderPublicKey(), getPaymentData(), this.paymentTransactionData.getFee()); } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Wrap and delegate payment processing to Payment class. 
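The ChatTransaction change earlier in this diff rejects CHAT transactions from blacklisted senders via the new ADDRESS_IN_BLACKLIST validation result. The short sketch below shows that lookup pattern in isolation; the wrapper class and method are made up, and only the ResourceListManager call and the enum value come from this change.

import org.qortal.list.ResourceListManager;
import org.qortal.transaction.Transaction.ValidationResult;

// Hypothetical sketch (not part of this diff): the blacklist lookup used by the new
// check at the top of ChatTransaction.isValid(). The helper class/method are illustrative.
public class ChatBlacklistSketch {

    public static ValidationResult checkSender(String senderAddress) {
        ResourceListManager listManager = ResourceListManager.getInstance();

        // Blacklisted senders are rejected before the usual nonce/PoW and repository checks
        if (listManager.isAddressInBlacklist(senderAddress))
            return ValidationResult.ADDRESS_IN_BLACKLIST;

        return ValidationResult.OK;
    }
}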
diff --git a/src/main/java/org/qortal/transaction/PresenceTransaction.java b/src/main/java/org/qortal/transaction/PresenceTransaction.java index 729270e0..0d28d382 100644 --- a/src/main/java/org/qortal/transaction/PresenceTransaction.java +++ b/src/main/java/org/qortal/transaction/PresenceTransaction.java @@ -149,6 +149,11 @@ public class PresenceTransaction extends Transaction { return true; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public ValidationResult isValid() throws DataException { // Nonce checking is done via isSignatureValid() as that method is only called once per import diff --git a/src/main/java/org/qortal/transaction/PublicizeTransaction.java b/src/main/java/org/qortal/transaction/PublicizeTransaction.java index 75cfd2a2..c03c8283 100644 --- a/src/main/java/org/qortal/transaction/PublicizeTransaction.java +++ b/src/main/java/org/qortal/transaction/PublicizeTransaction.java @@ -80,6 +80,11 @@ public class PublicizeTransaction extends Transaction { return true; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public ValidationResult isValid() throws DataException { // There can be only one diff --git a/src/main/java/org/qortal/transaction/RegisterNameTransaction.java b/src/main/java/org/qortal/transaction/RegisterNameTransaction.java index 66c1fc8b..d0a2f49c 100644 --- a/src/main/java/org/qortal/transaction/RegisterNameTransaction.java +++ b/src/main/java/org/qortal/transaction/RegisterNameTransaction.java @@ -6,6 +6,7 @@ import java.util.List; import org.qortal.account.Account; import org.qortal.asset.Asset; import org.qortal.block.BlockChain; +import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.crypto.Crypto; import org.qortal.data.transaction.RegisterNameTransactionData; import org.qortal.data.transaction.TransactionData; @@ -88,6 +89,17 @@ public class RegisterNameTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) transactionData; + + // Rebuild this name in the Names table from the transaction history + // This is necessary because in some rare cases names can be missing from the Names table after registration + // but we have been unable to reproduce the issue and track down the root cause + NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck(); + namesDatabaseIntegrityCheck.rebuildName(registerNameTransactionData.getName(), this.repository); + } + @Override public void process() throws DataException { // Register Name diff --git a/src/main/java/org/qortal/transaction/RemoveGroupAdminTransaction.java b/src/main/java/org/qortal/transaction/RemoveGroupAdminTransaction.java index 43f1fc8f..3e5f1e6d 100644 --- a/src/main/java/org/qortal/transaction/RemoveGroupAdminTransaction.java +++ b/src/main/java/org/qortal/transaction/RemoveGroupAdminTransaction.java @@ -87,6 +87,11 @@ public class RemoveGroupAdminTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group adminship @@ -107,4 +112,4 @@ public class RemoveGroupAdminTransaction extends Transaction { this.repository.getTransactionRepository().save(this.removeGroupAdminTransactionData); } -} \ No newline at end of file +} diff 
--git a/src/main/java/org/qortal/transaction/RewardShareTransaction.java b/src/main/java/org/qortal/transaction/RewardShareTransaction.java index 0e21c0c6..be68196d 100644 --- a/src/main/java/org/qortal/transaction/RewardShareTransaction.java +++ b/src/main/java/org/qortal/transaction/RewardShareTransaction.java @@ -159,6 +159,11 @@ public class RewardShareTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { PublicKeyAccount mintingAccount = getMintingAccount(); diff --git a/src/main/java/org/qortal/transaction/SellNameTransaction.java b/src/main/java/org/qortal/transaction/SellNameTransaction.java index 81bd9ff7..c2ab2eb9 100644 --- a/src/main/java/org/qortal/transaction/SellNameTransaction.java +++ b/src/main/java/org/qortal/transaction/SellNameTransaction.java @@ -5,6 +5,7 @@ import java.util.List; import org.qortal.account.Account; import org.qortal.asset.Asset; +import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.data.naming.NameData; import org.qortal.data.transaction.SellNameTransactionData; import org.qortal.data.transaction.TransactionData; @@ -89,6 +90,17 @@ public class SellNameTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData; + + // Rebuild this name in the Names table from the transaction history + // This is necessary because in some rare cases names can be missing from the Names table after registration + // but we have been unable to reproduce the issue and track down the root cause + NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck(); + namesDatabaseIntegrityCheck.rebuildName(sellNameTransactionData.getName(), this.repository); + } + @Override public void process() throws DataException { // Sell Name diff --git a/src/main/java/org/qortal/transaction/SetGroupTransaction.java b/src/main/java/org/qortal/transaction/SetGroupTransaction.java index 084044a7..48248b69 100644 --- a/src/main/java/org/qortal/transaction/SetGroupTransaction.java +++ b/src/main/java/org/qortal/transaction/SetGroupTransaction.java @@ -56,6 +56,11 @@ public class SetGroupTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { Account creator = getCreator(); diff --git a/src/main/java/org/qortal/transaction/Transaction.java b/src/main/java/org/qortal/transaction/Transaction.java index f026d0ae..85158ec0 100644 --- a/src/main/java/org/qortal/transaction/Transaction.java +++ b/src/main/java/org/qortal/transaction/Transaction.java @@ -247,6 +247,7 @@ public abstract class Transaction { INVALID_GROUP_BLOCK_DELAY(93), INCORRECT_NONCE(94), INVALID_TIMESTAMP_SIGNATURE(95), + ADDRESS_IN_BLACKLIST(96), INVALID_BUT_OK(999), NOT_YET_RELEASED(1000); @@ -798,6 +799,8 @@ public abstract class Transaction { // Fix up approval status this.setInitialApprovalStatus(); + this.preProcess(); + ValidationResult validationResult = this.isValidUnconfirmed(); if (validationResult != ValidationResult.OK) return validationResult; @@ -898,6 +901,14 @@ public abstract class Transaction { return ValidationResult.OK; } + /** + * * Pre-process a transaction before validating or processing the 
block + * This allows for any database integrity checks prior to validation. + * + * @throws DataException + */ + public abstract void preProcess() throws DataException; + /** * Actually process a transaction, updating the blockchain. *

diff --git a/src/main/java/org/qortal/transaction/TransferAssetTransaction.java b/src/main/java/org/qortal/transaction/TransferAssetTransaction.java index a2855a35..79d485a5 100644 --- a/src/main/java/org/qortal/transaction/TransferAssetTransaction.java +++ b/src/main/java/org/qortal/transaction/TransferAssetTransaction.java @@ -61,6 +61,11 @@ public class TransferAssetTransaction extends Transaction { return new Payment(this.repository).isProcessable(this.transferAssetTransactionData.getSenderPublicKey(), getPaymentData(), this.transferAssetTransactionData.getFee()); } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Wrap asset transfer as a payment and delegate processing to Payment class. diff --git a/src/main/java/org/qortal/transaction/TransferPrivsTransaction.java b/src/main/java/org/qortal/transaction/TransferPrivsTransaction.java index d64e953e..f77dac15 100644 --- a/src/main/java/org/qortal/transaction/TransferPrivsTransaction.java +++ b/src/main/java/org/qortal/transaction/TransferPrivsTransaction.java @@ -68,6 +68,11 @@ public class TransferPrivsTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { Account sender = this.getSender(); diff --git a/src/main/java/org/qortal/transaction/UpdateAssetTransaction.java b/src/main/java/org/qortal/transaction/UpdateAssetTransaction.java index 2a7af23c..16e5641d 100644 --- a/src/main/java/org/qortal/transaction/UpdateAssetTransaction.java +++ b/src/main/java/org/qortal/transaction/UpdateAssetTransaction.java @@ -90,6 +90,11 @@ public class UpdateAssetTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Asset diff --git a/src/main/java/org/qortal/transaction/UpdateGroupTransaction.java b/src/main/java/org/qortal/transaction/UpdateGroupTransaction.java index 6751be33..9664ccbf 100644 --- a/src/main/java/org/qortal/transaction/UpdateGroupTransaction.java +++ b/src/main/java/org/qortal/transaction/UpdateGroupTransaction.java @@ -109,6 +109,11 @@ public class UpdateGroupTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { // Update Group diff --git a/src/main/java/org/qortal/transaction/UpdateNameTransaction.java b/src/main/java/org/qortal/transaction/UpdateNameTransaction.java index ebfde97c..c9eedbae 100644 --- a/src/main/java/org/qortal/transaction/UpdateNameTransaction.java +++ b/src/main/java/org/qortal/transaction/UpdateNameTransaction.java @@ -2,9 +2,11 @@ package org.qortal.transaction; import java.util.Collections; import java.util.List; +import java.util.Objects; import org.qortal.account.Account; import org.qortal.asset.Asset; +import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.crypto.Crypto; import org.qortal.data.naming.NameData; import org.qortal.data.transaction.TransactionData; @@ -124,6 +126,22 @@ public class UpdateNameTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) 
transactionData; + + // Rebuild this name in the Names table from the transaction history + // This is necessary because in some rare cases names can be missing from the Names table after registration + // but we have been unable to reproduce the issue and track down the root cause + NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck(); + namesDatabaseIntegrityCheck.rebuildName(updateNameTransactionData.getName(), this.repository); + + if (!Objects.equals(updateNameTransactionData.getName(), updateNameTransactionData.getNewName())) { + // Renaming - so make sure the new name is rebuilt too + namesDatabaseIntegrityCheck.rebuildName(updateNameTransactionData.getNewName(), this.repository); + } + } + @Override public void process() throws DataException { // Update Name diff --git a/src/main/java/org/qortal/transaction/VoteOnPollTransaction.java b/src/main/java/org/qortal/transaction/VoteOnPollTransaction.java index 35447aa6..89eec184 100644 --- a/src/main/java/org/qortal/transaction/VoteOnPollTransaction.java +++ b/src/main/java/org/qortal/transaction/VoteOnPollTransaction.java @@ -92,6 +92,11 @@ public class VoteOnPollTransaction extends Transaction { return ValidationResult.OK; } + @Override + public void preProcess() throws DataException { + // Nothing to do + } + @Override public void process() throws DataException { String pollName = this.voteOnPollTransactionData.getPollName(); diff --git a/src/main/java/org/qortal/utils/BlockArchiveUtils.java b/src/main/java/org/qortal/utils/BlockArchiveUtils.java new file mode 100644 index 00000000..0beff026 --- /dev/null +++ b/src/main/java/org/qortal/utils/BlockArchiveUtils.java @@ -0,0 +1,78 @@ +package org.qortal.utils; + +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.BlockArchiveReader; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; + +import java.util.List; + +public class BlockArchiveUtils { + + /** + * importFromArchive + *

+ * Reads the requested block range from the archive + * and imports the BlockData and AT state data hashes + * This can be used to convert a block archive back + * into the HSQLDB, in order to make it SQL-compatible + * again. + *

+ * Note: calls discardChanges() and saveChanges(), so + * make sure that you commit any existing repository + * changes before calling this method. + * + * @param startHeight The earliest block to import + * @param endHeight The latest block to import + * @param repository A clean repository session + * @throws DataException + */ + public static void importFromArchive(int startHeight, int endHeight, Repository repository) throws DataException { + repository.discardChanges(); + final int requestedRange = endHeight+1-startHeight; + + List, List>> blockInfoList = + BlockArchiveReader.getInstance().fetchBlocksFromRange(startHeight, endHeight); + + // Ensure that we have received all of the requested blocks + if (blockInfoList == null || blockInfoList.isEmpty()) { + throw new IllegalStateException("No blocks found when importing from archive"); + } + if (blockInfoList.size() != requestedRange) { + throw new IllegalStateException("Non matching block count when importing from archive"); + } + Triple, List> firstBlock = blockInfoList.get(0); + if (firstBlock == null || firstBlock.getA().getHeight() != startHeight) { + throw new IllegalStateException("Non matching first block when importing from archive"); + } + if (blockInfoList.size() > 0) { + Triple, List> lastBlock = + blockInfoList.get(blockInfoList.size() - 1); + if (lastBlock == null || lastBlock.getA().getHeight() != endHeight) { + throw new IllegalStateException("Non matching last block when importing from archive"); + } + } + + // Everything seems okay, so go ahead with the import + for (Triple, List> blockInfo : blockInfoList) { + try { + // Save block + repository.getBlockRepository().save(blockInfo.getA()); + + // Save AT state data hashes + for (ATStateData atStateData : blockInfo.getC()) { + atStateData.setHeight(blockInfo.getA().getHeight()); + repository.getATRepository().save(atStateData); + } + + } catch (DataException e) { + repository.discardChanges(); + throw new IllegalStateException("Unable to import blocks from archive"); + } + } + repository.saveChanges(); + } + +} diff --git a/src/main/java/org/qortal/utils/SevenZ.java b/src/main/java/org/qortal/utils/SevenZ.java new file mode 100644 index 00000000..5126b292 --- /dev/null +++ b/src/main/java/org/qortal/utils/SevenZ.java @@ -0,0 +1,85 @@ +// +// Code originally written by memorynotfound +// https://memorynotfound.com/java-7z-seven-zip-example-compress-decompress-file/ +// Modified Sept 2021 by Qortal Core dev team +// + +package org.qortal.utils; + +import org.apache.commons.compress.archivers.sevenz.SevenZArchiveEntry; +import org.apache.commons.compress.archivers.sevenz.SevenZFile; +import org.apache.commons.compress.archivers.sevenz.SevenZOutputFile; +import org.qortal.gui.SplashFrame; + +import java.io.*; + +public class SevenZ { + + private SevenZ() { + + } + + public static void compress(String outputPath, File... 
files) throws IOException { + try (SevenZOutputFile out = new SevenZOutputFile(new File(outputPath))){ + for (File file : files){ + addToArchiveCompression(out, file, "."); + } + } + } + + public static void decompress(String in, File destination) throws IOException { + SevenZFile sevenZFile = new SevenZFile(new File(in)); + SevenZArchiveEntry entry; + while ((entry = sevenZFile.getNextEntry()) != null){ + if (entry.isDirectory()){ + continue; + } + File curfile = new File(destination, entry.getName()); + File parent = curfile.getParentFile(); + if (!parent.exists()) { + parent.mkdirs(); + } + long fileSize = entry.getSize(); + + FileOutputStream out = new FileOutputStream(curfile); + byte[] b = new byte[1024 * 1024]; + int count; + long extracted = 0; + + while ((count = sevenZFile.read(b)) > 0) { + out.write(b, 0, count); + extracted += count; + + int progress = (int)((double)extracted / (double)fileSize * 100); + SplashFrame.getInstance().updateStatus(String.format("Extracting %s... (%d%%)", curfile.getName(), progress)); + } + out.close(); + } + } + + private static void addToArchiveCompression(SevenZOutputFile out, File file, String dir) throws IOException { + String name = dir + File.separator + file.getName(); + if (file.isFile()){ + SevenZArchiveEntry entry = out.createArchiveEntry(file, name); + out.putArchiveEntry(entry); + + FileInputStream in = new FileInputStream(file); + byte[] b = new byte[8192]; + int count = 0; + while ((count = in.read(b)) > 0) { + out.write(b, 0, count); + } + out.closeArchiveEntry(); + + } else if (file.isDirectory()) { + File[] children = file.listFiles(); + if (children != null){ + for (File child : children){ + addToArchiveCompression(out, child, name); + } + } + } else { + System.out.println(file.getName() + " is not supported"); + } + } +} diff --git a/src/main/resources/blockchain.json b/src/main/resources/blockchain.json index d0ac9ffb..acba90da 100644 --- a/src/main/resources/blockchain.json +++ b/src/main/resources/blockchain.json @@ -11,8 +11,8 @@ "minAccountLevelToRewardShare": 5, "maxRewardSharesPerMintingAccount": 6, "founderEffectiveMintingLevel": 10, - "onlineAccountSignaturesMinLifetime": 2592000000, - "onlineAccountSignaturesMaxLifetime": 3196800000, + "onlineAccountSignaturesMinLifetime": 43200000, + "onlineAccountSignaturesMaxLifetime": 86400000, "rewardsByHeight": [ { "height": 1, "reward": 5.00 }, { "height": 259201, "reward": 4.75 }, diff --git a/src/main/resources/i18n/ApiError_de.properties b/src/main/resources/i18n/ApiError_de.properties index 490aac0d..ab7da6b7 100644 --- a/src/main/resources/i18n/ApiError_de.properties +++ b/src/main/resources/i18n/ApiError_de.properties @@ -1,14 +1,83 @@ +#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) +# Keys are from api.ApiError enum + +# "localeLang": "de", + +### Common ### +JSON = JSON nachricht konnte nicht geparsed werden + +INSUFFICIENT_BALANCE = insufficient balance + +UNAUTHORIZED = API call unauthorized + +REPOSITORY_ISSUE = repository error + +NON_PRODUCTION = this API call is not permitted for production systems + +BLOCKCHAIN_NEEDS_SYNC = blockchain needs to synchronize first + +NO_TIME_SYNC = no clock synchronization yet + +### Validation ### +INVALID_SIGNATURE = ungültige signatur INVALID_ADDRESS = ungültige adresse -INVALID_ASSET_ID = ungültige asset ID +INVALID_PUBLIC_KEY = ungültiger public key INVALID_DATA = ungültige daten -INVALID_PUBLIC_KEY = ungültiger public key +INVALID_NETWORK_ADDRESS = invalid network address -INVALID_SIGNATURE = 
ungültige signatur +ADDRESS_UNKNOWN = account address unknown -JSON = JSON nachricht konnte nicht geparsed werden +INVALID_CRITERIA = invalid search criteria + +INVALID_REFERENCE = invalid reference + +TRANSFORMATION_ERROR = could not transform JSON into transaction + +INVALID_PRIVATE_KEY = invalid private key + +INVALID_HEIGHT = invalid block height + +CANNOT_MINT = account cannot mint + +### Blocks ### +BLOCK_UNKNOWN = block unknown + +### Transactions ### +TRANSACTION_UNKNOWN = transaction unknown PUBLIC_KEY_NOT_FOUND = public key wurde nicht gefunden + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = transaction invalid: %s (%s) + +### Naming ### +NAME_UNKNOWN = name unknown + +### Asset ### +INVALID_ASSET_ID = ungültige asset ID + +INVALID_ORDER_ID = invalid asset order ID + +ORDER_UNKNOWN = unknown asset order ID + +### Groups ### +GROUP_UNKNOWN = group unknown + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = foreign blokchain or ElectrumX network issue + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = insufficient balance on foreign blockchain + +FOREIGN_BLOCKCHAIN_TOO_SOON = too soon to broadcast foreign blockchain transaction (LockTime/median block time) + +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = order amount too low + +### Data ### +FILE_NOT_FOUND = file not found + +NO_REPLY = peer did not reply with data \ No newline at end of file diff --git a/src/main/resources/i18n/ApiError_en.properties b/src/main/resources/i18n/ApiError_en.properties index 6b083ae7..dfe73eef 100644 --- a/src/main/resources/i18n/ApiError_en.properties +++ b/src/main/resources/i18n/ApiError_en.properties @@ -1,69 +1,84 @@ #Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) # Keys are from api.ApiError enum -ADDRESS_UNKNOWN = account address unknown - -BLOCKCHAIN_NEEDS_SYNC = blockchain needs to synchronize first - -# Blocks -BLOCK_UNKNOWN = block unknown - -BTC_BALANCE_ISSUE = insufficient Bitcoin balance - -BTC_NETWORK_ISSUE = Bitcoin/ElectrumX network issue - -BTC_TOO_SOON = too soon to broadcast Bitcoin transaction (lockTime/median block time) - -CANNOT_MINT = account cannot mint - -GROUP_UNKNOWN = group unknown - -INVALID_ADDRESS = invalid address - -# Assets -INVALID_ASSET_ID = invalid asset ID - -INVALID_CRITERIA = invalid search criteria - -INVALID_DATA = invalid data - -INVALID_HEIGHT = invalid block height - -INVALID_NETWORK_ADDRESS = invalid network address - -INVALID_ORDER_ID = invalid asset order ID - -INVALID_PRIVATE_KEY = invalid private key - -INVALID_PUBLIC_KEY = invalid public key - -INVALID_REFERENCE = invalid reference - -# Validation -INVALID_SIGNATURE = invalid signature +# "localeLang": "en", +### Common ### JSON = failed to parse JSON message -NAME_UNKNOWN = name unknown +INSUFFICIENT_BALANCE = insufficient balance -NON_PRODUCTION = this API call is not permitted for production systems - -NO_TIME_SYNC = no clock synchronization yet - -ORDER_UNKNOWN = unknown asset order ID - -PUBLIC_KEY_NOT_FOUND = public key not found +UNAUTHORIZED = API call unauthorized REPOSITORY_ISSUE = repository error -# This one is special in that caller expected to pass two additional strings, hence the two %s -TRANSACTION_INVALID = transaction invalid: %s (%s) +NON_PRODUCTION = this API call is not permitted for production systems -TRANSACTION_UNKNOWN = transaction unknown +BLOCKCHAIN_NEEDS_SYNC = blockchain needs to synchronize first + +NO_TIME_SYNC = no clock synchronization yet + +### Validation ### 
+INVALID_SIGNATURE = invalid signature + +INVALID_ADDRESS = invalid address + +INVALID_PUBLIC_KEY = invalid public key + +INVALID_DATA = invalid data + +INVALID_NETWORK_ADDRESS = invalid network address + +ADDRESS_UNKNOWN = account address unknown + +INVALID_CRITERIA = invalid search criteria + +INVALID_REFERENCE = invalid reference TRANSFORMATION_ERROR = could not transform JSON into transaction -UNAUTHORIZED = API call unauthorized +INVALID_PRIVATE_KEY = invalid private key + +INVALID_HEIGHT = invalid block height + +CANNOT_MINT = account cannot mint + +### Blocks ### +BLOCK_UNKNOWN = block unknown + +### Transactions ### +TRANSACTION_UNKNOWN = transaction unknown + +PUBLIC_KEY_NOT_FOUND = public key not found + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = transaction invalid: %s (%s) + +### Naming ### +NAME_UNKNOWN = name unknown + +### Asset ### +INVALID_ASSET_ID = invalid asset ID + +INVALID_ORDER_ID = invalid asset order ID + +ORDER_UNKNOWN = unknown asset order ID + +### Groups ### +GROUP_UNKNOWN = group unknown + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = foreign blokchain or ElectrumX network issue + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = insufficient balance on foreign blockchain + +FOREIGN_BLOCKCHAIN_TOO_SOON = too soon to broadcast foreign blockchain transaction (LockTime/median block time) + +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = order amount too low + +### Data ### +FILE_NOT_FOUND = file not found ORDER_SIZE_TOO_SMALL = order size too small diff --git a/src/main/resources/i18n/ApiError_fi.properties b/src/main/resources/i18n/ApiError_fi.properties index f9fedf09..f9518700 100644 --- a/src/main/resources/i18n/ApiError_fi.properties +++ b/src/main/resources/i18n/ApiError_fi.properties @@ -1,71 +1,86 @@ #Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) # Keys are from api.ApiError enum -# + # Kielen muuttaminen suomeksi tapahtuu settings.json-tiedostossa # # "localeLang": "fi", # muista pilkku lopussa jos komento ei ole viimeisellä rivillä -ADDRESS_UNKNOWN = tilin osoite on tuntematon - -BLOCKCHAIN_NEEDS_SYNC = lohkoketjun tarvitsee ensin synkronisoitua - -# Blocks -BLOCK_UNKNOWN = tuntematon lohko - -BTC_BALANCE_ISSUE = riittämätön Bitcoin-saldo - -BTC_NETWORK_ISSUE = Bitcoin/ElectrumX -verkon ongelma - -BTC_TOO_SOON = liian aikaista julkistaa Bitcoin-tapahtumaa (lukitusaika/mediiaanilohkoaika) - -CANNOT_MINT = tili ei voi lyödä rahaa - -GROUP_UNKNOWN = tuntematon ryhmä - -INVALID_ADDRESS = osoite on kelvoton - -# Assets -INVALID_ASSET_ID = kelvoton ID resurssille - -INVALID_CRITERIA = kelvoton hakuehto - -INVALID_DATA = kelvoton data - -INVALID_HEIGHT = kelvoton lohkon korkeus - -INVALID_NETWORK_ADDRESS = kelvoton verkko-osoite - -INVALID_ORDER_ID = kelvoton resurssin tilaus-ID - -INVALID_PRIVATE_KEY = kelvoton yksityinen avain - -INVALID_PUBLIC_KEY = kelvoton julkinen avain - -INVALID_REFERENCE = kelvoton viite - -# Validation -INVALID_SIGNATURE = kelvoton allekirjoitus - +### Common ### JSON = JSON-viestin jaottelu epäonnistui -NAME_UNKNOWN = tuntematon nimi +INSUFFICIENT_BALANCE = insufficient balance -NON_PRODUCTION = tämä API-kutsu on kielletty tuotantoversiossa - -NO_TIME_SYNC = kello vielä synkronisoimatta - -ORDER_UNKNOWN = tuntematon resurssin tilaus-ID - -PUBLIC_KEY_NOT_FOUND = julkista avainta ei löytynyt +UNAUTHORIZED = luvaton API-kutsu REPOSITORY_ISSUE = tietovarantovirhe (repo) -# This one is special in that caller expected to pass two 
additional strings, hence the two %s -TRANSACTION_INVALID = kelvoton transaktio: %s (%s) +NON_PRODUCTION = tämä API-kutsu on kielletty tuotantoversiossa -TRANSACTION_UNKNOWN = tuntematon transaktio +BLOCKCHAIN_NEEDS_SYNC = lohkoketjun tarvitsee ensin synkronisoitua + +NO_TIME_SYNC = kello vielä synkronisoimatta + +### Validation ### +INVALID_SIGNATURE = kelvoton allekirjoitus + +INVALID_ADDRESS = osoite on kelvoton + +INVALID_PUBLIC_KEY = kelvoton julkinen avain + +INVALID_DATA = kelvoton data + +INVALID_NETWORK_ADDRESS = kelvoton verkko-osoite + +ADDRESS_UNKNOWN = tilin osoite on tuntematon + +INVALID_CRITERIA = kelvoton hakuehto + +INVALID_REFERENCE = kelvoton viite TRANSFORMATION_ERROR = JSON:in muuntaminen transaktioksi epäonnistui -UNAUTHORIZED = luvaton API-kutsu \ No newline at end of file +INVALID_PRIVATE_KEY = kelvoton yksityinen avain + +INVALID_HEIGHT = kelvoton lohkon korkeus + +CANNOT_MINT = tili ei voi lyödä rahaa + +### Blocks ### +BLOCK_UNKNOWN = tuntematon lohko + +### Transactions ### +TRANSACTION_UNKNOWN = tuntematon transaktio + +PUBLIC_KEY_NOT_FOUND = julkista avainta ei löytynyt + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = kelvoton transaktio: %s (%s) + +### Naming ### +NAME_UNKNOWN = tuntematon nimi + +### Asset ### +INVALID_ASSET_ID = kelvoton ID resurssille + +INVALID_ORDER_ID = kelvoton resurssin tilaus-ID + +ORDER_UNKNOWN = tuntematon resurssin tilaus-ID + +### Groups ### +GROUP_UNKNOWN = tuntematon ryhmä + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = foreign blokchain or ElectrumX network issue + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = insufficient balance on foreign blockchain + +FOREIGN_BLOCKCHAIN_TOO_SOON = too soon to broadcast foreign blockchain transaction (LockTime/median block time) + +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = order amount too low + +### Data ### +FILE_NOT_FOUND = file not found + +NO_REPLY = peer did not reply with data \ No newline at end of file diff --git a/src/main/resources/i18n/ApiError_hu.properties b/src/main/resources/i18n/ApiError_hu.properties new file mode 100644 index 00000000..8aa783da --- /dev/null +++ b/src/main/resources/i18n/ApiError_hu.properties @@ -0,0 +1,86 @@ +#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) +# Keys are from api.ApiError enum + +# Magyar myelvre forditotta: Szkíta (Scythian). 2021 Augusztus 7. +# Az alkalmazás nyelvének magyarra való változtatása a settings.json oldalon történik. 
+ +# "localeLang": "hu", + +### Common ### +JSON = nem sikerült elemezni a JSON üzenetet + +INSUFFICIENT_BALANCE = elégtelen egyenleg + +UNAUTHORIZED = nem engedélyezett API-hívás + +REPOSITORY_ISSUE = adattári hiba + +NON_PRODUCTION = ez az API-hívás nem engedélyezett korlátozott rendszereken + +BLOCKCHAIN_NEEDS_SYNC = a blokkláncnak még szinkronizálnia kell + +NO_TIME_SYNC = az óraszinkronizálás még nem történt meg + +### Validation ### +INVALID_SIGNATURE = érvénytelen aláírás + +INVALID_ADDRESS = érvénytelen fiók cím + +INVALID_PUBLIC_KEY = érvénytelen nyilvános kulcs + +INVALID_DATA = érvénytelen adat + +INVALID_NETWORK_ADDRESS = érvénytelen hálózat cím + +ADDRESS_UNKNOWN = ismeretlen fiók cím + +INVALID_CRITERIA = érvénytelen keresési feltétel + +INVALID_REFERENCE = érvénytelen hivatkozás + +TRANSFORMATION_ERROR = nem sikerült tranzakcióvá alakítani a JSON-t + +INVALID_PRIVATE_KEY = érvénytelen privát kulcs + +INVALID_HEIGHT = érvénytelen blokkmagasság + +CANNOT_MINT = ez a fiók még nem tud QORT-ot verni + +### Blocks ### +BLOCK_UNKNOWN = ismeretlen blokk + +### Transactions ### +TRANSACTION_UNKNOWN = ismeretlen tranzakció + +PUBLIC_KEY_NOT_FOUND = nyilvános kulcs nem található + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = érvénytelen tranzakció: %s (%s) + +### Naming ### +NAME_UNKNOWN = ismeretlen név + +### Asset ### +INVALID_ASSET_ID = érvénytelen eszközazonosító + +INVALID_ORDER_ID = érvénytelen eszközrendelési azonosító + +ORDER_UNKNOWN = ismeretlen eszközrendelési azonosító + +### Groups ### +GROUP_UNKNOWN = ismeretlen csoport + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = idegen blokklánc vagy ElectrumX hálózati probléma + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = elégtelen egyenleg az idegen blokkláncon + +FOREIGN_BLOCKCHAIN_TOO_SOON = túl korai meghírdetni az idegen blokkláncon való tranzakciót (LockTime/medián blokkidő) + +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = rendelési összeg túl alacsony + +### Data ### +FILE_NOT_FOUND = fájl nem található + +NO_REPLY = a másik csomópont nem válaszolt \ No newline at end of file diff --git a/src/main/resources/i18n/ApiError_it.properties b/src/main/resources/i18n/ApiError_it.properties index 27f93f63..33993200 100644 --- a/src/main/resources/i18n/ApiError_it.properties +++ b/src/main/resources/i18n/ApiError_it.properties @@ -7,66 +7,81 @@ # "localeLang": "it", # Si prega ricordare la virgola alla fine, se questo comando non è sull'ultima riga -ADDRESS_UNKNOWN = indirizzo account sconosciuto - -BLOCKCHAIN_NEEDS_SYNC = blockchain deve prima sincronizzarsi - -# Blocks -BLOCK_UNKNOWN = blocco sconosciuto - -BTC_BALANCE_ISSUE = saldo Bitcoin insufficiente - -BTC_NETWORK_ISSUE = Bitcoin/ElectrumX problema di rete - -BTC_TOO_SOON = troppo presto per trasmettere transazione Bitcoin (tempo di blocco / tempo di blocco mediano) - -CANNOT_MINT = l'account non può coniare - -GROUP_UNKNOWN = gruppo sconosciuto - -INVALID_ADDRESS = indirizzo non valido - -# Assets -INVALID_ASSET_ID = identificazione risorsa non valida - -INVALID_CRITERIA = criteri di ricerca non validi - -INVALID_DATA = dati non validi - -INVALID_HEIGHT = altezza blocco non valida - -INVALID_NETWORK_ADDRESS = indirizzo di rete non valido - -INVALID_ORDER_ID = identificazione di ordine di risorsa non valida - -INVALID_PRIVATE_KEY = chiave privata non valida - -INVALID_PUBLIC_KEY = chiave pubblica non valida - -INVALID_REFERENCE = riferimento non valido - -# Validation -INVALID_SIGNATURE = firma 
non valida - +### Common ### JSON = Impossibile analizzare il messaggio JSON -NAME_UNKNOWN = nome sconosciuto +INSUFFICIENT_BALANCE = insufficient balance -NON_PRODUCTION = questa chiamata API non è consentita per i sistemi di produzione - -NO_TIME_SYNC = nessuna sincronizzazione dell'orologio ancora - -ORDER_UNKNOWN = identificazione di ordine di risorsa sconosciuta - -PUBLIC_KEY_NOT_FOUND = chiave pubblica non trovata +UNAUTHORIZED = Chiamata API non autorizzata REPOSITORY_ISSUE = errore del repositorio -# This one is special in that caller expected to pass two additional strings, hence the two %s -TRANSACTION_INVALID = transazione non valida: %s (%s) +NON_PRODUCTION = questa chiamata API non è consentita per i sistemi di produzione -TRANSACTION_UNKNOWN = transazione sconosciuta +BLOCKCHAIN_NEEDS_SYNC = blockchain deve prima sincronizzarsi + +NO_TIME_SYNC = nessuna sincronizzazione dell'orologio ancora + +### Validation ### +INVALID_SIGNATURE = firma non valida + +INVALID_ADDRESS = indirizzo non valido + +INVALID_PUBLIC_KEY = chiave pubblica non valida + +INVALID_DATA = dati non validi + +INVALID_NETWORK_ADDRESS = indirizzo di rete non valido + +ADDRESS_UNKNOWN = indirizzo account sconosciuto + +INVALID_CRITERIA = criteri di ricerca non validi + +INVALID_REFERENCE = riferimento non valido TRANSFORMATION_ERROR = non è stato possibile trasformare JSON in transazione -UNAUTHORIZED = Chiamata API non autorizzata +INVALID_PRIVATE_KEY = chiave privata non valida + +INVALID_HEIGHT = altezza blocco non valida + +CANNOT_MINT = l'account non può coniare + +### Blocks ### +BLOCK_UNKNOWN = blocco sconosciuto + +### Transactions ### +TRANSACTION_UNKNOWN = transazione sconosciuta + +PUBLIC_KEY_NOT_FOUND = chiave pubblica non trovata + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = transazione non valida: %s (%s) + +### Naming ### +NAME_UNKNOWN = nome sconosciuto + +### Asset ### +INVALID_ASSET_ID = identificazione risorsa non valida + +INVALID_ORDER_ID = identificazione di ordine di risorsa non valida + +ORDER_UNKNOWN = identificazione di ordine di risorsa sconosciuta + +### Groups ### +GROUP_UNKNOWN = gruppo sconosciuto + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = foreign blokchain or ElectrumX network issue + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = insufficient balance on foreign blockchain + +FOREIGN_BLOCKCHAIN_TOO_SOON = too soon to broadcast foreign blockchain transaction (LockTime/median block time) + +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = order amount too low + +### Data ### +FILE_NOT_FOUND = file not found + +NO_REPLY = peer did not reply with data \ No newline at end of file diff --git a/src/main/resources/i18n/ApiError_nl.properties b/src/main/resources/i18n/ApiError_nl.properties index 60faa0f6..5c54cf64 100644 --- a/src/main/resources/i18n/ApiError_nl.properties +++ b/src/main/resources/i18n/ApiError_nl.properties @@ -1,66 +1,83 @@ #Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) # Keys are from api.ApiError enum -ADDRESS_UNKNOWN = account adres onbekend - -BLOCKCHAIN_NEEDS_SYNC = blockchain dient eerst gesynchronizeerd te worden - -# Blocks -BLOCK_UNKNOWN = blok onbekend - -BTC_BALANCE_ISSUE = onvoldoende Bitcoin balans - -BTC_NETWORK_ISSUE = Bitcoin/ElectrumX netwerk probleem - -BTC_TOO_SOON = te vroeg om Bitcoin transactie te versturen (vergrendelingstijd/gemiddelde bloktijd) - -CANNOT_MINT = account kan niet munten - -GROUP_UNKNOWN = onbekende groep - 
-INVALID_ADDRESS = ongeldig adres - -# Assets -INVALID_ASSET_ID = ongeldige asset ID - -INVALID_CRITERIA = ongeldige zoekcriteria - -INVALID_DATA = ongeldige gegevens - -INVALID_HEIGHT = ongeldige blokhoogte - -INVALID_NETWORK_ADDRESS = ongeldig netwerkadres - -INVALID_ORDER_ID = ongeldige asset order ID - -INVALID_PRIVATE_KEY = ongeldige private key - -INVALID_PUBLIC_KEY = ongeldige public key - -INVALID_REFERENCE = ongeldige verwijzing - -# Validation -INVALID_SIGNATURE = ongeldige handtekening +# "localeLang": "nl", +### Common ### JSON = lezen van JSON bericht gefaald -NAME_UNKNOWN = onbekende naam +INSUFFICIENT_BALANCE = insufficient balance -NON_PRODUCTION = deze API call is niet toegestaan voor productiesystemen - -NO_TIME_SYNC = klok nog niet gesynchronizeerd - -ORDER_UNKNOWN = onbekende asset order ID - -PUBLIC_KEY_NOT_FOUND = public key niet gevonden +UNAUTHORIZED = ongeautoriseerde API call REPOSITORY_ISSUE = repository fout -# This one is special in that caller expected to pass two additional strings, hence the two %s -TRANSACTION_INVALID = ongeldige transactie: %s (%s) +NON_PRODUCTION = deze API call is niet toegestaan voor productiesystemen -TRANSACTION_UNKNOWN = onbekende transactie +BLOCKCHAIN_NEEDS_SYNC = blockchain dient eerst gesynchronizeerd te worden + +NO_TIME_SYNC = klok nog niet gesynchronizeerd + +### Validation ### +INVALID_SIGNATURE = ongeldige handtekening + +INVALID_ADDRESS = ongeldig adres + +INVALID_PUBLIC_KEY = ongeldige public key + +INVALID_DATA = ongeldige gegevens + +INVALID_NETWORK_ADDRESS = ongeldig netwerkadres + +ADDRESS_UNKNOWN = account adres onbekend + +INVALID_CRITERIA = ongeldige zoekcriteria + +INVALID_REFERENCE = ongeldige verwijzing TRANSFORMATION_ERROR = JSON kon niet omgezet worden in transactie -UNAUTHORIZED = ongeautoriseerde API call +INVALID_PRIVATE_KEY = ongeldige private key + +INVALID_HEIGHT = ongeldige blokhoogte + +CANNOT_MINT = account kan niet munten + +### Blocks ### +BLOCK_UNKNOWN = blok onbekend + +### Transactions ### +TRANSACTION_UNKNOWN = onbekende transactie + +PUBLIC_KEY_NOT_FOUND = public key niet gevonden + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = ongeldige transactie: %s (%s) + +### Naming ### +NAME_UNKNOWN = onbekende naam + +### Asset ### +INVALID_ASSET_ID = ongeldige asset ID + +INVALID_ORDER_ID = ongeldige asset order ID + +ORDER_UNKNOWN = onbekende asset order ID + +### Groups ### +GROUP_UNKNOWN = onbekende groep + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = foreign blokchain or ElectrumX network issue + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = insufficient balance on foreign blockchain + +FOREIGN_BLOCKCHAIN_TOO_SOON = too soon to broadcast foreign blockchain transaction (LockTime/median block time) + +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = order amount too low + +### Data ### +FILE_NOT_FOUND = file not found + +NO_REPLY = peer did not reply with data \ No newline at end of file diff --git a/src/main/resources/i18n/ApiError_ru.properties b/src/main/resources/i18n/ApiError_ru.properties index e67be901..61948a2a 100644 --- a/src/main/resources/i18n/ApiError_ru.properties +++ b/src/main/resources/i18n/ApiError_ru.properties @@ -1,57 +1,83 @@ #Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) # Keys are from api.ApiError enum -ADDRESS_UNKNOWN = неизвестная учетная запись - -BLOCKCHAIN_NEEDS_SYNC = блокчейн должен сначала синхронизироваться - -# Blocks -BLOCK_UNKNOWN = неизвестный блок - 
-CANNOT_MINT = аккаунт не может чеканить - -GROUP_UNKNOWN = неизвестная группа - -INVALID_ADDRESS = неизвестный адрес - -# Assets -INVALID_ASSET_ID = неверный идентификатор актива - -INVALID_CRITERIA = неверные критерии поиска - -INVALID_DATA = неверные данные - -INVALID_HEIGHT = недопустимая высота блока - -INVALID_NETWORK_ADDRESS = неверный сетевой адрес - -INVALID_ORDER_ID = неверный идентификатор заказа актива - -INVALID_PRIVATE_KEY = неверный приватный ключ - -INVALID_PUBLIC_KEY = недействительный открытый ключ - -INVALID_REFERENCE = неверная ссылка - -# Validation -INVALID_SIGNATURE = недействительная подпись +# "localeLang": "ru", +### Common ### JSON = не удалось разобрать сообщение json -NAME_UNKNOWN = имя неизвестно +INSUFFICIENT_BALANCE = insufficient balance -NON_PRODUCTION = этот вызов API не разрешен для производственных систем - -ORDER_UNKNOWN = неизвестный идентификатор заказа актива - -PUBLIC_KEY_NOT_FOUND = открытый ключ не найден +UNAUTHORIZED = вызов API не авторизован REPOSITORY_ISSUE = ошибка репозитория -TRANSACTION_INVALID = транзакция недействительна: %s (%s) +NON_PRODUCTION = этот вызов API не разрешен для производственных систем -TRANSACTION_UNKNOWN = транзакция неизвестна +BLOCKCHAIN_NEEDS_SYNC = блокчейн должен сначала синхронизироваться + +NO_TIME_SYNC = no clock synchronization yet + +### Validation ### +INVALID_SIGNATURE = недействительная подпись + +INVALID_ADDRESS = неизвестный адрес + +INVALID_PUBLIC_KEY = недействительный открытый ключ + +INVALID_DATA = неверные данные + +INVALID_NETWORK_ADDRESS = неверный сетевой адрес + +ADDRESS_UNKNOWN = неизвестная учетная запись + +INVALID_CRITERIA = неверные критерии поиска + +INVALID_REFERENCE = неверная ссылка TRANSFORMATION_ERROR = не удалось преобразовать JSON в транзакцию -UNAUTHORIZED = вызов API не авторизован +INVALID_PRIVATE_KEY = неверный приватный ключ + +INVALID_HEIGHT = недопустимая высота блока + +CANNOT_MINT = аккаунт не может чеканить + +### Blocks ### +BLOCK_UNKNOWN = неизвестный блок + +### Transactions ### +TRANSACTION_UNKNOWN = транзакция неизвестна + +PUBLIC_KEY_NOT_FOUND = открытый ключ не найден + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = транзакция недействительна: %s (%s) + +### Naming ### +NAME_UNKNOWN = имя неизвестно + +### Asset ### +INVALID_ASSET_ID = неверный идентификатор актива + +INVALID_ORDER_ID = неверный идентификатор заказа актива + +ORDER_UNKNOWN = неизвестный идентификатор заказа актива + +### Groups ### +GROUP_UNKNOWN = неизвестная группа + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = foreign blokchain or ElectrumX network issue + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = insufficient balance on foreign blockchain + +FOREIGN_BLOCKCHAIN_TOO_SOON = too soon to broadcast foreign blockchain transaction (LockTime/median block time) + +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = order amount too low + +### Data ### +FILE_NOT_FOUND = file not found + +NO_REPLY = peer did not reply with data \ No newline at end of file diff --git a/src/main/resources/i18n/SysTray_en.properties b/src/main/resources/i18n/SysTray_en.properties index e581335d..07541339 100644 --- a/src/main/resources/i18n/SysTray_en.properties +++ b/src/main/resources/i18n/SysTray_en.properties @@ -1,12 +1,14 @@ #Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) # SysTray pop-up menu -APPLYING_UPDATE_AND_RESTARTING = Applying automatic update and restarting... 
- AUTO_UPDATE = Auto Update +APPLYING_UPDATE_AND_RESTARTING = Applying automatic update and restarting... + BLOCK_HEIGHT = height +BUILD_VERSION = Build version + CHECK_TIME_ACCURACY = Check time accuracy CONNECTING = Connecting @@ -19,6 +21,8 @@ CREATING_BACKUP_OF_DB_FILES = Creating backup of database files... DB_BACKUP = Database Backup +DB_MAINTENANCE = Database Maintenance + DB_CHECKPOINT = Database Checkpoint EXIT = Exit @@ -27,17 +31,12 @@ MINTING_DISABLED = NOT minting MINTING_ENABLED = \u2714 Minting -# Nagging about lack of NTP time sync -NTP_NAG_CAPTION = Computer's clock is inaccurate! - -NTP_NAG_TEXT_UNIX = Install NTP service to get an accurate clock. - -NTP_NAG_TEXT_WINDOWS = Select "Synchronize clock" from menu to fix. - OPEN_UI = Open UI PERFORMING_DB_CHECKPOINT = Saving uncommitted database changes... +PERFORMING_DB_MAINTENANCE = Performing scheduled maintenance... + SYNCHRONIZE_CLOCK = Synchronize clock SYNCHRONIZING_BLOCKCHAIN = Synchronizing diff --git a/src/main/resources/i18n/SysTray_fi.properties b/src/main/resources/i18n/SysTray_fi.properties index 551b010e..edd062bc 100644 --- a/src/main/resources/i18n/SysTray_fi.properties +++ b/src/main/resources/i18n/SysTray_fi.properties @@ -1,12 +1,14 @@ #Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) # SysTray pop-up menu -APPLYING_UPDATE_AND_RESTARTING = Automaattinen päivitys käynnissä, uudelleenkäynnistys seuraa... - AUTO_UPDATE = Automaattinen päivitys +APPLYING_UPDATE_AND_RESTARTING = Automaattinen päivitys käynnissä, uudelleenkäynnistys seuraa... + BLOCK_HEIGHT = korkeus +BUILD_VERSION = Versio + CHECK_TIME_ACCURACY = Tarkista ajan tarkkuus CONNECTING = Yhdistää @@ -19,6 +21,8 @@ CREATING_BACKUP_OF_DB_FILES = Luodaan varmuuskopio tietokannan tiedostoista... DB_BACKUP = Tietokannan varmuuskopio +DB_MAINTENANCE = Database Maintenance + DB_CHECKPOINT = Tietokannan varmistuspiste EXIT = Pois @@ -27,17 +31,12 @@ MINTING_DISABLED = EI lyö rahaa MINTING_ENABLED = \u2714 Lyö rahaa -# Nagging about lack of NTP time sync -NTP_NAG_CAPTION = Tietokoneen kello on epätarkka! - -NTP_NAG_TEXT_UNIX = Asennathan NTP-palvelun, jotta saat kellon tarkkuuden oikeaksi. - -NTP_NAG_TEXT_WINDOWS = Valitse "Kellon synkronisointi" valikosta korjataksesi. - OPEN_UI = Avaa UI PERFORMING_DB_CHECKPOINT = Tallentaa kommittoidut tietokantamuutokset... +PERFORMING_DB_MAINTENANCE = Performing scheduled maintenance... + SYNCHRONIZE_CLOCK = Synkronisoi kello SYNCHRONIZING_BLOCKCHAIN = Synkronisoi diff --git a/src/main/resources/i18n/SysTray_hu.properties b/src/main/resources/i18n/SysTray_hu.properties new file mode 100644 index 00000000..be4bef25 --- /dev/null +++ b/src/main/resources/i18n/SysTray_hu.properties @@ -0,0 +1,46 @@ +#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) +# SysTray pop-up menu + +# Magyar myelvre forditotta: Szkíta (Scythian). 2021 Augusztus 7. + +AUTO_UPDATE = Automatikus Frissítés + +APPLYING_UPDATE_AND_RESTARTING = Automatikus frissítés és újraindítás alkalmazása... + +BLOCK_HEIGHT = blokkmagasság + +BUILD_VERSION = Verzió + +CHECK_TIME_ACCURACY = Idő pontosság ellenőrzése + +CONNECTING = Kapcsolódás + +CONNECTION = kapcsolat + +CONNECTIONS = kapcsolat + +CREATING_BACKUP_OF_DB_FILES = Adatbázis fájlok biztonsági mentésének létrehozása... 
+ +DB_BACKUP = Adatbázis biztonsági mentése + +DB_MAINTENANCE = Database Maintenance + +DB_CHECKPOINT = Adatbázis-ellenőrzőpont + +EXIT = Kilépés + +MINTING_DISABLED = QORT-érmeverés jelenleg nincs folyamatban + +MINTING_ENABLED = \u2714 QORT-érmeverés folyamatban + +OPEN_UI = Felhasználói eszköz megnyitása + +PERFORMING_DB_CHECKPOINT = Mentetlen adatbázis-módosítások mentése... + +PERFORMING_DB_MAINTENANCE = Performing scheduled maintenance... + +SYNCHRONIZE_CLOCK = Óra-szinkronizálás megkezdése + +SYNCHRONIZING_BLOCKCHAIN = Szinkronizálás + +SYNCHRONIZING_CLOCK = Óra-szinkronizálás folyamatban diff --git a/src/main/resources/i18n/SysTray_it.properties b/src/main/resources/i18n/SysTray_it.properties index 1d243958..326c71c2 100644 --- a/src/main/resources/i18n/SysTray_it.properties +++ b/src/main/resources/i18n/SysTray_it.properties @@ -8,6 +8,8 @@ AUTO_UPDATE = Aggiornamento automatico BLOCK_HEIGHT = altezza +BUILD_VERSION = Versione + CHECK_TIME_ACCURACY = Controlla la precisione dell'ora CONNECTING = Collegando @@ -20,6 +22,8 @@ CREATING_BACKUP_OF_DB_FILES = Creazione di backup dei file di database... DB_BACKUP = Backup del database +DB_MAINTENANCE = Database Maintenance + DB_CHECKPOINT = Punto di controllo del database EXIT = Uscita @@ -28,17 +32,12 @@ MINTING_DISABLED = NON coniando MINTING_ENABLED = \u2714 Coniando -# Nagging about lack of NTP time sync -NTP_NAG_CAPTION = L'orologio del computer è impreciso! - -NTP_NAG_TEXT_UNIX = Installare servizio NTP per ottenere un orologio preciso. - -NTP_NAG_TEXT_WINDOWS = Seleziona "Sincronizza orologio" dal menu per correggere. - OPEN_UI = Apri UI PERFORMING_DB_CHECKPOINT = Salvataggio delle modifiche al database non salvate... +PERFORMING_DB_MAINTENANCE = Performing scheduled maintenance... + SYNCHRONIZE_CLOCK = Sincronizza orologio SYNCHRONIZING_BLOCKCHAIN = Sincronizzando diff --git a/src/main/resources/i18n/SysTray_nl.properties b/src/main/resources/i18n/SysTray_nl.properties index 4e3e48ec..ddf1527f 100644 --- a/src/main/resources/i18n/SysTray_nl.properties +++ b/src/main/resources/i18n/SysTray_nl.properties @@ -7,6 +7,8 @@ AUTO_UPDATE = Automatische Update BLOCK_HEIGHT = hoogte +BUILD_VERSION = Versie + CHECK_TIME_ACCURACY = Controleer accuraatheid van de tijd CONNECTING = Verbinden @@ -27,17 +29,12 @@ MINTING_DISABLED = NIET muntend MINTING_ENABLED = \u2714 Muntend -# Nagging about lack of NTP time sync -NTP_NAG_CAPTION = Klok van de computer is inaccuraat! - -NTP_NAG_TEXT_UNIX = Installeer NTP service voor een accurate klok. - -NTP_NAG_TEXT_WINDOWS = Selecteer "Synchronizeer klok" uit het menu om op te lossen. - OPEN_UI = Open UI PERFORMING_DB_CHECKPOINT = Nieuwe veranderingen aan database worden opgeslagen... +PERFORMING_DB_MAINTENANCE = Performing scheduled maintenance... 
+ SYNCHRONIZE_CLOCK = Synchronizeer klok SYNCHRONIZING_BLOCKCHAIN = Aan het synchronizeren diff --git a/src/main/resources/i18n/SysTray_ru.properties b/src/main/resources/i18n/SysTray_ru.properties index f7012034..c124b500 100644 --- a/src/main/resources/i18n/SysTray_ru.properties +++ b/src/main/resources/i18n/SysTray_ru.properties @@ -7,6 +7,8 @@ AUTO_UPDATE = Автоматическое обновление BLOCK_HEIGHT = Высота блока +BUILD_VERSION = Build version + CHECK_TIME_ACCURACY = Проверка точного времени CONNECTING = Подключение @@ -19,21 +21,20 @@ CREATING_BACKUP_OF_DB_FILES = Создание резервной копии ф DB_BACKUP = Резервное копирование базы данных +DB_MAINTENANCE = Database Maintenance + EXIT = Выход MINTING_DISABLED = Чеканка отключена MINTING_ENABLED = Чеканка активна -# Nagging about lack of NTP time sync -NTP_NAG_CAPTION = Часы компьютера неточны! - -NTP_NAG_TEXT_UNIX = Установите службу NTP, чтобы получить точное время - -NTP_NAG_TEXT_WINDOWS = Выберите "Синхронизация времени" из меню, чтобы исправить - OPEN_UI = Открыть пользовательский интерфейс +PERFORMING_DB_CHECKPOINT = Saving uncommitted database changes... + +PERFORMING_DB_MAINTENANCE = Performing scheduled maintenance... + SYNCHRONIZE_CLOCK = Синхронизировать время SYNCHRONIZING_BLOCKCHAIN = Синхронизация цепи diff --git a/src/main/resources/i18n/SysTray_zh_SC.properties b/src/main/resources/i18n/SysTray_zh_CN.properties similarity index 52% rename from src/main/resources/i18n/SysTray_zh_SC.properties rename to src/main/resources/i18n/SysTray_zh_CN.properties index caba49cf..6d8318e2 100644 --- a/src/main/resources/i18n/SysTray_zh_SC.properties +++ b/src/main/resources/i18n/SysTray_zh_CN.properties @@ -1,29 +1,40 @@ #Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) # SysTray pop-up menu +AUTO_UPDATE = Auto Update + +APPLYING_UPDATE_AND_RESTARTING = Applying automatic update and restarting... + BLOCK_HEIGHT = 区块高度 +BUILD_VERSION = Build version + CHECK_TIME_ACCURACY = 检查时间准确性 +CONNECTING = Connecting + CONNECTION = 个链接 CONNECTIONS = 个链接 +CREATING_BACKUP_OF_DB_FILES = Creating backup of database files... + +DB_BACKUP = Database Backup + +DB_CHECKPOINT = Database Checkpoint + EXIT = 退出核心 MINTING_DISABLED = 没有铸币 MINTING_ENABLED = ✔ 铸币 -# Nagging about lack of NTP time sync -NTP_NAG_CAPTION = 电脑的时间不准确! - -NTP_NAG_TEXT_UNIX = 安装NTP服务以获取准确的时间。 - -NTP_NAG_TEXT_WINDOWS = 从菜单中选择“同步时钟”进行修复。 - OPEN_UI = 开启Qortal界面 +PERFORMING_DB_CHECKPOINT = Saving uncommitted database changes... + +PERFORMING_DB_MAINTENANCE = Performing scheduled maintenance... + SYNCHRONIZE_CLOCK = 同步时钟 SYNCHRONIZING_BLOCKCHAIN = 正在同步区块链 diff --git a/src/main/resources/i18n/SysTray_zh_TC.properties b/src/main/resources/i18n/SysTray_zh_TW.properties similarity index 51% rename from src/main/resources/i18n/SysTray_zh_TC.properties rename to src/main/resources/i18n/SysTray_zh_TW.properties index ac768846..3af0c84c 100644 --- a/src/main/resources/i18n/SysTray_zh_TC.properties +++ b/src/main/resources/i18n/SysTray_zh_TW.properties @@ -1,31 +1,40 @@ #Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) # SysTray pop-up menu +AUTO_UPDATE = Auto Update + +APPLYING_UPDATE_AND_RESTARTING = Applying automatic update and restarting... + BLOCK_HEIGHT = 區塊高度 +BUILD_VERSION = Build version + CHECK_TIME_ACCURACY = 檢查時間準確性 +CONNECTING = Connecting + CONNECTION = 個鏈接 CONNECTIONS = 個鏈接 +CREATING_BACKUP_OF_DB_FILES = Creating backup of database files... 
+ +DB_BACKUP = Database Backup + +DB_CHECKPOINT = Database Checkpoint + EXIT = 退出核心 MINTING_DISABLED = 沒有鑄幣 MINTING_ENABLED = ✔ 鑄幣 -# Nagging about lack of NTP time sync -NTP_NAG_CAPTION = 電腦的時間不準確! - -NTP_NAG_TEXT_UNIX = 安装NTP服務以獲取準確的時間。 - -NTP_NAG_TEXT_WINDOWS = 從菜單中選擇“同步時鐘”進行修復。 - OPEN_UI = 開啓Qortal界面 +PERFORMING_DB_CHECKPOINT = Saving uncommitted database changes... + SYNCHRONIZE_CLOCK = 同步時鐘 SYNCHRONIZING_BLOCKCHAIN = 正在同步區塊鏈 -SYNCHRONIZING_CLOCK = 正在同步時鐘 +SYNCHRONIZING_CLOCK = 正在同步時鐘 \ No newline at end of file diff --git a/src/main/resources/i18n/TransactionValidity_en.properties b/src/main/resources/i18n/TransactionValidity_en.properties index 7e3ea324..17a52647 100644 --- a/src/main/resources/i18n/TransactionValidity_en.properties +++ b/src/main/resources/i18n/TransactionValidity_en.properties @@ -1,161 +1,44 @@ - -ACCOUNT_ALREADY_EXISTS = account already exists - -ACCOUNT_CANNOT_REWARD_SHARE = account cannot reward-share - -ALREADY_GROUP_ADMIN = already group admin - -ALREADY_GROUP_MEMBER = already group member - -ALREADY_VOTED_FOR_THAT_OPTION = already voted for that option - -ASSET_ALREADY_EXISTS = asset already exists - -ASSET_DOES_NOT_EXIST = asset does not exist - -ASSET_DOES_NOT_MATCH_AT = asset does not match AT's asset - -ASSET_NOT_SPENDABLE = asset is not spendable - -AT_ALREADY_EXISTS = AT already exists - -AT_IS_FINISHED = AT has finished - -AT_UNKNOWN = AT unknown - -BANNED_FROM_GROUP = banned from group - -BAN_EXISTS = ban already exists - -BAN_UNKNOWN = ban unknown - -BUYER_ALREADY_OWNER = buyer is already owner - -CHAT = CHAT transactions are never valid for inclusion into blocks - -CLOCK_NOT_SYNCED = clock not synchronized - -DUPLICATE_OPTION = duplicate option - -GROUP_ALREADY_EXISTS = group already exists - -GROUP_APPROVAL_DECIDED = group-approval already decided - -GROUP_APPROVAL_NOT_REQUIRED = group-approval not required - -GROUP_DOES_NOT_EXIST = group does not exist - -GROUP_ID_MISMATCH = group ID mismatch - -GROUP_OWNER_CANNOT_LEAVE = group owner cannot leave group - -HAVE_EQUALS_WANT = have-asset is the same as want-asset - -INCORRECT_NONCE = incorrect PoW nonce - -INSUFFICIENT_FEE = insufficient fee +OK = OK INVALID_ADDRESS = invalid address -INVALID_AMOUNT = invalid amount - -INVALID_ASSET_OWNER = invalid asset owner - -INVALID_AT_TRANSACTION = invalid AT transaction - -INVALID_AT_TYPE_LENGTH = invalid AT 'type' length - -INVALID_CREATION_BYTES = invalid creation bytes - -INVALID_DATA_LENGTH = invalid data length - -INVALID_DESCRIPTION_LENGTH = invalid description length - -INVALID_GROUP_APPROVAL_THRESHOLD = invalid group-approval threshold - -INVALID_GROUP_BLOCK_DELAY = invalid group-approval block delay - -INVALID_GROUP_ID = invalid group ID - -INVALID_GROUP_OWNER = invalid group owner - -INVALID_LIFETIME = invalid lifetime - -INVALID_NAME_LENGTH = invalid name length - -INVALID_NAME_OWNER = invalid name owner - -INVALID_OPTIONS_COUNT = invalid options count - -INVALID_OPTION_LENGTH = invalid options length - -INVALID_ORDER_CREATOR = invalid order creator - -INVALID_PAYMENTS_COUNT = invalid payments count - -INVALID_PUBLIC_KEY = invalid public key - -INVALID_QUANTITY = invalid quantity - -INVALID_REFERENCE = invalid reference - -INVALID_RETURN = invalid return - -INVALID_REWARD_SHARE_PERCENT = invalid reward-share percent - -INVALID_SELLER = invalid seller - -INVALID_TAGS_LENGTH = invalid 'tags' length - -INVALID_TX_GROUP_ID = invalid transaction group ID - -INVALID_VALUE_LENGTH = invalid 'value' length - -INVITE_UNKNOWN = group invite unknown - 
-JOIN_REQUEST_EXISTS = group join request already exists - -MAXIMUM_REWARD_SHARES = already at maximum number of reward-shares for this account - -MISSING_CREATOR = missing creator - -MULTIPLE_NAMES_FORBIDDEN = multiple registered names per account is forbidden - -NAME_ALREADY_FOR_SALE = name already for sale - -NAME_ALREADY_REGISTERED = name already registered - -NAME_DOES_NOT_EXIST = name does not exist - -NAME_NOT_FOR_SALE = name is not for sale - -NAME_NOT_NORMALIZED = name not in Unicode 'normalized' form - NEGATIVE_AMOUNT = invalid/negative amount NEGATIVE_FEE = invalid/negative fee -NEGATIVE_PRICE = invalid/negative price - -NOT_GROUP_ADMIN = account is not a group admin - -NOT_GROUP_MEMBER = account is not a group member - -NOT_MINTING_ACCOUNT = account cannot mint - -NOT_YET_RELEASED = feature not yet released - NO_BALANCE = insufficient balance -NO_BLOCKCHAIN_LOCK = node's blockchain currently busy +INVALID_REFERENCE = invalid reference -NO_FLAG_PERMISSION = account does not have that permission +INVALID_NAME_LENGTH = invalid name length -OK = OK +INVALID_VALUE_LENGTH = invalid 'value' length -ORDER_ALREADY_CLOSED = asset trade order is already closed +NAME_ALREADY_REGISTERED = name already registered -ORDER_DOES_NOT_EXIST = asset trade order does not exist +NAME_DOES_NOT_EXIST = name does not exist + +INVALID_NAME_OWNER = invalid name owner + +NAME_ALREADY_FOR_SALE = name already for sale + +NAME_NOT_FOR_SALE = name is not for sale + +BUYER_ALREADY_OWNER = buyer is already owner + +INVALID_AMOUNT = invalid amount + +INVALID_SELLER = invalid seller + +NAME_NOT_NORMALIZED = name not in Unicode 'normalized' form + +INVALID_DESCRIPTION_LENGTH = invalid description length + +INVALID_OPTIONS_COUNT = invalid options count + +INVALID_OPTION_LENGTH = invalid options length + +DUPLICATE_OPTION = duplicate option POLL_ALREADY_EXISTS = poll already exists @@ -163,22 +46,146 @@ POLL_DOES_NOT_EXIST = poll does not exist POLL_OPTION_DOES_NOT_EXIST = poll option does not exist -PUBLIC_KEY_UNKNOWN = public key unknown +ALREADY_VOTED_FOR_THAT_OPTION = already voted for that option -REWARD_SHARE_UNKNOWN = reward-share unknown +INVALID_DATA_LENGTH = invalid data length -SELF_SHARE_EXISTS = self-share (reward-share) already exists +INVALID_QUANTITY = invalid quantity -TIMESTAMP_TOO_NEW = timestamp too new +ASSET_DOES_NOT_EXIST = asset does not exist + +INVALID_RETURN = invalid return + +HAVE_EQUALS_WANT = have-asset is the same as want-asset + +ORDER_DOES_NOT_EXIST = asset trade order does not exist + +INVALID_ORDER_CREATOR = invalid order creator + +INVALID_PAYMENTS_COUNT = invalid payments count + +NEGATIVE_PRICE = invalid/negative price + +INVALID_CREATION_BYTES = invalid creation bytes + +INVALID_TAGS_LENGTH = invalid 'tags' length + +INVALID_AT_TYPE_LENGTH = invalid AT 'type' length + +INVALID_AT_TRANSACTION = invalid AT transaction + +INSUFFICIENT_FEE = insufficient fee + +ASSET_DOES_NOT_MATCH_AT = asset does not match AT's asset + +ASSET_ALREADY_EXISTS = asset already exists + +MISSING_CREATOR = missing creator TIMESTAMP_TOO_OLD = timestamp too old +TIMESTAMP_TOO_NEW = timestamp too new + TOO_MANY_UNCONFIRMED = account has too many unconfirmed transactions pending -TRANSACTION_ALREADY_CONFIRMED = transaction has already confirmed +GROUP_ALREADY_EXISTS = group already exists -TRANSACTION_ALREADY_EXISTS = transaction already exists +GROUP_DOES_NOT_EXIST = group does not exist + +INVALID_GROUP_OWNER = invalid group owner + +ALREADY_GROUP_MEMBER = already group member + +GROUP_OWNER_CANNOT_LEAVE 
= group owner cannot leave group + +NOT_GROUP_MEMBER = account is not a group member + +ALREADY_GROUP_ADMIN = already group admin + +NOT_GROUP_ADMIN = account is not a group admin + +INVALID_LIFETIME = invalid lifetime + +INVITE_UNKNOWN = group invite unknown + +BAN_EXISTS = ban already exists + +BAN_UNKNOWN = ban unknown + +BANNED_FROM_GROUP = banned from group + +JOIN_REQUEST_EXISTS = group join request already exists + +INVALID_GROUP_APPROVAL_THRESHOLD = invalid group-approval threshold + +GROUP_ID_MISMATCH = group ID mismatch + +INVALID_GROUP_ID = invalid group ID TRANSACTION_UNKNOWN = transaction unknown +TRANSACTION_ALREADY_CONFIRMED = transaction has already confirmed + +INVALID_TX_GROUP_ID = invalid transaction group ID + TX_GROUP_ID_MISMATCH = transaction's group ID does not match + +MULTIPLE_NAMES_FORBIDDEN = multiple registered names per account is forbidden + +INVALID_ASSET_OWNER = invalid asset owner + +AT_IS_FINISHED = AT has finished + +NO_FLAG_PERMISSION = account does not have that permission + +NOT_MINTING_ACCOUNT = account cannot mint + +REWARD_SHARE_UNKNOWN = reward-share unknown + +INVALID_REWARD_SHARE_PERCENT = invalid reward-share percent + +PUBLIC_KEY_UNKNOWN = public key unknown + +INVALID_PUBLIC_KEY = invalid public key + +AT_UNKNOWN = AT unknown + +AT_ALREADY_EXISTS = AT already exists + +GROUP_APPROVAL_NOT_REQUIRED = group-approval not required + +GROUP_APPROVAL_DECIDED = group-approval already decided + +MAXIMUM_REWARD_SHARES = already at maximum number of reward-shares for this account + +TRANSACTION_ALREADY_EXISTS = transaction already exists + +NO_BLOCKCHAIN_LOCK = node's blockchain currently busy + +ORDER_ALREADY_CLOSED = asset trade order is already closed + +CLOCK_NOT_SYNCED = clock not synchronized + +ASSET_NOT_SPENDABLE = asset is not spendable + +ACCOUNT_CANNOT_REWARD_SHARE = account cannot reward-share + +SELF_SHARE_EXISTS = self-share (reward-share) already exists + +ACCOUNT_ALREADY_EXISTS = account already exists + +INVALID_GROUP_BLOCK_DELAY = invalid group-approval block delay + +INCORRECT_NONCE = incorrect PoW nonce + +INVALID_TIMESTAMP_SIGNATURE = invalid timestamp signature + +ADDRESS_IN_BLACKLIST = this address is in your blacklist + +ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit + +DUPLICATE_MESSAGE = address sent duplicate message + +INVALID_BUT_OK = invalid but OK + +NOT_YET_RELEASED = feature not yet released \ No newline at end of file diff --git a/src/main/resources/i18n/TransactionValidity_fi.properties b/src/main/resources/i18n/TransactionValidity_fi.properties index 2dc9abef..adf7eb35 100644 --- a/src/main/resources/i18n/TransactionValidity_fi.properties +++ b/src/main/resources/i18n/TransactionValidity_fi.properties @@ -1,4 +1,3 @@ - ACCOUNT_ALREADY_EXISTS = tili on jo olemassa ACCOUNT_CANNOT_REWARD_SHARE = tili ei voi palkinto-jakaa @@ -31,8 +30,6 @@ BAN_UNKNOWN = tuntematon eväys BUYER_ALREADY_OWNER = ostaja on jo omistaja -CHAT = CHATin transaktiot eivät koskaan ole kelvollisia sisällytettäväksi lohkoihin - CLOCK_NOT_SYNCED = kello on synkronisoimatta DUPLICATE_OPTION = kahdennettu valinta @@ -182,3 +179,13 @@ TRANSACTION_ALREADY_EXISTS = transaktio on jo olemassa TRANSACTION_UNKNOWN = tuntematon transaktio TX_GROUP_ID_MISMATCH = transaktion ryhmä-ID:n vastaavuusvirhe + +ADDRESS_IN_BLACKLIST = this address is in your blacklist + +ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit + +DUPLICATE_MESSAGE = address sent duplicate message + +INVALID_TIMESTAMP_SIGNATURE = Invalid timestamp signature + 
+INVALID_BUT_OK = Invalid but OK \ No newline at end of file diff --git a/src/main/resources/i18n/TransactionValidity_hu.properties b/src/main/resources/i18n/TransactionValidity_hu.properties new file mode 100644 index 00000000..68950971 --- /dev/null +++ b/src/main/resources/i18n/TransactionValidity_hu.properties @@ -0,0 +1,193 @@ +# Magyar myelvre forditotta: Szkíta (Scythian). 2021 Augusztus 7. + +OK = OK + +INVALID_ADDRESS = érvénytelen név vagy cím + +NEGATIVE_AMOUNT = negatív összeg + +NEGATIVE_FEE = érvénytelen/negatív tranzakciós díj + +NO_BALANCE = elégtelen egyenleg + +INVALID_REFERENCE = érvénytelen hivatkozás + +INVALID_NAME_LENGTH = érvénytelen névhossz + +INVALID_VALUE_LENGTH = érvénytelen értékhossz + +NAME_ALREADY_REGISTERED = ez a név már regisztrált + +NAME_DOES_NOT_EXIST = ez a név nem létezik + +INVALID_NAME_OWNER = érvénytelen név tulajdonos + +NAME_ALREADY_FOR_SALE = ez a név már eladó + +NAME_NOT_FOR_SALE = ez a név nem eladó + +BUYER_ALREADY_OWNER = ez a vevő már a tulajdonos + +INVALID_AMOUNT = érvénytelen összeg + +INVALID_SELLER = érvénytelen eladó + +NAME_NOT_NORMALIZED = ez a név nincs "normalizált" Unicode formátumban + +INVALID_DESCRIPTION_LENGTH = érvénytelen leíráshossz + +INVALID_OPTIONS_COUNT = invalid options count + +INVALID_OPTION_LENGTH = érvénytelen opciókszám + +DUPLICATE_OPTION = ez a lehetőség már létezik + +POLL_ALREADY_EXISTS = ez a szavazás már létezik + +POLL_DOES_NOT_EXIST = ez a szavazás nem létezik + +POLL_OPTION_DOES_NOT_EXIST = ez a szavazási lehetőség nem létezik + +ALREADY_VOTED_FOR_THAT_OPTION = erre a lehetőségre már szavaztál + +INVALID_DATA_LENGTH = érvénytelen adathossz + +INVALID_QUANTITY = érvénytelen mennyiség + +ASSET_DOES_NOT_EXIST = tőke nem létezik + +INVALID_RETURN = érvénytelen csere tőke + +HAVE_EQUALS_WANT = saját tőke egyenlő a csere tőkével + +ORDER_DOES_NOT_EXIST = tőke rendelés nem létezik + +INVALID_ORDER_CREATOR = érvénytelen rendelés létrehozó + +INVALID_PAYMENTS_COUNT = a kifizetések száma érvénytelen + +NEGATIVE_PRICE = érvénytelen/negatív ár + +INVALID_CREATION_BYTES = érvénytelen létrehozási bájtok + +INVALID_TAGS_LENGTH = érvénytelen cimkehossz + +INVALID_AT_TYPE_LENGTH = érvénytelen AT "típus" hossz + +INVALID_AT_TRANSACTION = érvénytelen AT tranzakció + +INSUFFICIENT_FEE = elégtelen díj + +ASSET_DOES_NOT_MATCH_AT = a tőke nem egyezik az AT tőkéjével + +ASSET_ALREADY_EXISTS = ez a tőke már létezik + +MISSING_CREATOR = hiányzó létrehozó + +TIMESTAMP_TOO_OLD = időbélyeg túl régi + +TIMESTAMP_TOO_NEW = időbélyeg túl korai + +TOO_MANY_UNCONFIRMED = ennek a fióknak túl sok meg nem erősített tranzakciója van folyamatban + +GROUP_ALREADY_EXISTS = ez a csoport már létezik + +GROUP_DOES_NOT_EXIST = ez a csoport nem létezik + +INVALID_GROUP_OWNER = érvénytelen csoport tulajdonos + +ALREADY_GROUP_MEMBER = már csoporttag + +GROUP_OWNER_CANNOT_LEAVE = a csoport tulajdonos nem tudja elhagyni a csoportot + +NOT_GROUP_MEMBER = ez a tag nem csoporttag + +ALREADY_GROUP_ADMIN = már csoport adminisztrátor + +NOT_GROUP_ADMIN = ez a tag nem csoport adminisztrátor + +INVALID_LIFETIME = érvénytelen élettartam + +INVITE_UNKNOWN = ismeretlen csoport meghívás + +BAN_EXISTS = már ki van tiltva + +BAN_UNKNOWN = kitiltás nem létezik + +BANNED_FROM_GROUP = ki van tiltva a csoportból + +JOIN_REQUEST_EXISTS = a csoporthoz való csatlakozási kérelem már megtöretént + +INVALID_GROUP_APPROVAL_THRESHOLD = érvénytelen jóváhagyási küszöbérték + +GROUP_ID_MISMATCH = csoportazonosító nem egyezik + +INVALID_GROUP_ID = csoportazonosító érvénytelen + 
+TRANSACTION_UNKNOWN = ismeretlen tranzakció + +TRANSACTION_ALREADY_CONFIRMED = ez a tranzakció már meg van erősítve + +INVALID_TX_GROUP_ID = a tranzakció csoportazonosítója érvénytelen + +TX_GROUP_ID_MISMATCH = a tranzakció csoportazonosítója nem egyezik + +MULTIPLE_NAMES_FORBIDDEN = fiókonként több név regisztrálása tilos + +INVALID_ASSET_OWNER = érvénytelen tőke tulajdonos + +AT_IS_FINISHED = az AT végzett + +NO_FLAG_PERMISSION = ez a fiók nem rendelkezik ezzel az engedéllyel + +NOT_MINTING_ACCOUNT = ez a fiók nem tud QORT-ot verni + +REWARD_SHARE_UNKNOWN = ez a jutalék-megosztás ismeretlen + +INVALID_REWARD_SHARE_PERCENT = ez a jutalék-megosztási arány érvénytelen + +PUBLIC_KEY_UNKNOWN = ismeretlen nyilvános kulcs + +INVALID_PUBLIC_KEY = érvénytelen nyilvános kulcs + +AT_UNKNOWN = az AT ismeretlen + +AT_ALREADY_EXISTS = az AT már létezik + +GROUP_APPROVAL_NOT_REQUIRED = csoport általi jóváhagyás nem szükséges + +GROUP_APPROVAL_DECIDED = csoport általi jóváhagyás el van döntve + +MAXIMUM_REWARD_SHARES = ez a fiókcím már elérte a maximális lehetséges jutalék-megosztási részesedést + +TRANSACTION_ALREADY_EXISTS = ez a tranzakció már létezik + +NO_BLOCKCHAIN_LOCK = csomópont blokklánca jelenleg elfoglalt + +ORDER_ALREADY_CLOSED = ez a tőke értékesítés már befejeződött + +CLOCK_NOT_SYNCED = az óra nincs szinkronizálva + +ASSET_NOT_SPENDABLE = ez a tőke nem értékesíthető + +ACCOUNT_CANNOT_REWARD_SHARE = ez a fiók nem vehet részt jutalék-megosztásban + +SELF_SHARE_EXISTS = önrészes jutalék-megosztás már létezik + +ACCOUNT_ALREADY_EXISTS = ez a fiók már létezik + +INVALID_GROUP_BLOCK_DELAY = invalid group-approval block delay + +INCORRECT_NONCE = helytelen Proof-of-Work Nonce + +INVALID_TIMESTAMP_SIGNATURE = érvénytelen időbélyeg aláírás + +ADDRESS_IN_BLACKLIST = ez a fiókcím a fekete listádon van + +ADDRESS_ABOVE_RATE_LIMIT = ez a cím elérte a megengedett mérték korlátot + +DUPLICATE_MESSAGE = ez a cím duplikált üzenetet küldött + +INVALID_BUT_OK = érvénytelen de elfogadva + +NOT_YET_RELEASED = ez a funkció még nem került kiadásra \ No newline at end of file diff --git a/src/main/resources/i18n/TransactionValidity_it.properties b/src/main/resources/i18n/TransactionValidity_it.properties index d97af856..62d1608b 100644 --- a/src/main/resources/i18n/TransactionValidity_it.properties +++ b/src/main/resources/i18n/TransactionValidity_it.properties @@ -32,8 +32,6 @@ BAN_UNKNOWN = divieto sconosciuto BUYER_ALREADY_OWNER = l'acquirente è già proprietario -CHAT = Le transazioni CHAT non sono mai valide per l'inclusione nei blocchi - CLOCK_NOT_SYNCED = orologio non sincronizzato DUPLICATE_OPTION = opzione duplicata @@ -183,3 +181,13 @@ TRANSACTION_ALREADY_EXISTS = la transazione già esiste TRANSACTION_UNKNOWN = transazione sconosciuta TX_GROUP_ID_MISMATCH = identificazione di gruppo della transazione non corrisponde + +ADDRESS_IN_BLACKLIST = this address is in your blacklist + +ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit + +DUPLICATE_MESSAGE = address sent duplicate message + +INVALID_TIMESTAMP_SIGNATURE = Invalid timestamp signature + +INVALID_BUT_OK = Invalid but OK \ No newline at end of file diff --git a/src/main/resources/i18n/TransactionValidity_nl.properties b/src/main/resources/i18n/TransactionValidity_nl.properties index 7afaad89..d6191f86 100644 --- a/src/main/resources/i18n/TransactionValidity_nl.properties +++ b/src/main/resources/i18n/TransactionValidity_nl.properties @@ -1,4 +1,3 @@ - ACCOUNT_ALREADY_EXISTS = account bestaat al ACCOUNT_CANNOT_REWARD_SHARE = account 
kan geen beloningen delen @@ -31,8 +30,6 @@ BAN_UNKNOWN = ban onbekend BUYER_ALREADY_OWNER = koper is al eigenaar -CHAT = CHAT transacties zijn nooit geldig voor opname in blokken - CLOCK_NOT_SYNCED = klok is niet gesynchronizeerd DUPLICATE_OPTION = dubbele optie @@ -182,3 +179,13 @@ TRANSACTION_ALREADY_EXISTS = transactie bestaat al TRANSACTION_UNKNOWN = transactie onbekend TX_GROUP_ID_MISMATCH = groep-ID van transactie matcht niet + +ADDRESS_IN_BLACKLIST = this address is in your blacklist + +ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit + +DUPLICATE_MESSAGE = address sent duplicate message + +INVALID_TIMESTAMP_SIGNATURE = Invalid timestamp signature + +INVALID_BUT_OK = Invalid but OK \ No newline at end of file diff --git a/src/main/resources/i18n/TransactionValidity_ru.properties b/src/main/resources/i18n/TransactionValidity_ru.properties index c2dbe5df..e8761e7b 100644 --- a/src/main/resources/i18n/TransactionValidity_ru.properties +++ b/src/main/resources/i18n/TransactionValidity_ru.properties @@ -1,4 +1,3 @@ - ACCOUNT_ALREADY_EXISTS = аккаунт уже существует ACCOUNT_CANNOT_REWARD_SHARE = аккаунт не может делиться вознаграждением @@ -174,3 +173,13 @@ TRANSACTION_ALREADY_EXISTS = транзакция существует TRANSACTION_UNKNOWN = неизвестная транзакция TX_GROUP_ID_MISMATCH = не соответствие идентификатора группы c хэш транзации + +ADDRESS_IN_BLACKLIST = this address is in your blacklist + +ADDRESS_ABOVE_RATE_LIMIT = address reached specified rate limit + +DUPLICATE_MESSAGE = address sent duplicate message + +INVALID_TIMESTAMP_SIGNATURE = Invalid timestamp signature + +INVALID_BUT_OK = Invalid but OK \ No newline at end of file diff --git a/src/test/java/org/qortal/test/BlockArchiveTests.java b/src/test/java/org/qortal/test/BlockArchiveTests.java new file mode 100644 index 00000000..e2f2ed1c --- /dev/null +++ b/src/test/java/org/qortal/test/BlockArchiveTests.java @@ -0,0 +1,705 @@ +package org.qortal.test; + +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.controller.BlockMinter; +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.*; +import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving; +import org.qortal.repository.hsqldb.HSQLDBDatabasePruning; +import org.qortal.repository.hsqldb.HSQLDBRepository; +import org.qortal.settings.Settings; +import org.qortal.test.common.AtUtils; +import org.qortal.test.common.BlockUtils; +import org.qortal.test.common.Common; +import org.qortal.transaction.DeployAtTransaction; +import org.qortal.transaction.Transaction; +import org.qortal.transform.TransformationException; +import org.qortal.utils.BlockArchiveUtils; +import org.qortal.utils.NTP; +import org.qortal.utils.Triple; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.sql.SQLException; +import java.util.List; + +import static org.junit.Assert.*; + +public class BlockArchiveTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useSettings("test-settings-v2-block-archive.json"); + NTP.setFixedOffset(Settings.getInstance().getTestNtpOffset()); + this.deleteArchiveDirectory(); + } + + @After + public void afterTest() throws DataException { + this.deleteArchiveDirectory(); + } + + + @Test + public void 
testWriter() throws DataException, InterruptedException, TransformationException, IOException {
+        try (final Repository repository = RepositoryManager.getRepository()) {
+
+            // Mint some blocks so that we are able to archive them later
+            for (int i = 0; i < 1000; i++) {
+                BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+            }
+
+            // 900 blocks are trimmed (this specifies the first untrimmed height)
+            repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
+            repository.getATRepository().setAtTrimHeight(901);
+
+            // Check the max archive height - this should be one less than the first untrimmed height
+            final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+            assertEquals(900, maximumArchiveHeight);
+
+            // Write blocks 2-900 to the archive
+            BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
+            writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
+            BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+            assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
+
+            // Make sure that the archive contains the correct number of blocks
+            assertEquals(900 - 1, writer.getWrittenCount());
+
+            // Increment block archive height
+            repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
+            repository.saveChanges();
+            assertEquals(900 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
+
+            // Ensure the file exists
+            File outputFile = writer.getOutputPath().toFile();
+            assertTrue(outputFile.exists());
+        }
+    }
+
+    @Test
+    public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException {
+        try (final Repository repository = RepositoryManager.getRepository()) {
+
+            // Mint some blocks so that we are able to archive them later
+            for (int i = 0; i < 1000; i++) {
+                BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+            }
+
+            // 900 blocks are trimmed (this specifies the first untrimmed height)
+            repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
+            repository.getATRepository().setAtTrimHeight(901);
+
+            // Check the max archive height - this should be one less than the first untrimmed height
+            final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+            assertEquals(900, maximumArchiveHeight);
+
+            // Write blocks 2-900 to the archive
+            BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
+            writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
+            BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+            assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
+
+            // Make sure that the archive contains the correct number of blocks
+            assertEquals(900 - 1, writer.getWrittenCount());
+
+            // Increment block archive height
+            repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
+            repository.saveChanges();
+            assertEquals(900 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
+
+            // Ensure the file exists
+            File outputFile = writer.getOutputPath().toFile();
+            assertTrue(outputFile.exists());
+
+            // Read block 2 from the archive
+            BlockArchiveReader reader = BlockArchiveReader.getInstance();
+            Triple<BlockData, List<TransactionData>, List<ATStateData>> block2Info = reader.fetchBlockAtHeight(2);
+            BlockData block2ArchiveData = block2Info.getA();
+
+            // Read block 2 from the repository
+            BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2);
+
+            // Ensure the values match
+            assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight());
+            assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature());
+
+            // Test some values in the archive
+            assertEquals(1, block2ArchiveData.getOnlineAccountsCount());
+
+            // Read block 900 from the archive
+            Triple<BlockData, List<TransactionData>, List<ATStateData>> block900Info = reader.fetchBlockAtHeight(900);
+            BlockData block900ArchiveData = block900Info.getA();
+
+            // Read block 900 from the repository
+            BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900);
+
+            // Ensure the values match
+            assertEquals(block900ArchiveData.getHeight(), block900RepositoryData.getHeight());
+            assertArrayEquals(block900ArchiveData.getSignature(), block900RepositoryData.getSignature());
+
+            // Test some values in the archive
+            assertEquals(1, block900ArchiveData.getOnlineAccountsCount());
+
+        }
+    }
+
+    @Test
+    public void testArchivedAtStates() throws DataException, InterruptedException, TransformationException, IOException {
+        try (final Repository repository = RepositoryManager.getRepository()) {
+
+            // Deploy an AT so that we have AT state data
+            PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
+            byte[] creationBytes = AtUtils.buildSimpleAT();
+            long fundingAmount = 1_00000000L;
+            DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
+            String atAddress = deployAtTransaction.getATAccount().getAddress();
+
+            // Mint some blocks so that we are able to archive them later
+            for (int i = 0; i < 1000; i++) {
+                BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+            }
+
+            // 9 blocks are trimmed (this specifies the first untrimmed height)
+            repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(10);
+            repository.getATRepository().setAtTrimHeight(10);
+
+            // Check the max archive height
+            final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+            assertEquals(9, maximumArchiveHeight);
+
+            // Write blocks 2-9 to the archive
+            BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
+            writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
+            BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+            assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
+
+            // Make sure that the archive contains the correct number of blocks
+            assertEquals(9 - 1, writer.getWrittenCount());
+
+            // Increment block archive height
+            repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
+            repository.saveChanges();
+            assertEquals(9 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
+
+            // Ensure the file exists
+            File outputFile = writer.getOutputPath().toFile();
+            assertTrue(outputFile.exists());
+
+            // Check blocks 3-9
+            for (Integer testHeight = 2; testHeight <= 9; testHeight++) {
+
+                // Read a block from the archive
+                BlockArchiveReader reader = BlockArchiveReader.getInstance();
+                Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = reader.fetchBlockAtHeight(testHeight);
+                BlockData archivedBlockData = blockInfo.getA();
+                ATStateData archivedAtStateData = blockInfo.getC().isEmpty() ? null : blockInfo.getC().get(0);
+                List<TransactionData> archivedTransactions = blockInfo.getB();
+
+                // Read the same block from the repository
+                BlockData repositoryBlockData = repository.getBlockRepository().fromHeight(testHeight);
+                ATStateData repositoryAtStateData = repository.getATRepository().getATStateAtHeight(atAddress, testHeight);
+
+                // Ensure the repository has full AT state data
+                assertNotNull(repositoryAtStateData.getStateHash());
+                assertNotNull(repositoryAtStateData.getStateData());
+
+                // Check the archived AT state
+                if (testHeight == 2) {
+                    // Block 2 won't have an AT state hash because it's initial (and has the DEPLOY_AT in the same block)
+                    assertNull(archivedAtStateData);
+
+                    assertEquals(1, archivedTransactions.size());
+                    assertEquals(Transaction.TransactionType.DEPLOY_AT, archivedTransactions.get(0).getType());
+                }
+                else {
+                    // For blocks 3+, ensure the archive has the AT state data, but not the hashes
+                    assertNotNull(archivedAtStateData.getStateHash());
+                    assertNull(archivedAtStateData.getStateData());
+
+                    // They also shouldn't have any transactions
+                    assertTrue(archivedTransactions.isEmpty());
+                }
+
+                // Also check the online accounts count and height
+                assertEquals(1, archivedBlockData.getOnlineAccountsCount());
+                assertEquals(testHeight, archivedBlockData.getHeight());
+
+                // Ensure the values match
+                assertEquals(archivedBlockData.getHeight(), repositoryBlockData.getHeight());
+                assertArrayEquals(archivedBlockData.getSignature(), repositoryBlockData.getSignature());
+                assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount());
+                assertArrayEquals(archivedBlockData.getMinterSignature(), repositoryBlockData.getMinterSignature());
+                assertEquals(archivedBlockData.getATCount(), repositoryBlockData.getATCount());
+                assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount());
+                assertArrayEquals(archivedBlockData.getReference(), repositoryBlockData.getReference());
+                assertEquals(archivedBlockData.getTimestamp(), repositoryBlockData.getTimestamp());
+                assertEquals(archivedBlockData.getATFees(), repositoryBlockData.getATFees());
+                assertEquals(archivedBlockData.getTotalFees(), repositoryBlockData.getTotalFees());
+                assertEquals(archivedBlockData.getTransactionCount(), repositoryBlockData.getTransactionCount());
+                assertArrayEquals(archivedBlockData.getTransactionsSignature(), repositoryBlockData.getTransactionsSignature());
+
+                if (testHeight != 2) {
+                    assertArrayEquals(archivedAtStateData.getStateHash(), repositoryAtStateData.getStateHash());
+                }
+            }
+
+            // Check block 10 (unarchived)
+            BlockArchiveReader reader = BlockArchiveReader.getInstance();
+            Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = reader.fetchBlockAtHeight(10);
+            assertNull(blockInfo);
+
+        }
+
+    }
+
+    @Test
+    public void testArchiveAndPrune() throws DataException, InterruptedException, TransformationException, IOException {
+        try (final Repository repository = RepositoryManager.getRepository()) {
+
+            // Deploy an AT so that we have AT state data
+            PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
+            byte[] creationBytes = AtUtils.buildSimpleAT();
+            long fundingAmount = 1_00000000L;
+            AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
+
+            // Mint some blocks so that we are able to archive them later
+            for (int i = 0; i < 1000; i++) {
+                BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+            }
+
+            // Assume 900 blocks are trimmed (this specifies the first untrimmed
height) + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); + repository.getATRepository().setAtTrimHeight(901); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(900, maximumArchiveHeight); + + // Write blocks 2-900 to the archive + BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); + writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); + + // Make sure that the archive contains the correct number of blocks + assertEquals(900 - 1, writer.getWrittenCount()); + + // Increment block archive height + repository.getBlockArchiveRepository().setBlockArchiveHeight(901); + repository.saveChanges(); + assertEquals(901, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the file exists + File outputFile = writer.getOutputPath().toFile(); + assertTrue(outputFile.exists()); + + // Ensure the SQL repository contains blocks 2 and 900... + assertNotNull(repository.getBlockRepository().fromHeight(2)); + assertNotNull(repository.getBlockRepository().fromHeight(900)); + + // Prune all the archived blocks + int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900); + assertEquals(900-1, numBlocksPruned); + repository.getBlockRepository().setBlockPruneHeight(901); + + // Prune the AT states for the archived blocks + repository.getATRepository().rebuildLatestAtStates(); + int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900); + assertEquals(900-1, numATStatesPruned); + repository.getATRepository().setAtPruneHeight(901); + + // Now ensure the SQL repository is missing blocks 2 and 900... + assertNull(repository.getBlockRepository().fromHeight(2)); + assertNull(repository.getBlockRepository().fromHeight(900)); + + // ... 
but it's not missing blocks 1 and 901 (we don't prune the genesis block) + assertNotNull(repository.getBlockRepository().fromHeight(1)); + assertNotNull(repository.getBlockRepository().fromHeight(901)); + + // Validate the latest block height in the repository + assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight()); + + } + } + + @Test + public void testBulkArchiveAndPrune() throws DataException, SQLException { + try (final Repository repository = RepositoryManager.getRepository()) { + HSQLDBRepository hsqldb = (HSQLDBRepository) repository; + + // Deploy an AT so that we have AT state data + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + byte[] creationBytes = AtUtils.buildSimpleAT(); + long fundingAmount = 1_00000000L; + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + + // Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) { + BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); + } + + // Assume 900 blocks are trimmed (this specifies the first untrimmed height) + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); + repository.getATRepository().setAtTrimHeight(901); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(900, maximumArchiveHeight); + + // Check the current archive height + assertEquals(0, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Write blocks 2-900 to the archive (using bulk method) + int fileSizeTarget = 425000; // Pre-calculated size of 900 blocks + assertTrue(HSQLDBDatabaseArchiving.buildBlockArchive(repository, fileSizeTarget)); + + // Ensure the block archive height has increased + assertEquals(901, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the SQL repository contains blocks 2 and 900... + assertNotNull(repository.getBlockRepository().fromHeight(2)); + assertNotNull(repository.getBlockRepository().fromHeight(900)); + + // Check the current prune heights + assertEquals(0, repository.getBlockRepository().getBlockPruneHeight()); + assertEquals(0, repository.getATRepository().getAtPruneHeight()); + + // Prior to archiving or pruning, ensure blocks 2 to 1002 and their AT states are available in the db + for (int i=2; i<=1002; i++) { + assertNotNull(repository.getBlockRepository().fromHeight(i)); + List atStates = repository.getATRepository().getBlockATStatesAtHeight(i); + assertNotNull(atStates); + assertEquals(1, atStates.size()); + } + + // Prune all the archived blocks and AT states (using bulk method) + assertTrue(HSQLDBDatabasePruning.pruneBlocks(hsqldb)); + assertTrue(HSQLDBDatabasePruning.pruneATStates(hsqldb)); + + // Ensure the current prune heights have increased + assertEquals(901, repository.getBlockRepository().getBlockPruneHeight()); + assertEquals(901, repository.getATRepository().getAtPruneHeight()); + + // Now ensure the SQL repository is missing blocks 2 and 900... + assertNull(repository.getBlockRepository().fromHeight(2)); + assertNull(repository.getBlockRepository().fromHeight(900)); + + // ... 
but it's not missing blocks 1 and 901 (we don't prune the genesis block) + assertNotNull(repository.getBlockRepository().fromHeight(1)); + assertNotNull(repository.getBlockRepository().fromHeight(901)); + + // Validate the latest block height in the repository + assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight()); + + // Ensure blocks 2-900 are all available in the archive + for (int i=2; i<=900; i++) { + assertNotNull(repository.getBlockArchiveRepository().fromHeight(i)); + } + + // Ensure blocks 2-900 are NOT available in the db + for (int i=2; i<=900; i++) { + assertNull(repository.getBlockRepository().fromHeight(i)); + } + + // Ensure blocks 901 to 1002 and their AT states are available in the db + for (int i=901; i<=1002; i++) { + assertNotNull(repository.getBlockRepository().fromHeight(i)); + List atStates = repository.getATRepository().getBlockATStatesAtHeight(i); + assertNotNull(atStates); + assertEquals(1, atStates.size()); + } + + // Ensure blocks 901 to 1002 are not available in the archive + for (int i=901; i<=1002; i++) { + assertNull(repository.getBlockArchiveRepository().fromHeight(i)); + } + } + } + + @Test + public void testBulkArchiveAndPruneMultipleFiles() throws DataException, SQLException { + try (final Repository repository = RepositoryManager.getRepository()) { + HSQLDBRepository hsqldb = (HSQLDBRepository) repository; + + // Deploy an AT so that we have AT state data + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + byte[] creationBytes = AtUtils.buildSimpleAT(); + long fundingAmount = 1_00000000L; + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + + // Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) { + BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share")); + } + + // Assume 900 blocks are trimmed (this specifies the first untrimmed height) + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); + repository.getATRepository().setAtTrimHeight(901); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(900, maximumArchiveHeight); + + // Check the current archive height + assertEquals(0, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Write blocks 2-900 to the archive (using bulk method) + int fileSizeTarget = 42000; // Pre-calculated size of approx 90 blocks + assertTrue(HSQLDBDatabaseArchiving.buildBlockArchive(repository, fileSizeTarget)); + + // Ensure 10 archive files have been created + Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive"); + assertEquals(10, new File(archivePath.toString()).list().length); + + // Check the files exist + assertTrue(Files.exists(Paths.get(archivePath.toString(), "2-90.dat"))); + assertTrue(Files.exists(Paths.get(archivePath.toString(), "91-179.dat"))); + assertTrue(Files.exists(Paths.get(archivePath.toString(), "180-268.dat"))); + assertTrue(Files.exists(Paths.get(archivePath.toString(), "269-357.dat"))); + assertTrue(Files.exists(Paths.get(archivePath.toString(), "358-446.dat"))); + assertTrue(Files.exists(Paths.get(archivePath.toString(), "447-535.dat"))); + assertTrue(Files.exists(Paths.get(archivePath.toString(), "536-624.dat"))); + assertTrue(Files.exists(Paths.get(archivePath.toString(), "625-713.dat"))); + 
assertTrue(Files.exists(Paths.get(archivePath.toString(), "714-802.dat"))); + assertTrue(Files.exists(Paths.get(archivePath.toString(), "803-891.dat"))); + + // Ensure the block archive height has increased + // It won't be as high as 901, because blocks 892-901 were too small to reach the file size + // target of the 11th file + assertEquals(892, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the SQL repository contains blocks 2 and 891... + assertNotNull(repository.getBlockRepository().fromHeight(2)); + assertNotNull(repository.getBlockRepository().fromHeight(891)); + + // Check the current prune heights + assertEquals(0, repository.getBlockRepository().getBlockPruneHeight()); + assertEquals(0, repository.getATRepository().getAtPruneHeight()); + + // Prior to archiving or pruning, ensure blocks 2 to 1002 and their AT states are available in the db + for (int i=2; i<=1002; i++) { + assertNotNull(repository.getBlockRepository().fromHeight(i)); + List atStates = repository.getATRepository().getBlockATStatesAtHeight(i); + assertNotNull(atStates); + assertEquals(1, atStates.size()); + } + + // Prune all the archived blocks and AT states (using bulk method) + assertTrue(HSQLDBDatabasePruning.pruneBlocks(hsqldb)); + assertTrue(HSQLDBDatabasePruning.pruneATStates(hsqldb)); + + // Ensure the current prune heights have increased + assertEquals(892, repository.getBlockRepository().getBlockPruneHeight()); + assertEquals(892, repository.getATRepository().getAtPruneHeight()); + + // Now ensure the SQL repository is missing blocks 2 and 891... + assertNull(repository.getBlockRepository().fromHeight(2)); + assertNull(repository.getBlockRepository().fromHeight(891)); + + // ... but it's not missing blocks 1 and 892 (we don't prune the genesis block) + assertNotNull(repository.getBlockRepository().fromHeight(1)); + assertNotNull(repository.getBlockRepository().fromHeight(892)); + + // Validate the latest block height in the repository + assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight()); + + // Ensure blocks 2-891 are all available in the archive + for (int i=2; i<=891; i++) { + assertNotNull(repository.getBlockArchiveRepository().fromHeight(i)); + } + + // Ensure blocks 2-891 are NOT available in the db + for (int i=2; i<=891; i++) { + assertNull(repository.getBlockRepository().fromHeight(i)); + } + + // Ensure blocks 892 to 1002 and their AT states are available in the db + for (int i=892; i<=1002; i++) { + assertNotNull(repository.getBlockRepository().fromHeight(i)); + List atStates = repository.getATRepository().getBlockATStatesAtHeight(i); + assertNotNull(atStates); + assertEquals(1, atStates.size()); + } + + // Ensure blocks 892 to 1002 are not available in the archive + for (int i=892; i<=1002; i++) { + assertNull(repository.getBlockArchiveRepository().fromHeight(i)); + } + } + } + + @Test + public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Deploy an AT so that we have AT state data + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + byte[] creationBytes = AtUtils.buildSimpleAT(); + long fundingAmount = 1_00000000L; + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + + // Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) { + BlockMinter.mintTestingBlock(repository, 
Common.getTestAccount(repository, "alice-reward-share")); + } + + // Make sure that block 500 has full AT state data and data hash + List block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500); + ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500); + assertNotNull(atStatesData.getStateHash()); + assertNotNull(atStatesData.getStateData()); + + // Trim the first 500 blocks + repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500); + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501); + repository.getATRepository().trimAtStates(0, 500, 1000); + repository.getATRepository().setAtTrimHeight(501); + + // Now block 500 should only have the AT state data hash + block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500); + atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500); + assertNotNull(atStatesData.getStateHash()); + assertNull(atStatesData.getStateData()); + + // ... but block 501 should have the full data + List block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501); + atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501); + assertNotNull(atStatesData.getStateHash()); + assertNotNull(atStatesData.getStateData()); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository); + assertEquals(500, maximumArchiveHeight); + + BlockData block3DataPreArchive = repository.getBlockRepository().fromHeight(3); + + // Write blocks 2-500 to the archive + BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); + writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result); + + // Make sure that the archive contains the correct number of blocks + assertEquals(500 - 1, writer.getWrittenCount()); // -1 for the genesis block + + // Increment block archive height + repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount()); + repository.saveChanges(); + assertEquals(500 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight()); + + // Ensure the file exists + File outputFile = writer.getOutputPath().toFile(); + assertTrue(outputFile.exists()); + + // Ensure the SQL repository contains blocks 2 and 500... + assertNotNull(repository.getBlockRepository().fromHeight(2)); + assertNotNull(repository.getBlockRepository().fromHeight(500)); + + // Prune all the archived blocks + int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 500); + assertEquals(500-1, numBlocksPruned); + repository.getBlockRepository().setBlockPruneHeight(501); + + // Prune the AT states for the archived blocks + repository.getATRepository().rebuildLatestAtStates(); + int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500); + assertEquals(499, numATStatesPruned); + repository.getATRepository().setAtPruneHeight(501); + + // Now ensure the SQL repository is missing blocks 2 and 500... + assertNull(repository.getBlockRepository().fromHeight(2)); + assertNull(repository.getBlockRepository().fromHeight(500)); + + // ... 
but it's not missing blocks 1 and 501 (we don't prune the genesis block) + assertNotNull(repository.getBlockRepository().fromHeight(1)); + assertNotNull(repository.getBlockRepository().fromHeight(501)); + + // Validate the latest block height in the repository + assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight()); + + // Now orphan some unarchived blocks. + BlockUtils.orphanBlocks(repository, 500); + assertEquals(502, (int) repository.getBlockRepository().getLastBlock().getHeight()); + + // We're close to the lower limit of the SQL database now, so + // we need to import some blocks from the archive + BlockArchiveUtils.importFromArchive(401, 500, repository); + + // Ensure the SQL repository now contains block 401 but not 400... + assertNotNull(repository.getBlockRepository().fromHeight(401)); + assertNull(repository.getBlockRepository().fromHeight(400)); + + // Import the remaining 399 blocks + BlockArchiveUtils.importFromArchive(2, 400, repository); + + // Verify that block 3 matches the original + BlockData block3DataPostArchive = repository.getBlockRepository().fromHeight(3); + assertArrayEquals(block3DataPreArchive.getSignature(), block3DataPostArchive.getSignature()); + assertEquals(block3DataPreArchive.getHeight(), block3DataPostArchive.getHeight()); + + // Orphan 1 more block, which should be the last one that can be orphaned + BlockUtils.orphanBlocks(repository, 1); + + // Orphan another block, which should fail + Exception exception = null; + try { + BlockUtils.orphanBlocks(repository, 1); + } catch (DataException e) { + exception = e; + } + + // Ensure that a DataException is thrown because there is no more AT states data available + assertNotNull(exception); + assertEquals(DataException.class, exception.getClass()); + + // FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception + // and allow orphaning back through blocks with trimmed AT states. + + } + } + + + /** + * Many nodes are missing an ATStatesHeightIndex due to an earlier bug. + * In these cases we disable archiving and pruning as this index is an + * essential component in these processes. 
+ */ + @Test + public void testMissingAtStatesHeightIndex() throws DataException, SQLException { + try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) { + + // Firstly check that we're able to prune or archive when the index exists + assertTrue(repository.getATRepository().hasAtStatesHeightIndex()); + assertTrue(RepositoryManager.canArchiveOrPrune()); + + // Delete the index + repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute(); + + // Ensure that we're unable to prune or archive when the index doesn't exist + assertFalse(repository.getATRepository().hasAtStatesHeightIndex()); + assertFalse(RepositoryManager.canArchiveOrPrune()); + } + } + + + private void deleteArchiveDirectory() { + // Delete the archive directory if it exists + Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath(); + try { + FileUtils.deleteDirectory(archivePath.toFile()); + } catch (IOException e) { + + } + } + +} diff --git a/src/test/java/org/qortal/test/BootstrapTests.java b/src/test/java/org/qortal/test/BootstrapTests.java new file mode 100644 index 00000000..70852b68 --- /dev/null +++ b/src/test/java/org/qortal/test/BootstrapTests.java @@ -0,0 +1,245 @@ +package org.qortal.test; + +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.controller.BlockMinter; +import org.qortal.controller.Controller; +import org.qortal.data.block.BlockData; +import org.qortal.repository.*; +import org.qortal.settings.Settings; +import org.qortal.test.common.AtUtils; +import org.qortal.test.common.Common; +import org.qortal.transform.TransformationException; +import org.qortal.utils.NTP; + +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.*; + +public class BootstrapTests extends Common { + + @Before + public void beforeTest() throws DataException, IOException { + Common.useSettingsAndDb(Common.testSettingsFilename, false); + NTP.setFixedOffset(Settings.getInstance().getTestNtpOffset()); + this.deleteBootstraps(); + } + + @After + public void afterTest() throws DataException, IOException { + this.deleteBootstraps(); + this.deleteExportDirectory(); + } + + @Test + public void testCheckRepositoryState() throws DataException, InterruptedException, TransformationException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + this.buildDummyBlockchain(repository); + + Bootstrap bootstrap = new Bootstrap(repository); + assertTrue(bootstrap.checkRepositoryState()); + + } + } + + @Test + public void testValidateBlockchain() throws DataException, InterruptedException, TransformationException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + this.buildDummyBlockchain(repository); + + Bootstrap bootstrap = new Bootstrap(repository); + assertTrue(bootstrap.validateBlockchain()); + + } + } + + + @Test + public void testCreateAndImportBootstrap() throws DataException, InterruptedException, TransformationException, IOException { + + Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive", "2-900.dat"); + BlockData block1000; + byte[] originalArchiveContents; + + try 
(final Repository repository = RepositoryManager.getRepository()) { + this.buildDummyBlockchain(repository); + + Bootstrap bootstrap = new Bootstrap(repository); + Path bootstrapPath = bootstrap.getBootstrapOutputPath(); + + // Ensure the compressed bootstrap doesn't exist + assertFalse(Files.exists(bootstrapPath)); + + // Create bootstrap + bootstrap.create(); + + // Ensure the compressed bootstrap exists + assertTrue(Files.exists(bootstrapPath)); + + // Ensure the original block archive file exists + assertTrue(Files.exists(archivePath)); + originalArchiveContents = Files.readAllBytes(archivePath); + + // Ensure block 1000 exists in the repository + block1000 = repository.getBlockRepository().fromHeight(1000); + assertNotNull(block1000); + + // Ensure we can retrieve block 10 from the archive + assertNotNull(repository.getBlockArchiveRepository().fromHeight(10)); + + // Now delete block 1000 + repository.getBlockRepository().delete(block1000); + assertNull(repository.getBlockRepository().fromHeight(1000)); + + // Overwrite the archive with dummy data, and verify it + try (PrintWriter out = new PrintWriter(archivePath.toFile())) { + out.println("testdata"); + } + String newline = System.getProperty("line.separator"); + assertEquals("testdata", Files.readString(archivePath).replace(newline, "")); + + // Ensure we can no longer retrieve block 10 from the archive + assertNull(repository.getBlockArchiveRepository().fromHeight(10)); + + // Import the bootstrap back in + bootstrap.importFromPath(bootstrapPath); + } + + // We need a new connection because we have switched to a new repository + try (final Repository repository = RepositoryManager.getRepository()) { + + // Ensure the block archive file exists + assertTrue(Files.exists(archivePath)); + + // and that its contents match the original + assertArrayEquals(originalArchiveContents, Files.readAllBytes(archivePath)); + + // Make sure that block 1000 exists again + BlockData newBlock1000 = repository.getBlockRepository().fromHeight(1000); + assertNotNull(newBlock1000); + + // and ensure that the signatures match + assertArrayEquals(block1000.getSignature(), newBlock1000.getSignature()); + + // Ensure we can retrieve block 10 from the archive + assertNotNull(repository.getBlockArchiveRepository().fromHeight(10)); + } + } + + + private void buildDummyBlockchain(Repository repository) throws DataException, InterruptedException, TransformationException, IOException { + // Alice self share online + List mintingAndOnlineAccounts = new ArrayList<>(); + PrivateKeyAccount aliceSelfShare = Common.getTestAccount(repository, "alice-reward-share"); + mintingAndOnlineAccounts.add(aliceSelfShare); + + // Deploy an AT so that we have AT state data + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + byte[] creationBytes = AtUtils.buildSimpleAT(); + long fundingAmount = 1_00000000L; + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + + // Mint some blocks so that we are able to archive them later + for (int i = 0; i < 1000; i++) + BlockMinter.mintTestingBlock(repository, mintingAndOnlineAccounts.toArray(new PrivateKeyAccount[0])); + + // Assume 900 blocks are trimmed (this specifies the first untrimmed height) + repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901); + repository.getATRepository().setAtTrimHeight(901); + + // Check the max archive height - this should be one less than the first untrimmed height + final int maximumArchiveHeight = 
BlockArchiveWriter.getMaxArchiveHeight(repository); + + // Write blocks 2-900 to the archive + BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository); + writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes + BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); + + // Increment block archive height + repository.getBlockArchiveRepository().setBlockArchiveHeight(901); + + // Prune all the archived blocks + repository.getBlockRepository().pruneBlocks(0, 900); + repository.getBlockRepository().setBlockPruneHeight(901); + + // Prune the AT states for the archived blocks + repository.getATRepository().rebuildLatestAtStates(); + repository.getATRepository().pruneAtStates(0, 900); + repository.getATRepository().setAtPruneHeight(901); + + // Refill cache, used by Controller.getMinimumLatestBlockTimestamp() and other methods + Controller.getInstance().refillLatestBlocksCache(); + + repository.saveChanges(); + } + + @Test + public void testGetRandomHost() { + String[] bootstrapHosts = Settings.getInstance().getBootstrapHosts(); + List uniqueHosts = new ArrayList<>(); + + for (int i=0; i<1000; i++) { + Bootstrap bootstrap = new Bootstrap(); + String randomHost = bootstrap.getRandomHost(); + assertNotNull(randomHost); + + if (!uniqueHosts.contains(randomHost)){ + uniqueHosts.add(randomHost); + } + } + + // Ensure we have more than one bootstrap host in the settings + assertTrue(Arrays.asList(bootstrapHosts).size() > 1); + + // Ensure that all have been given the opportunity to be used + assertEquals(uniqueHosts.size(), Arrays.asList(bootstrapHosts).size()); + } + + private void deleteBootstraps() throws IOException { + try { + Path path = Paths.get(String.format("%s%s", Settings.getInstance().getBootstrapFilenamePrefix(), "bootstrap-archive.7z")); + Files.delete(path); + + } catch (NoSuchFileException e) { + // Nothing to delete + } + + try { + Path path = Paths.get(String.format("%s%s", Settings.getInstance().getBootstrapFilenamePrefix(), "bootstrap-toponly.7z")); + Files.delete(path); + + } catch (NoSuchFileException e) { + // Nothing to delete + } + + try { + Path path = Paths.get(String.format("%s%s", Settings.getInstance().getBootstrapFilenamePrefix(), "bootstrap-full.7z")); + Files.delete(path); + + } catch (NoSuchFileException e) { + // Nothing to delete + } + } + + private void deleteExportDirectory() { + // Delete archive directory if exists + Path archivePath = Paths.get(Settings.getInstance().getExportPath()); + try { + FileUtils.deleteDirectory(archivePath.toFile()); + } catch (IOException e) { + + } + } + +} diff --git a/src/test/java/org/qortal/test/CryptoTests.java b/src/test/java/org/qortal/test/CryptoTests.java index 44ad03f9..6a0133d2 100644 --- a/src/test/java/org/qortal/test/CryptoTests.java +++ b/src/test/java/org/qortal/test/CryptoTests.java @@ -15,7 +15,9 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; @@ -56,6 +58,37 @@ public class CryptoTests extends Common { assertArrayEquals(expected, digest); } + @Test + public void testFileDigest() throws IOException { + byte[] input = HashCode.fromString("00").asBytes(); + + Path tempPath = Files.createTempFile("", ".tmp"); + Files.write(tempPath, 
input, StandardOpenOption.CREATE); + + byte[] digest = Crypto.digest(tempPath.toFile()); + byte[] expected = HashCode.fromString("6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d").asBytes(); + + assertArrayEquals(expected, digest); + + Files.delete(tempPath); + } + + @Test + public void testFileDigestWithRandomData() throws IOException { + byte[] input = new byte[128]; + new Random().nextBytes(input); + + Path tempPath = Files.createTempFile("", ".tmp"); + Files.write(tempPath, input, StandardOpenOption.CREATE); + + byte[] fileDigest = Crypto.digest(tempPath.toFile()); + byte[] memoryDigest = Crypto.digest(input); + + assertArrayEquals(fileDigest, memoryDigest); + + Files.delete(tempPath); + } + @Test public void testPublicKeyToAddress() { byte[] publicKey = HashCode.fromString("775ada64a48a30b3bfc4f1db16bca512d4088704975a62bde78781ce0cba90d6").asBytes(); diff --git a/src/test/java/org/qortal/test/ImportExportTests.java b/src/test/java/org/qortal/test/ImportExportTests.java new file mode 100644 index 00000000..c7a5062f --- /dev/null +++ b/src/test/java/org/qortal/test/ImportExportTests.java @@ -0,0 +1,390 @@ +package org.qortal.test; + +import org.apache.commons.io.FileUtils; +import org.bitcoinj.core.Address; +import org.bitcoinj.core.AddressFormatException; +import org.bitcoinj.core.ECKey; +import org.json.JSONArray; +import org.json.JSONObject; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PublicKeyAccount; +import org.qortal.controller.tradebot.LitecoinACCTv1TradeBot; +import org.qortal.controller.tradebot.TradeBot; +import org.qortal.crosschain.Litecoin; +import org.qortal.crosschain.LitecoinACCTv1; +import org.qortal.crosschain.SupportedBlockchain; +import org.qortal.crypto.Crypto; +import org.qortal.data.account.MintingAccountData; +import org.qortal.data.crosschain.TradeBotData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.repository.hsqldb.HSQLDBImportExport; +import org.qortal.settings.Settings; +import org.qortal.test.common.Common; +import org.qortal.utils.NTP; + +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.*; + +public class ImportExportTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + this.deleteExportDirectory(); + } + + @After + public void afterTest() throws DataException { + this.deleteExportDirectory(); + } + + + @Test + public void testExportAndImportTradeBotStates() throws DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Ensure no trade bots exist + assertTrue(repository.getCrossChainRepository().getAllTradeBotData().isEmpty()); + + // Create some trade bots + List tradeBots = new ArrayList<>(); + for (int i=0; i<10; i++) { + TradeBotData tradeBotData = this.createTradeBotData(repository); + repository.getCrossChainRepository().save(tradeBotData); + tradeBots.add(tradeBotData); + } + + // Ensure they have been added + assertEquals(10, repository.getCrossChainRepository().getAllTradeBotData().size()); + + // Export them + HSQLDBImportExport.backupTradeBotStates(repository); + + // Delete them from the repository + for (TradeBotData tradeBotData : tradeBots) { + 
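// Trade-bot records are identified by their trade private key, which is also used to look them up again after import +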
repository.getCrossChainRepository().delete(tradeBotData.getTradePrivateKey()); + } + + // Ensure they have been deleted + assertTrue(repository.getCrossChainRepository().getAllTradeBotData().isEmpty()); + + // Import them + Path exportPath = HSQLDBImportExport.getExportDirectory(false); + Path filePath = Paths.get(exportPath.toString(), "TradeBotStates.json"); + HSQLDBImportExport.importDataFromFile(filePath.toString(), repository); + + // Ensure they have been imported + assertEquals(10, repository.getCrossChainRepository().getAllTradeBotData().size()); + + // Ensure all the data matches + for (TradeBotData tradeBotData : tradeBots) { + byte[] tradePrivateKey = tradeBotData.getTradePrivateKey(); + TradeBotData repositoryTradeBotData = repository.getCrossChainRepository().getTradeBotData(tradePrivateKey); + assertNotNull(repositoryTradeBotData); + assertEquals(tradeBotData.toJson().toString(), repositoryTradeBotData.toJson().toString()); + } + + repository.saveChanges(); + } + } + + @Test + public void testExportAndImportCurrentTradeBotStates() throws DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Ensure no trade bots exist + assertTrue(repository.getCrossChainRepository().getAllTradeBotData().isEmpty()); + + // Create some trade bots + List tradeBots = new ArrayList<>(); + for (int i=0; i<10; i++) { + TradeBotData tradeBotData = this.createTradeBotData(repository); + repository.getCrossChainRepository().save(tradeBotData); + tradeBots.add(tradeBotData); + } + + // Ensure they have been added + assertEquals(10, repository.getCrossChainRepository().getAllTradeBotData().size()); + + // Export them + HSQLDBImportExport.backupTradeBotStates(repository); + + // Delete them from the repository + for (TradeBotData tradeBotData : tradeBots) { + repository.getCrossChainRepository().delete(tradeBotData.getTradePrivateKey()); + } + + // Ensure they have been deleted + assertTrue(repository.getCrossChainRepository().getAllTradeBotData().isEmpty()); + + // Add some more trade bots + List additionalTradeBots = new ArrayList<>(); + for (int i=0; i<5; i++) { + TradeBotData tradeBotData = this.createTradeBotData(repository); + repository.getCrossChainRepository().save(tradeBotData); + additionalTradeBots.add(tradeBotData); + } + + // Export again + HSQLDBImportExport.backupTradeBotStates(repository); + + // Import current states only + Path exportPath = HSQLDBImportExport.getExportDirectory(false); + Path filePath = Paths.get(exportPath.toString(), "TradeBotStates.json"); + HSQLDBImportExport.importDataFromFile(filePath.toString(), repository); + + // Ensure they have been imported + assertEquals(5, repository.getCrossChainRepository().getAllTradeBotData().size()); + + // Ensure that only the additional trade bots have been imported and that the data matches + for (TradeBotData tradeBotData : additionalTradeBots) { + byte[] tradePrivateKey = tradeBotData.getTradePrivateKey(); + TradeBotData repositoryTradeBotData = repository.getCrossChainRepository().getTradeBotData(tradePrivateKey); + assertNotNull(repositoryTradeBotData); + assertEquals(tradeBotData.toJson().toString(), repositoryTradeBotData.toJson().toString()); + } + + // None of the original trade bots should exist in the repository + for (TradeBotData tradeBotData : tradeBots) { + byte[] tradePrivateKey = tradeBotData.getTradePrivateKey(); + TradeBotData repositoryTradeBotData = repository.getCrossChainRepository().getTradeBotData(tradePrivateKey); + assertNull(repositoryTradeBotData); 
+ } + + repository.saveChanges(); + } + } + + @Test + public void testExportAndImportAllTradeBotStates() throws DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Ensure no trade bots exist + assertTrue(repository.getCrossChainRepository().getAllTradeBotData().isEmpty()); + + // Create some trade bots + List tradeBots = new ArrayList<>(); + for (int i=0; i<10; i++) { + TradeBotData tradeBotData = this.createTradeBotData(repository); + repository.getCrossChainRepository().save(tradeBotData); + tradeBots.add(tradeBotData); + } + + // Ensure they have been added + assertEquals(10, repository.getCrossChainRepository().getAllTradeBotData().size()); + + // Export them + HSQLDBImportExport.backupTradeBotStates(repository); + + // Delete them from the repository + for (TradeBotData tradeBotData : tradeBots) { + repository.getCrossChainRepository().delete(tradeBotData.getTradePrivateKey()); + } + + // Ensure they have been deleted + assertTrue(repository.getCrossChainRepository().getAllTradeBotData().isEmpty()); + + // Add some more trade bots + List additionalTradeBots = new ArrayList<>(); + for (int i=0; i<5; i++) { + TradeBotData tradeBotData = this.createTradeBotData(repository); + repository.getCrossChainRepository().save(tradeBotData); + additionalTradeBots.add(tradeBotData); + } + + // Export again + HSQLDBImportExport.backupTradeBotStates(repository); + + // Import all states from the archive + Path exportPath = HSQLDBImportExport.getExportDirectory(false); + Path filePath = Paths.get(exportPath.toString(), "TradeBotStatesArchive.json"); + HSQLDBImportExport.importDataFromFile(filePath.toString(), repository); + + // Ensure they have been imported + assertEquals(15, repository.getCrossChainRepository().getAllTradeBotData().size()); + + // Ensure that all known trade bots have been imported and that the data matches + tradeBots.addAll(additionalTradeBots); + + for (TradeBotData tradeBotData : tradeBots) { + byte[] tradePrivateKey = tradeBotData.getTradePrivateKey(); + TradeBotData repositoryTradeBotData = repository.getCrossChainRepository().getTradeBotData(tradePrivateKey); + assertNotNull(repositoryTradeBotData); + assertEquals(tradeBotData.toJson().toString(), repositoryTradeBotData.toJson().toString()); + } + + repository.saveChanges(); + } + } + + @Test + public void testExportAndImportLegacyTradeBotStates() throws DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Create some trade bots, but don't save them in the repository + List tradeBots = new ArrayList<>(); + for (int i=0; i<10; i++) { + TradeBotData tradeBotData = this.createTradeBotData(repository); + tradeBots.add(tradeBotData); + } + + // Create a legacy format TradeBotStates.json backup file + this.exportLegacyTradeBotStatesJson(tradeBots); + + // Ensure no trade bots exist in repository + assertTrue(repository.getCrossChainRepository().getAllTradeBotData().isEmpty()); + + // Import the legacy format file + Path exportPath = HSQLDBImportExport.getExportDirectory(false); + Path filePath = Paths.get(exportPath.toString(), "TradeBotStates.json"); + HSQLDBImportExport.importDataFromFile(filePath.toString(), repository); + + // Ensure they have been imported + assertEquals(10, repository.getCrossChainRepository().getAllTradeBotData().size()); + + for (TradeBotData tradeBotData : tradeBots) { + byte[] tradePrivateKey = tradeBotData.getTradePrivateKey(); + TradeBotData repositoryTradeBotData = 
repository.getCrossChainRepository().getTradeBotData(tradePrivateKey); + assertNotNull(repositoryTradeBotData); + assertEquals(tradeBotData.toJson().toString(), repositoryTradeBotData.toJson().toString()); + } + + repository.saveChanges(); + } + } + + @Test + public void testExportAndImportMintingAccountData() throws DataException, IOException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Ensure no minting accounts exist + assertTrue(repository.getAccountRepository().getMintingAccounts().isEmpty()); + + // Create some minting accounts + List mintingAccounts = new ArrayList<>(); + for (int i=0; i<10; i++) { + MintingAccountData mintingAccountData = this.createMintingAccountData(); + repository.getAccountRepository().save(mintingAccountData); + mintingAccounts.add(mintingAccountData); + } + + // Ensure they have been added + assertEquals(10, repository.getAccountRepository().getMintingAccounts().size()); + + // Export them + HSQLDBImportExport.backupMintingAccounts(repository); + + // Delete them from the repository + for (MintingAccountData mintingAccountData : mintingAccounts) { + repository.getAccountRepository().delete(mintingAccountData.getPrivateKey()); + } + + // Ensure they have been deleted + assertTrue(repository.getAccountRepository().getMintingAccounts().isEmpty()); + + // Import them + Path exportPath = HSQLDBImportExport.getExportDirectory(false); + Path filePath = Paths.get(exportPath.toString(), "MintingAccounts.json"); + HSQLDBImportExport.importDataFromFile(filePath.toString(), repository); + + // Ensure they have been imported + assertEquals(10, repository.getAccountRepository().getMintingAccounts().size()); + + // Ensure all the data matches + for (MintingAccountData mintingAccountData : mintingAccounts) { + byte[] privateKey = mintingAccountData.getPrivateKey(); + MintingAccountData repositoryMintingAccountData = repository.getAccountRepository().getMintingAccount(privateKey); + assertNotNull(repositoryMintingAccountData); + assertEquals(mintingAccountData.toJson().toString(), repositoryMintingAccountData.toJson().toString()); + } + + repository.saveChanges(); + } + } + + + private TradeBotData createTradeBotData(Repository repository) throws DataException { + byte[] tradePrivateKey = TradeBot.generateTradePrivateKey(); + + byte[] tradeNativePublicKey = TradeBot.deriveTradeNativePublicKey(tradePrivateKey); + byte[] tradeNativePublicKeyHash = Crypto.hash160(tradeNativePublicKey); + String tradeNativeAddress = Crypto.toAddress(tradeNativePublicKey); + + byte[] tradeForeignPublicKey = TradeBot.deriveTradeForeignPublicKey(tradePrivateKey); + byte[] tradeForeignPublicKeyHash = Crypto.hash160(tradeForeignPublicKey); + + String receivingAddress = "2N8WCg52ULCtDSMjkgVTm5mtPdCsUptkHWE"; + + // Convert Litecoin receiving address into public key hash (we only support P2PKH at this time) + Address litecoinReceivingAddress; + try { + litecoinReceivingAddress = Address.fromString(Litecoin.getInstance().getNetworkParameters(), receivingAddress); + } catch (AddressFormatException e) { + throw new DataException("Unsupported Litecoin receiving address: " + receivingAddress); + } + + byte[] litecoinReceivingAccountInfo = litecoinReceivingAddress.getHash(); + + byte[] creatorPublicKey = new byte[32]; + PublicKeyAccount creator = new PublicKeyAccount(repository, creatorPublicKey); + + long timestamp = NTP.getTime(); + String atAddress = "AT_ADDRESS"; + long foreignAmount = 1234; + long qortAmount= 5678; + + TradeBotData tradeBotData = new 
TradeBotData(tradePrivateKey, LitecoinACCTv1.NAME, + LitecoinACCTv1TradeBot.State.BOB_WAITING_FOR_AT_CONFIRM.name(), LitecoinACCTv1TradeBot.State.BOB_WAITING_FOR_AT_CONFIRM.value, + creator.getAddress(), atAddress, timestamp, qortAmount, + tradeNativePublicKey, tradeNativePublicKeyHash, tradeNativeAddress, + null, null, + SupportedBlockchain.LITECOIN.name(), + tradeForeignPublicKey, tradeForeignPublicKeyHash, + foreignAmount, null, null, null, litecoinReceivingAccountInfo); + + return tradeBotData; + } + + private MintingAccountData createMintingAccountData() { + // These don't need to be valid keys - just 32 byte strings for the purposes of testing + byte[] privateKey = new ECKey().getPrivKeyBytes(); + byte[] publicKey = new ECKey().getPrivKeyBytes(); + + return new MintingAccountData(privateKey, publicKey); + } + + private void exportLegacyTradeBotStatesJson(List allTradeBotData) throws IOException, DataException { + JSONArray allTradeBotDataJson = new JSONArray(); + for (TradeBotData tradeBotData : allTradeBotData) { + JSONObject tradeBotDataJson = tradeBotData.toJson(); + allTradeBotDataJson.put(tradeBotDataJson); + } + + Path backupDirectory = HSQLDBImportExport.getExportDirectory(true); + String fileName = Paths.get(backupDirectory.toString(), "TradeBotStates.json").toString(); + FileWriter writer = new FileWriter(fileName); + writer.write(allTradeBotDataJson.toString()); + writer.close(); + } + + private void deleteExportDirectory() { + // Delete archive directory if exists + Path archivePath = Paths.get(Settings.getInstance().getExportPath()); + try { + FileUtils.deleteDirectory(archivePath.toFile()); + } catch (IOException e) { + + } + } + +} diff --git a/src/test/java/org/qortal/test/PruneTests.java b/src/test/java/org/qortal/test/PruneTests.java new file mode 100644 index 00000000..0914d794 --- /dev/null +++ b/src/test/java/org/qortal/test/PruneTests.java @@ -0,0 +1,91 @@ +package org.qortal.test; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.controller.BlockMinter; +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.AtUtils; +import org.qortal.test.common.Common; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.*; + +public class PruneTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testPruning() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Alice self share online + List mintingAndOnlineAccounts = new ArrayList<>(); + PrivateKeyAccount aliceSelfShare = Common.getTestAccount(repository, "alice-reward-share"); + mintingAndOnlineAccounts.add(aliceSelfShare); + + // Deploy an AT so that we have AT state data + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + byte[] creationBytes = AtUtils.buildSimpleAT(); + long fundingAmount = 1_00000000L; + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + + // Mint some blocks + for (int i = 2; i <= 10; i++) + BlockMinter.mintTestingBlock(repository, mintingAndOnlineAccounts.toArray(new PrivateKeyAccount[0])); + + // Make sure that all blocks have full AT state data and data hash + for (Integer i=2; i <= 10; i++) { + BlockData blockData = 
repository.getBlockRepository().fromHeight(i); + assertNotNull(blockData.getSignature()); + assertEquals(i, blockData.getHeight()); + List atStatesDataList = repository.getATRepository().getBlockATStatesAtHeight(i); + assertNotNull(atStatesDataList); + assertFalse(atStatesDataList.isEmpty()); + ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(atStatesDataList.get(0).getATAddress(), i); + assertNotNull(atStatesData.getStateHash()); + assertNotNull(atStatesData.getStateData()); + } + + // Prune blocks 2-5 + int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 5); + assertEquals(4, numBlocksPruned); + repository.getBlockRepository().setBlockPruneHeight(6); + + // Prune AT states for blocks 2-5 + int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 5); + assertEquals(4, numATStatesPruned); + repository.getATRepository().setAtPruneHeight(6); + + // Make sure that blocks 2-5 are now missing block data and AT states data + for (Integer i=2; i <= 5; i++) { + BlockData blockData = repository.getBlockRepository().fromHeight(i); + assertNull(blockData); + List atStatesDataList = repository.getATRepository().getBlockATStatesAtHeight(i); + assertTrue(atStatesDataList.isEmpty()); + } + + // ... but blocks 6-10 have block data and full AT states data + for (Integer i=6; i <= 10; i++) { + BlockData blockData = repository.getBlockRepository().fromHeight(i); + assertNotNull(blockData.getSignature()); + List atStatesDataList = repository.getATRepository().getBlockATStatesAtHeight(i); + assertNotNull(atStatesDataList); + assertFalse(atStatesDataList.isEmpty()); + ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(atStatesDataList.get(0).getATAddress(), i); + assertNotNull(atStatesData.getStateHash()); + assertNotNull(atStatesData.getStateData()); + } + } + } + +} diff --git a/src/test/java/org/qortal/test/RepositoryTests.java b/src/test/java/org/qortal/test/RepositoryTests.java index 91dd03c2..bb6510d5 100644 --- a/src/test/java/org/qortal/test/RepositoryTests.java +++ b/src/test/java/org/qortal/test/RepositoryTests.java @@ -3,9 +3,12 @@ package org.qortal.test; import org.junit.Before; import org.junit.Test; import org.qortal.account.Account; +import org.qortal.account.PublicKeyAccount; import org.qortal.asset.Asset; import org.qortal.crosschain.BitcoinACCTv1; import org.qortal.crypto.Crypto; +import org.qortal.data.account.AccountBalanceData; +import org.qortal.data.account.AccountData; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; @@ -22,13 +25,8 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; +import java.util.Random; +import java.util.concurrent.*; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -440,6 +438,119 @@ public class RepositoryTests extends Common { } } + @Test + public void testDefrag() throws DataException, TimeoutException { + try (final HSQLDBRepository hsqldb = (HSQLDBRepository) RepositoryManager.getRepository()) { + + this.populateWithRandomData(hsqldb); + + hsqldb.performPeriodicMaintenance(10 * 1000L); + + } + 
} + + @Test + public void testDefragOnDisk() throws DataException, TimeoutException { + Common.useSettingsAndDb(testSettingsFilename, false); + + try (final HSQLDBRepository hsqldb = (HSQLDBRepository) RepositoryManager.getRepository()) { + + this.populateWithRandomData(hsqldb); + + hsqldb.performPeriodicMaintenance(10 * 1000L); + + } + } + + @Test + public void testMultipleDefrags() throws DataException, TimeoutException { + // Mint some more blocks to populate the database + try (final HSQLDBRepository hsqldb = (HSQLDBRepository) RepositoryManager.getRepository()) { + + this.populateWithRandomData(hsqldb); + + for (int i = 0; i < 10; i++) { + hsqldb.performPeriodicMaintenance(10 * 1000L); + } + } + } + + @Test + public void testMultipleDefragsOnDisk() throws DataException, TimeoutException { + Common.useSettingsAndDb(testSettingsFilename, false); + + // Mint some more blocks to populate the database + try (final HSQLDBRepository hsqldb = (HSQLDBRepository) RepositoryManager.getRepository()) { + + this.populateWithRandomData(hsqldb); + + for (int i = 0; i < 10; i++) { + hsqldb.performPeriodicMaintenance(10 * 1000L); + } + } + } + + @Test + public void testMultipleDefragsWithDifferentData() throws DataException, TimeoutException { + for (int i=0; i<10; i++) { + try (final HSQLDBRepository hsqldb = (HSQLDBRepository) RepositoryManager.getRepository()) { + + this.populateWithRandomData(hsqldb); + hsqldb.performPeriodicMaintenance(10 * 1000L); + } + } + } + + @Test + public void testMultipleDefragsOnDiskWithDifferentData() throws DataException, TimeoutException { + Common.useSettingsAndDb(testSettingsFilename, false); + + for (int i=0; i<10; i++) { + try (final HSQLDBRepository hsqldb = (HSQLDBRepository) RepositoryManager.getRepository()) { + + this.populateWithRandomData(hsqldb); + hsqldb.performPeriodicMaintenance(10 * 1000L); + } + } + } + + private void populateWithRandomData(HSQLDBRepository repository) throws DataException { + Random random = new Random(); + + System.out.println("Creating random accounts..."); + + // Generate some random accounts + List accounts = new ArrayList<>(); + for (int ai = 0; ai < 20; ++ai) { + byte[] publicKey = new byte[32]; + random.nextBytes(publicKey); + + PublicKeyAccount account = new PublicKeyAccount(repository, publicKey); + accounts.add(account); + + AccountData accountData = new AccountData(account.getAddress()); + repository.getAccountRepository().ensureAccount(accountData); + } + repository.saveChanges(); + + System.out.println("Creating random balances..."); + + // Fill with lots of random balances + for (int i = 0; i < 100000; ++i) { + Account account = accounts.get(random.nextInt(accounts.size())); + int assetId = random.nextInt(2); + long balance = random.nextInt(100000); + + AccountBalanceData accountBalanceData = new AccountBalanceData(account.getAddress(), assetId, balance); + repository.getAccountRepository().save(accountBalanceData); + + // Maybe mint a block to change height + if (i > 0 && (i % 1000) == 0) + BlockUtils.mintBlock(repository); + } + repository.saveChanges(); + } + public static void hsqldbSleep(int millis) throws SQLException { System.out.println(String.format("HSQLDB sleep() thread ID: %s", Thread.currentThread().getId())); diff --git a/src/test/java/org/qortal/test/apps/CheckTranslations.java b/src/test/java/org/qortal/test/apps/CheckTranslations.java index faf1727d..2b59ce84 100644 --- a/src/test/java/org/qortal/test/apps/CheckTranslations.java +++ b/src/test/java/org/qortal/test/apps/CheckTranslations.java @@ -14,9 
+14,9 @@ public class CheckTranslations { private static final String[] SUPPORTED_LANGS = new String[] { "en", "de", "zh", "ru" }; private static final Set SYSTRAY_KEYS = Set.of("AUTO_UPDATE", "APPLYING_UPDATE_AND_RESTARTING", "BLOCK_HEIGHT", - "CHECK_TIME_ACCURACY", "CONNECTING", "CONNECTION", "CONNECTIONS", "CREATING_BACKUP_OF_DB_FILES", "DB_BACKUP", "EXIT", - "MINTING_DISABLED", "MINTING_ENABLED", "NTP_NAG_CAPTION", "NTP_NAG_TEXT_UNIX", "NTP_NAG_TEXT_WINDOWS", - "OPEN_UI", "SYNCHRONIZE_CLOCK", "SYNCHRONIZING_BLOCKCHAIN", "SYNCHRONIZING_CLOCK"); + "BUILD_VERSION", "CHECK_TIME_ACCURACY", "CONNECTING", "CONNECTION", "CONNECTIONS", "CREATING_BACKUP_OF_DB_FILES", + "DB_BACKUP", "DB_CHECKPOINT", "EXIT", "MINTING_DISABLED", "MINTING_ENABLED", "OPEN_UI", "PERFORMING_DB_CHECKPOINT", + "SYNCHRONIZE_CLOCK", "SYNCHRONIZING_BLOCKCHAIN", "SYNCHRONIZING_CLOCK"); private static String failurePrefix; diff --git a/src/test/java/org/qortal/test/at/AtRepositoryTests.java b/src/test/java/org/qortal/test/at/AtRepositoryTests.java index 9aed7296..8ef4c774 100644 --- a/src/test/java/org/qortal/test/at/AtRepositoryTests.java +++ b/src/test/java/org/qortal/test/at/AtRepositoryTests.java @@ -21,6 +21,7 @@ import org.qortal.group.Group; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.AtUtils; import org.qortal.test.common.BlockUtils; import org.qortal.test.common.Common; import org.qortal.test.common.TransactionUtils; @@ -35,13 +36,13 @@ public class AtRepositoryTests extends Common { @Test public void testGetATStateAtHeightWithData() throws DataException { - byte[] creationBytes = buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); String atAddress = deployAtTransaction.getATAccount().getAddress(); // Mint a few blocks @@ -58,13 +59,13 @@ public class AtRepositoryTests extends Common { @Test public void testGetATStateAtHeightWithoutData() throws DataException { - byte[] creationBytes = buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); String atAddress = deployAtTransaction.getATAccount().getAddress(); // Mint a few blocks @@ -75,7 +76,7 @@ public class AtRepositoryTests extends Common { Integer testHeight = maxHeight - 2; // Trim AT state data - repository.getATRepository().prepareForAtStateTrimming(); + repository.getATRepository().rebuildLatestAtStates(); repository.getATRepository().trimAtStates(2, maxHeight, 1000); ATStateData atStateData = repository.getATRepository().getATStateAtHeight(atAddress, testHeight); @@ -87,13 +88,13 @@ public class AtRepositoryTests extends Common { @Test public void testGetLatestATStateWithData() throws DataException { - byte[] creationBytes = 
buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); String atAddress = deployAtTransaction.getATAccount().getAddress(); // Mint a few blocks @@ -111,13 +112,13 @@ public class AtRepositoryTests extends Common { @Test public void testGetLatestATStatePostTrimming() throws DataException { - byte[] creationBytes = buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); String atAddress = deployAtTransaction.getATAccount().getAddress(); // Mint a few blocks @@ -129,7 +130,7 @@ public class AtRepositoryTests extends Common { Integer testHeight = blockchainHeight; // Trim AT state data - repository.getATRepository().prepareForAtStateTrimming(); + repository.getATRepository().rebuildLatestAtStates(); // COMMIT to check latest AT states persist / TEMPORARY table interaction repository.saveChanges(); @@ -144,14 +145,66 @@ public class AtRepositoryTests extends Common { } @Test - public void testGetMatchingFinalATStatesWithoutDataValue() throws DataException { - byte[] creationBytes = buildSimpleAT(); + public void testOrphanTrimmedATStates() throws DataException { + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); + String atAddress = deployAtTransaction.getATAccount().getAddress(); + + // Mint a few blocks + for (int i = 0; i < 10; ++i) + BlockUtils.mintBlock(repository); + + int blockchainHeight = repository.getBlockRepository().getBlockchainHeight(); + int maxTrimHeight = blockchainHeight - 4; + Integer testHeight = maxTrimHeight + 1; + + // Trim AT state data + repository.getATRepository().rebuildLatestAtStates(); + repository.saveChanges(); + repository.getATRepository().trimAtStates(2, maxTrimHeight, 1000); + + // Orphan 3 blocks + // This leaves one more untrimmed block, so the latest AT state should be available + BlockUtils.orphanBlocks(repository, 3); + + ATStateData atStateData = repository.getATRepository().getLatestATState(atAddress); + assertEquals(testHeight, atStateData.getHeight()); + + // We should always have the latest AT state data available + assertNotNull(atStateData.getStateData()); + + // Orphan 1 more block + Exception exception = null; + try { + BlockUtils.orphanBlocks(repository, 1); + } catch (DataException e) { + exception = e; + } + + // Ensure that a DataException is thrown because there is no more AT states data available + assertNotNull(exception); + 
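// The exception should also identify the AT whose previous state data could not be found +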
assertEquals(DataException.class, exception.getClass()); + assertEquals(String.format("Can't find previous AT state data for %s", atAddress), exception.getMessage()); + + // FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception + // and allow orphaning back through blocks with trimmed AT states. + } + } + + @Test + public void testGetMatchingFinalATStatesWithoutDataValue() throws DataException { + byte[] creationBytes = AtUtils.buildSimpleAT(); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); + + long fundingAmount = 1_00000000L; + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); String atAddress = deployAtTransaction.getATAccount().getAddress(); // Mint a few blocks @@ -191,13 +244,13 @@ public class AtRepositoryTests extends Common { @Test public void testGetMatchingFinalATStatesWithDataValue() throws DataException { - byte[] creationBytes = buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); String atAddress = deployAtTransaction.getATAccount().getAddress(); // Mint a few blocks @@ -237,13 +290,13 @@ public class AtRepositoryTests extends Common { @Test public void testGetBlockATStatesAtHeightWithData() throws DataException { - byte[] creationBytes = buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - doDeploy(repository, deployer, creationBytes, fundingAmount); + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); // Mint a few blocks for (int i = 0; i < 10; ++i) @@ -264,13 +317,13 @@ public class AtRepositoryTests extends Common { @Test public void testGetBlockATStatesAtHeightWithoutData() throws DataException { - byte[] creationBytes = buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - doDeploy(repository, deployer, creationBytes, fundingAmount); + AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); // Mint a few blocks for (int i = 0; i < 10; ++i) @@ -280,7 +333,7 @@ public class AtRepositoryTests extends Common { Integer testHeight = maxHeight - 2; // Trim AT state data - repository.getATRepository().prepareForAtStateTrimming(); + repository.getATRepository().rebuildLatestAtStates(); repository.getATRepository().trimAtStates(2, maxHeight, 1000); List atStates = repository.getATRepository().getBlockATStatesAtHeight(testHeight); @@ -297,13 +350,13 @@ public class AtRepositoryTests extends Common { @Test public void testSaveATStateWithData() throws DataException { - byte[] creationBytes = buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = 
Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); String atAddress = deployAtTransaction.getATAccount().getAddress(); // Mint a few blocks @@ -328,13 +381,13 @@ public class AtRepositoryTests extends Common { @Test public void testSaveATStateWithoutData() throws DataException { - byte[] creationBytes = buildSimpleAT(); + byte[] creationBytes = AtUtils.buildSimpleAT(); try (final Repository repository = RepositoryManager.getRepository()) { PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice"); long fundingAmount = 1_00000000L; - DeployAtTransaction deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount); String atAddress = deployAtTransaction.getATAccount().getAddress(); // Mint a few blocks @@ -354,7 +407,8 @@ public class AtRepositoryTests extends Common { /*StateData*/ null, atStateData.getStateHash(), atStateData.getFees(), - atStateData.isInitial()); + atStateData.isInitial(), + atStateData.getSleepUntilMessageTimestamp()); repository.getATRepository().save(newAtStateData); atStateData = repository.getATRepository().getATStateAtHeight(atAddress, testHeight); @@ -363,67 +417,4 @@ public class AtRepositoryTests extends Common { assertNull(atStateData.getStateData()); } } - - private byte[] buildSimpleAT() { - // Pretend we use 4 values in data segment - int addrCounter = 4; - - // Data segment - ByteBuffer dataByteBuffer = ByteBuffer.allocate(addrCounter * MachineState.VALUE_SIZE); - - ByteBuffer codeByteBuffer = ByteBuffer.allocate(512); - - // Two-pass version - for (int pass = 0; pass < 2; ++pass) { - codeByteBuffer.clear(); - - try { - // Stop and wait for next block - codeByteBuffer.put(OpCode.STP_IMD.compile()); - } catch (CompilationException e) { - throw new IllegalStateException("Unable to compile AT?", e); - } - } - - codeByteBuffer.flip(); - - byte[] codeBytes = new byte[codeByteBuffer.limit()]; - codeByteBuffer.get(codeBytes); - - final short ciyamAtVersion = 2; - final short numCallStackPages = 0; - final short numUserStackPages = 0; - final long minActivationAmount = 0L; - - return MachineState.toCreationBytes(ciyamAtVersion, codeBytes, dataByteBuffer.array(), numCallStackPages, numUserStackPages, minActivationAmount); - } - - private DeployAtTransaction doDeploy(Repository repository, PrivateKeyAccount deployer, byte[] creationBytes, long fundingAmount) throws DataException { - long txTimestamp = System.currentTimeMillis(); - byte[] lastReference = deployer.getLastReference(); - - if (lastReference == null) { - System.err.println(String.format("Qortal account %s has no last reference", deployer.getAddress())); - System.exit(2); - } - - Long fee = null; - String name = "Test AT"; - String description = "Test AT"; - String atType = "Test"; - String tags = "TEST"; - - BaseTransactionData baseTransactionData = new BaseTransactionData(txTimestamp, Group.NO_GROUP, lastReference, deployer.getPublicKey(), fee, null); - TransactionData deployAtTransactionData = new DeployAtTransactionData(baseTransactionData, name, description, atType, tags, creationBytes, fundingAmount, Asset.QORT); - - DeployAtTransaction deployAtTransaction = new DeployAtTransaction(repository, 
deployAtTransactionData); - - fee = deployAtTransaction.calcRecommendedFee(); - deployAtTransactionData.setFee(fee); - - TransactionUtils.signAndMint(repository, deployAtTransactionData, deployer); - - return deployAtTransaction; - } - } diff --git a/src/test/java/org/qortal/test/at/SleepUntilMessageOrHeightTests.java b/src/test/java/org/qortal/test/at/SleepUntilMessageOrHeightTests.java new file mode 100644 index 00000000..7ac952d2 --- /dev/null +++ b/src/test/java/org/qortal/test/at/SleepUntilMessageOrHeightTests.java @@ -0,0 +1,365 @@ +package org.qortal.test.at; + +import static org.junit.Assert.*; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; + +import org.ciyam.at.CompilationException; +import org.ciyam.at.FunctionCode; +import org.ciyam.at.MachineState; +import org.ciyam.at.OpCode; +import org.ciyam.at.Timestamp; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.Account; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.asset.Asset; +import org.qortal.at.QortalFunctionCode; +import org.qortal.block.Block; +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.BaseTransactionData; +import org.qortal.data.transaction.DeployAtTransactionData; +import org.qortal.data.transaction.MessageTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.group.Group; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.BlockUtils; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.transaction.DeployAtTransaction; +import org.qortal.transaction.MessageTransaction; +import org.qortal.transaction.Transaction; +import org.qortal.utils.BitTwiddling; + +public class SleepUntilMessageOrHeightTests extends Common { + + private static final byte[] messageData = new byte[] { 0x44 }; + private static final byte[] creationBytes = buildSleepUntilMessageOrHeightAT(); + private static final long fundingAmount = 1_00000000L; + private static final long WAKE_HEIGHT = 10L; + + private Repository repository = null; + private PrivateKeyAccount deployer; + private DeployAtTransaction deployAtTransaction; + private Account atAccount; + private String atAddress; + private byte[] rawNextTimestamp = new byte[32]; + private Transaction transaction; + + @Before + public void before() throws DataException { + Common.useDefaultSettings(); + + this.repository = RepositoryManager.getRepository(); + this.deployer = Common.getTestAccount(repository, "alice"); + + this.deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + this.atAccount = deployAtTransaction.getATAccount(); + this.atAddress = deployAtTransaction.getATAccount().getAddress(); + } + + @After + public void after() throws DataException { + if (this.repository != null) + this.repository.close(); + + this.repository = null; + } + + @Test + public void testDeploy() throws DataException { + // Confirm initial value is zero + extractNextTxTimestamp(repository, atAddress, rawNextTimestamp); + assertArrayEquals(new byte[32], rawNextTimestamp); + } + + @Test + public void testFeelessSleep() throws DataException { + // Mint block to allow AT to initialize and call SLEEP_UNTIL_MESSAGE + BlockUtils.mintBlock(repository); + + // Fetch AT's balance for this height + long preMintBalance = 
atAccount.getConfirmedBalance(Asset.QORT); + + // Mint block + BlockUtils.mintBlock(repository); + + // Fetch new AT balance + long postMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + assertEquals(preMintBalance, postMintBalance); + } + + @Test + public void testFeelessSleep2() throws DataException { + // Mint block to allow AT to initialize and call SLEEP_UNTIL_MESSAGE + BlockUtils.mintBlock(repository); + + // Fetch AT's balance for this height + long preMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + // Mint several blocks + for (int i = 0; i < 5; ++i) + BlockUtils.mintBlock(repository); + + // Fetch new AT balance + long postMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + assertEquals(preMintBalance, postMintBalance); + } + + @Test + public void testSleepUntilMessage() throws DataException { + // Mint block to allow AT to initialize and call SLEEP_UNTIL_MESSAGE_OR_HEIGHT + BlockUtils.mintBlock(repository); + + // Send message to AT + transaction = sendMessage(repository, deployer, messageData, atAddress); + BlockUtils.mintBlock(repository); + + // Mint block so AT executes and finds message + BlockUtils.mintBlock(repository); + + // Confirm AT finds message + assertTimestamp(repository, atAddress, transaction); + } + + @Test + public void testSleepUntilHeight() throws DataException { + // AT deployment in block 2 + + // Mint block to allow AT to initialize and call SLEEP_UNTIL_MESSAGE_OR_HEIGHT + BlockUtils.mintBlock(repository); // height now 3 + + // Fetch AT's balance for this height + long preMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + // Mint several blocks + for (int i = 3; i < WAKE_HEIGHT; ++i) + BlockUtils.mintBlock(repository); + + // We should now be at WAKE_HEIGHT + long height = repository.getBlockRepository().getBlockchainHeight(); + assertEquals(WAKE_HEIGHT, height); + + // AT should have woken and run at this height so balance should have changed + + // Fetch new AT balance + long postMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + assertNotSame(preMintBalance, postMintBalance); + + // Confirm AT has no message + extractNextTxTimestamp(repository, atAddress, rawNextTimestamp); + assertArrayEquals(new byte[32], rawNextTimestamp); + + // Mint yet another block + BlockUtils.mintBlock(repository); + + // AT should also have woken and run at this height so balance should have changed + + // Fetch new AT balance + long postMint2Balance = atAccount.getConfirmedBalance(Asset.QORT); + + assertNotSame(postMintBalance, postMint2Balance); + + // Confirm AT still has no message + extractNextTxTimestamp(repository, atAddress, rawNextTimestamp); + assertArrayEquals(new byte[32], rawNextTimestamp); + + } + + private static byte[] buildSleepUntilMessageOrHeightAT() { + // Labels for data segment addresses + int addrCounter = 0; + + // Beginning of data segment for easy extraction + final int addrNextTx = addrCounter; + addrCounter += 4; + + final int addrNextTxIndex = addrCounter++; + + final int addrLastTxTimestamp = addrCounter++; + + final int addrWakeHeight = addrCounter++; + + // Data segment + ByteBuffer dataByteBuffer = ByteBuffer.allocate(addrCounter * MachineState.VALUE_SIZE); + + // skip addrNextTx + dataByteBuffer.position(dataByteBuffer.position() + 4 * MachineState.VALUE_SIZE); + + // Store pointer to addrNextTx at addrNextTxIndex + dataByteBuffer.putLong(addrNextTx); + + // skip addrLastTxTimestamp + dataByteBuffer.position(dataByteBuffer.position() + MachineState.VALUE_SIZE); + + // Store fixed wake 
height (block 10) + dataByteBuffer.putLong(WAKE_HEIGHT); + + ByteBuffer codeByteBuffer = ByteBuffer.allocate(512); + + // Two-pass version + for (int pass = 0; pass < 2; ++pass) { + codeByteBuffer.clear(); + + try { + /* Initialization */ + + // Use AT creation 'timestamp' as starting point for finding transactions sent to AT + codeByteBuffer.put(OpCode.EXT_FUN_RET.compile(FunctionCode.GET_CREATION_TIMESTAMP, addrLastTxTimestamp)); + + // Set restart position to after this opcode + codeByteBuffer.put(OpCode.SET_PCS.compile()); + + /* Loop, waiting for message to AT */ + + /* Sleep until message arrives */ + codeByteBuffer.put(OpCode.EXT_FUN_DAT_2.compile(QortalFunctionCode.SLEEP_UNTIL_MESSAGE_OR_HEIGHT.value, addrLastTxTimestamp, addrWakeHeight)); + + // Find next transaction to this AT since the last one (if any) + codeByteBuffer.put(OpCode.EXT_FUN_DAT.compile(FunctionCode.PUT_TX_AFTER_TIMESTAMP_INTO_A, addrLastTxTimestamp)); + + // Copy A to data segment, starting at addrNextTx (as pointed to by addrNextTxIndex) + codeByteBuffer.put(OpCode.EXT_FUN_DAT.compile(FunctionCode.GET_A_IND, addrNextTxIndex)); + + // Stop if timestamp part of A is zero + codeByteBuffer.put(OpCode.STZ_DAT.compile(addrNextTx)); + + // Update our 'last found transaction's timestamp' using 'timestamp' from transaction + codeByteBuffer.put(OpCode.EXT_FUN_RET.compile(FunctionCode.GET_TIMESTAMP_FROM_TX_IN_A, addrLastTxTimestamp)); + + // We're done + codeByteBuffer.put(OpCode.FIN_IMD.compile()); + + } catch (CompilationException e) { + throw new IllegalStateException("Unable to compile AT?", e); + } + } + + codeByteBuffer.flip(); + + byte[] codeBytes = new byte[codeByteBuffer.limit()]; + codeByteBuffer.get(codeBytes); + + final short ciyamAtVersion = 2; + final short numCallStackPages = 0; + final short numUserStackPages = 0; + final long minActivationAmount = 0L; + + return MachineState.toCreationBytes(ciyamAtVersion, codeBytes, dataByteBuffer.array(), numCallStackPages, numUserStackPages, minActivationAmount); + } + + private DeployAtTransaction doDeploy(Repository repository, PrivateKeyAccount deployer, byte[] creationBytes, long fundingAmount) throws DataException { + long txTimestamp = System.currentTimeMillis(); + byte[] lastReference = deployer.getLastReference(); + + if (lastReference == null) { + System.err.println(String.format("Qortal account %s has no last reference", deployer.getAddress())); + System.exit(2); + } + + Long fee = null; + String name = "Test AT"; + String description = "Test AT"; + String atType = "Test"; + String tags = "TEST"; + + BaseTransactionData baseTransactionData = new BaseTransactionData(txTimestamp, Group.NO_GROUP, lastReference, deployer.getPublicKey(), fee, null); + TransactionData deployAtTransactionData = new DeployAtTransactionData(baseTransactionData, name, description, atType, tags, creationBytes, fundingAmount, Asset.QORT); + + DeployAtTransaction deployAtTransaction = new DeployAtTransaction(repository, deployAtTransactionData); + + fee = deployAtTransaction.calcRecommendedFee(); + deployAtTransactionData.setFee(fee); + + TransactionUtils.signAndMint(repository, deployAtTransactionData, deployer); + + return deployAtTransaction; + } + + private void extractNextTxTimestamp(Repository repository, String atAddress, byte[] rawNextTimestamp) throws DataException { + // Check AT result + ATStateData atStateData = repository.getATRepository().getLatestATState(atAddress); + byte[] stateData = atStateData.getStateData(); + + byte[] dataBytes = 
MachineState.extractDataBytes(stateData); + + System.arraycopy(dataBytes, 0, rawNextTimestamp, 0, rawNextTimestamp.length); + } + + private MessageTransaction sendMessage(Repository repository, PrivateKeyAccount sender, byte[] data, String recipient) throws DataException { + long txTimestamp = System.currentTimeMillis(); + byte[] lastReference = sender.getLastReference(); + + if (lastReference == null) { + System.err.println(String.format("Qortal account %s has no last reference", sender.getAddress())); + System.exit(2); + } + + Long fee = null; + int version = 4; + int nonce = 0; + long amount = 0; + Long assetId = null; // because amount is zero + + BaseTransactionData baseTransactionData = new BaseTransactionData(txTimestamp, Group.NO_GROUP, lastReference, sender.getPublicKey(), fee, null); + TransactionData messageTransactionData = new MessageTransactionData(baseTransactionData, version, nonce, recipient, amount, assetId, data, false, false); + + MessageTransaction messageTransaction = new MessageTransaction(repository, messageTransactionData); + + fee = messageTransaction.calcRecommendedFee(); + messageTransactionData.setFee(fee); + + TransactionUtils.signAndImportValid(repository, messageTransactionData, sender); + + return messageTransaction; + } + + private void assertTimestamp(Repository repository, String atAddress, Transaction transaction) throws DataException { + int height = transaction.getHeight(); + byte[] transactionSignature = transaction.getTransactionData().getSignature(); + + BlockData blockData = repository.getBlockRepository().fromHeight(height); + assertNotNull(blockData); + + Block block = new Block(repository, blockData); + + List blockTransactions = block.getTransactions(); + int sequence; + for (sequence = blockTransactions.size() - 1; sequence >= 0; --sequence) + if (Arrays.equals(blockTransactions.get(sequence).getTransactionData().getSignature(), transactionSignature)) + break; + + assertNotSame(-1, sequence); + + byte[] rawNextTimestamp = new byte[32]; + extractNextTxTimestamp(repository, atAddress, rawNextTimestamp); + + Timestamp expectedTimestamp = new Timestamp(height, sequence); + Timestamp actualTimestamp = new Timestamp(BitTwiddling.longFromBEBytes(rawNextTimestamp, 0)); + + assertEquals(String.format("Expected height %d, seq %d but was height %d, seq %d", + height, sequence, + actualTimestamp.blockHeight, actualTimestamp.transactionSequence + ), + expectedTimestamp.longValue(), + actualTimestamp.longValue()); + + byte[] expectedPartialSignature = new byte[24]; + System.arraycopy(transactionSignature, 8, expectedPartialSignature, 0, expectedPartialSignature.length); + + byte[] actualPartialSignature = new byte[24]; + System.arraycopy(rawNextTimestamp, 8, actualPartialSignature, 0, actualPartialSignature.length); + + assertArrayEquals(expectedPartialSignature, actualPartialSignature); + } + +} diff --git a/src/test/java/org/qortal/test/at/SleepUntilMessageTests.java b/src/test/java/org/qortal/test/at/SleepUntilMessageTests.java new file mode 100644 index 00000000..290f973a --- /dev/null +++ b/src/test/java/org/qortal/test/at/SleepUntilMessageTests.java @@ -0,0 +1,311 @@ +package org.qortal.test.at; + +import static org.junit.Assert.*; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; + +import org.ciyam.at.CompilationException; +import org.ciyam.at.FunctionCode; +import org.ciyam.at.MachineState; +import org.ciyam.at.OpCode; +import org.ciyam.at.Timestamp; +import org.junit.After; +import org.junit.Before; +import 
org.junit.Test; +import org.qortal.account.Account; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.asset.Asset; +import org.qortal.at.QortalFunctionCode; +import org.qortal.block.Block; +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.BaseTransactionData; +import org.qortal.data.transaction.DeployAtTransactionData; +import org.qortal.data.transaction.MessageTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.group.Group; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.BlockUtils; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.transaction.DeployAtTransaction; +import org.qortal.transaction.MessageTransaction; +import org.qortal.transaction.Transaction; +import org.qortal.utils.BitTwiddling; + +public class SleepUntilMessageTests extends Common { + + private static final byte[] messageData = new byte[] { 0x44 }; + private static final byte[] creationBytes = buildSleepUntilMessageAT(); + private static final long fundingAmount = 1_00000000L; + + private Repository repository = null; + private PrivateKeyAccount deployer; + private DeployAtTransaction deployAtTransaction; + private Account atAccount; + private String atAddress; + private byte[] rawNextTimestamp = new byte[32]; + private Transaction transaction; + + @Before + public void before() throws DataException { + Common.useDefaultSettings(); + + this.repository = RepositoryManager.getRepository(); + this.deployer = Common.getTestAccount(repository, "alice"); + + this.deployAtTransaction = doDeploy(repository, deployer, creationBytes, fundingAmount); + this.atAccount = deployAtTransaction.getATAccount(); + this.atAddress = deployAtTransaction.getATAccount().getAddress(); + } + + @After + public void after() throws DataException { + if (this.repository != null) + this.repository.close(); + + this.repository = null; + } + + @Test + public void testDeploy() throws DataException { + // Confirm initial value is zero + extractNextTxTimestamp(repository, atAddress, rawNextTimestamp); + assertArrayEquals(new byte[32], rawNextTimestamp); + } + + @Test + public void testFeelessSleep() throws DataException { + // Mint block to allow AT to initialize and call SLEEP_UNTIL_MESSAGE + BlockUtils.mintBlock(repository); + + // Fetch AT's balance for this height + long preMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + // Mint block + BlockUtils.mintBlock(repository); + + // Fetch new AT balance + long postMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + assertEquals(preMintBalance, postMintBalance); + } + + @Test + public void testFeelessSleep2() throws DataException { + // Mint block to allow AT to initialize and call SLEEP_UNTIL_MESSAGE + BlockUtils.mintBlock(repository); + + // Fetch AT's balance for this height + long preMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + // Mint several blocks + for (int i = 0; i < 10; ++i) + BlockUtils.mintBlock(repository); + + // Fetch new AT balance + long postMintBalance = atAccount.getConfirmedBalance(Asset.QORT); + + assertEquals(preMintBalance, postMintBalance); + } + + @Test + public void testSleepUntilMessage() throws DataException { + // Mint block to allow AT to initialize and call SLEEP_UNTIL_MESSAGE + BlockUtils.mintBlock(repository); + + // Send message to AT + transaction 
= sendMessage(repository, deployer, messageData, atAddress); + BlockUtils.mintBlock(repository); + + // Mint block so AT executes and finds message + BlockUtils.mintBlock(repository); + + // Confirm AT finds message + assertTimestamp(repository, atAddress, transaction); + } + + private static byte[] buildSleepUntilMessageAT() { + // Labels for data segment addresses + int addrCounter = 0; + + // Beginning of data segment for easy extraction + final int addrNextTx = addrCounter; + addrCounter += 4; + + final int addrNextTxIndex = addrCounter++; + + final int addrLastTxTimestamp = addrCounter++; + + // Data segment + ByteBuffer dataByteBuffer = ByteBuffer.allocate(addrCounter * MachineState.VALUE_SIZE); + + // skip addrNextTx + dataByteBuffer.position(dataByteBuffer.position() + 4 * MachineState.VALUE_SIZE); + + // Store pointer to addrNextTx at addrNextTxIndex + dataByteBuffer.putLong(addrNextTx); + + ByteBuffer codeByteBuffer = ByteBuffer.allocate(512); + + // Two-pass version + for (int pass = 0; pass < 2; ++pass) { + codeByteBuffer.clear(); + + try { + /* Initialization */ + + // Use AT creation 'timestamp' as starting point for finding transactions sent to AT + codeByteBuffer.put(OpCode.EXT_FUN_RET.compile(FunctionCode.GET_CREATION_TIMESTAMP, addrLastTxTimestamp)); + + // Set restart position to after this opcode + codeByteBuffer.put(OpCode.SET_PCS.compile()); + + /* Loop, waiting for message to AT */ + + /* Sleep until message arrives */ + codeByteBuffer.put(OpCode.EXT_FUN_DAT.compile(QortalFunctionCode.SLEEP_UNTIL_MESSAGE.value, addrLastTxTimestamp)); + + // Find next transaction to this AT since the last one (if any) + codeByteBuffer.put(OpCode.EXT_FUN_DAT.compile(FunctionCode.PUT_TX_AFTER_TIMESTAMP_INTO_A, addrLastTxTimestamp)); + + // Copy A to data segment, starting at addrNextTx (as pointed to by addrNextTxIndex) + codeByteBuffer.put(OpCode.EXT_FUN_DAT.compile(FunctionCode.GET_A_IND, addrNextTxIndex)); + + // Stop if timestamp part of A is zero + codeByteBuffer.put(OpCode.STZ_DAT.compile(addrNextTx)); + + // Update our 'last found transaction's timestamp' using 'timestamp' from transaction + codeByteBuffer.put(OpCode.EXT_FUN_RET.compile(FunctionCode.GET_TIMESTAMP_FROM_TX_IN_A, addrLastTxTimestamp)); + + // We're done + codeByteBuffer.put(OpCode.FIN_IMD.compile()); + + } catch (CompilationException e) { + throw new IllegalStateException("Unable to compile AT?", e); + } + } + + codeByteBuffer.flip(); + + byte[] codeBytes = new byte[codeByteBuffer.limit()]; + codeByteBuffer.get(codeBytes); + + final short ciyamAtVersion = 2; + final short numCallStackPages = 0; + final short numUserStackPages = 0; + final long minActivationAmount = 0L; + + return MachineState.toCreationBytes(ciyamAtVersion, codeBytes, dataByteBuffer.array(), numCallStackPages, numUserStackPages, minActivationAmount); + } + + private DeployAtTransaction doDeploy(Repository repository, PrivateKeyAccount deployer, byte[] creationBytes, long fundingAmount) throws DataException { + long txTimestamp = System.currentTimeMillis(); + byte[] lastReference = deployer.getLastReference(); + + if (lastReference == null) { + System.err.println(String.format("Qortal account %s has no last reference", deployer.getAddress())); + System.exit(2); + } + + Long fee = null; + String name = "Test AT"; + String description = "Test AT"; + String atType = "Test"; + String tags = "TEST"; + + BaseTransactionData baseTransactionData = new BaseTransactionData(txTimestamp, Group.NO_GROUP, lastReference, deployer.getPublicKey(), fee, null); + 
TransactionData deployAtTransactionData = new DeployAtTransactionData(baseTransactionData, name, description, atType, tags, creationBytes, fundingAmount, Asset.QORT); + + DeployAtTransaction deployAtTransaction = new DeployAtTransaction(repository, deployAtTransactionData); + + fee = deployAtTransaction.calcRecommendedFee(); + deployAtTransactionData.setFee(fee); + + TransactionUtils.signAndMint(repository, deployAtTransactionData, deployer); + + return deployAtTransaction; + } + + private void extractNextTxTimestamp(Repository repository, String atAddress, byte[] rawNextTimestamp) throws DataException { + // Check AT result + ATStateData atStateData = repository.getATRepository().getLatestATState(atAddress); + byte[] stateData = atStateData.getStateData(); + + byte[] dataBytes = MachineState.extractDataBytes(stateData); + + System.arraycopy(dataBytes, 0, rawNextTimestamp, 0, rawNextTimestamp.length); + } + + private MessageTransaction sendMessage(Repository repository, PrivateKeyAccount sender, byte[] data, String recipient) throws DataException { + long txTimestamp = System.currentTimeMillis(); + byte[] lastReference = sender.getLastReference(); + + if (lastReference == null) { + System.err.println(String.format("Qortal account %s has no last reference", sender.getAddress())); + System.exit(2); + } + + Long fee = null; + int version = 4; + int nonce = 0; + long amount = 0; + Long assetId = null; // because amount is zero + + BaseTransactionData baseTransactionData = new BaseTransactionData(txTimestamp, Group.NO_GROUP, lastReference, sender.getPublicKey(), fee, null); + TransactionData messageTransactionData = new MessageTransactionData(baseTransactionData, version, nonce, recipient, amount, assetId, data, false, false); + + MessageTransaction messageTransaction = new MessageTransaction(repository, messageTransactionData); + + fee = messageTransaction.calcRecommendedFee(); + messageTransactionData.setFee(fee); + + TransactionUtils.signAndImportValid(repository, messageTransactionData, sender); + + return messageTransaction; + } + + private void assertTimestamp(Repository repository, String atAddress, Transaction transaction) throws DataException { + int height = transaction.getHeight(); + byte[] transactionSignature = transaction.getTransactionData().getSignature(); + + BlockData blockData = repository.getBlockRepository().fromHeight(height); + assertNotNull(blockData); + + Block block = new Block(repository, blockData); + + List blockTransactions = block.getTransactions(); + int sequence; + for (sequence = blockTransactions.size() - 1; sequence >= 0; --sequence) + if (Arrays.equals(blockTransactions.get(sequence).getTransactionData().getSignature(), transactionSignature)) + break; + + assertNotSame(-1, sequence); + + byte[] rawNextTimestamp = new byte[32]; + extractNextTxTimestamp(repository, atAddress, rawNextTimestamp); + + Timestamp expectedTimestamp = new Timestamp(height, sequence); + Timestamp actualTimestamp = new Timestamp(BitTwiddling.longFromBEBytes(rawNextTimestamp, 0)); + + assertEquals(String.format("Expected height %d, seq %d but was height %d, seq %d", + height, sequence, + actualTimestamp.blockHeight, actualTimestamp.transactionSequence + ), + expectedTimestamp.longValue(), + actualTimestamp.longValue()); + + byte[] expectedPartialSignature = new byte[24]; + System.arraycopy(transactionSignature, 8, expectedPartialSignature, 0, expectedPartialSignature.length); + + byte[] actualPartialSignature = new byte[24]; + System.arraycopy(rawNextTimestamp, 8, 
actualPartialSignature, 0, actualPartialSignature.length); + + assertArrayEquals(expectedPartialSignature, actualPartialSignature); + } + +} diff --git a/src/test/java/org/qortal/test/common/AtUtils.java b/src/test/java/org/qortal/test/common/AtUtils.java new file mode 100644 index 00000000..3bc2b235 --- /dev/null +++ b/src/test/java/org/qortal/test/common/AtUtils.java @@ -0,0 +1,81 @@ +package org.qortal.test.common; + +import org.ciyam.at.CompilationException; +import org.ciyam.at.MachineState; +import org.ciyam.at.OpCode; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.asset.Asset; +import org.qortal.data.transaction.BaseTransactionData; +import org.qortal.data.transaction.DeployAtTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.group.Group; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.transaction.DeployAtTransaction; + +import java.nio.ByteBuffer; + +public class AtUtils { + + public static byte[] buildSimpleAT() { + // Pretend we use 4 values in data segment + int addrCounter = 4; + + // Data segment + ByteBuffer dataByteBuffer = ByteBuffer.allocate(addrCounter * MachineState.VALUE_SIZE); + + ByteBuffer codeByteBuffer = ByteBuffer.allocate(512); + + // Two-pass version + for (int pass = 0; pass < 2; ++pass) { + codeByteBuffer.clear(); + + try { + // Stop and wait for next block + codeByteBuffer.put(OpCode.STP_IMD.compile()); + } catch (CompilationException e) { + throw new IllegalStateException("Unable to compile AT?", e); + } + } + + codeByteBuffer.flip(); + + byte[] codeBytes = new byte[codeByteBuffer.limit()]; + codeByteBuffer.get(codeBytes); + + final short ciyamAtVersion = 2; + final short numCallStackPages = 0; + final short numUserStackPages = 0; + final long minActivationAmount = 0L; + + return MachineState.toCreationBytes(ciyamAtVersion, codeBytes, dataByteBuffer.array(), numCallStackPages, numUserStackPages, minActivationAmount); + } + + public static DeployAtTransaction doDeployAT(Repository repository, PrivateKeyAccount deployer, byte[] creationBytes, long fundingAmount) throws DataException { + long txTimestamp = System.currentTimeMillis(); + byte[] lastReference = deployer.getLastReference(); + + if (lastReference == null) { + System.err.println(String.format("Qortal account %s has no last reference", deployer.getAddress())); + System.exit(2); + } + + Long fee = null; + String name = "Test AT"; + String description = "Test AT"; + String atType = "Test"; + String tags = "TEST"; + + BaseTransactionData baseTransactionData = new BaseTransactionData(txTimestamp, Group.NO_GROUP, lastReference, deployer.getPublicKey(), fee, null); + TransactionData deployAtTransactionData = new DeployAtTransactionData(baseTransactionData, name, description, atType, tags, creationBytes, fundingAmount, Asset.QORT); + + DeployAtTransaction deployAtTransaction = new DeployAtTransaction(repository, deployAtTransactionData); + + fee = deployAtTransaction.calcRecommendedFee(); + deployAtTransactionData.setFee(fee); + + TransactionUtils.signAndMint(repository, deployAtTransactionData, deployer); + + return deployAtTransaction; + } +} diff --git a/src/test/java/org/qortal/test/common/Common.java b/src/test/java/org/qortal/test/common/Common.java index 24c86690..c45fcfd7 100644 --- a/src/test/java/org/qortal/test/common/Common.java +++ b/src/test/java/org/qortal/test/common/Common.java @@ -2,8 +2,11 @@ package org.qortal.test.common; import static org.junit.Assert.*; +import 
java.io.IOException; import java.math.BigDecimal; import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; import java.security.Security; import java.util.ArrayList; import java.util.Collections; @@ -15,6 +18,7 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; +import org.apache.commons.io.FileUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.bouncycastle.jce.provider.BouncyCastleProvider; @@ -46,9 +50,15 @@ public class Common { private static final Logger LOGGER = LogManager.getLogger(Common.class); - public static final String testConnectionUrl = "jdbc:hsqldb:mem:testdb"; - // For debugging, use this instead to write DB to disk for examination: - // public static final String testConnectionUrl = "jdbc:hsqldb:file:testdb/blockchain;create=true"; + public static final String testConnectionUrlMemory = "jdbc:hsqldb:mem:testdb"; + public static final String testConnectionUrlDisk = "jdbc:hsqldb:file:%s/blockchain;create=true"; + + // For debugging, use testConnectionUrlDisk instead of memory, to write DB to disk for examination. + // This can be achieved using `Common.useSettingsAndDb(Common.testSettingsFilename, false);` + // where `false` specifies to use a repository on disk rather than one in memory. + // Make sure to also comment out `Common.deleteTestRepository();` in closeRepository() below, so that + // the files remain after the test finishes. + public static final String testSettingsFilename = "test-settings-v2.json"; @@ -100,7 +110,7 @@ public class Common { return testAccountsByName.values().stream().map(account -> new TestAccount(repository, account)).collect(Collectors.toList()); } - public static void useSettings(String settingsFilename) throws DataException { + public static void useSettingsAndDb(String settingsFilename, boolean dbInMemory) throws DataException { closeRepository(); // Load/check settings, which potentially sets up blockchain config, etc. @@ -109,11 +119,15 @@ public class Common { assertNotNull("Test settings JSON file not found", testSettingsUrl); Settings.fileInstance(testSettingsUrl.getPath()); - setRepository(); + setRepository(dbInMemory); resetBlockchain(); } + public static void useSettings(String settingsFilename) throws DataException { + Common.useSettingsAndDb(settingsFilename, true); + } + public static void useDefaultSettings() throws DataException { useSettings(testSettingsFilename); NTP.setFixedOffset(Settings.getInstance().getTestNtpOffset()); @@ -186,15 +200,33 @@ public class Common { assertTrue(String.format("Non-genesis %s remains", typeName), remainingClone.isEmpty()); } - @BeforeClass - public static void setRepository() throws DataException { - RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(testConnectionUrl); + public static void setRepository(boolean inMemory) throws DataException { + String connectionUrlDisk = String.format(testConnectionUrlDisk, Settings.getInstance().getRepositoryPath()); + String connectionUrl = inMemory ? 
testConnectionUrlMemory : connectionUrlDisk; + RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(connectionUrl); RepositoryManager.setRepositoryFactory(repositoryFactory); } + public static void deleteTestRepository() throws DataException { + // Delete repository directory if exists + Path repositoryPath = Paths.get(Settings.getInstance().getRepositoryPath()); + try { + FileUtils.deleteDirectory(repositoryPath.toFile()); + } catch (IOException e) { + throw new DataException(String.format("Unable to delete test repository: %s", e.getMessage())); + } + } + + @BeforeClass + public static void setRepositoryInMemory() throws DataException { + Common.deleteTestRepository(); + Common.setRepository(true); + } + @AfterClass public static void closeRepository() throws DataException { RepositoryManager.closeRepositoryFactory(); + Common.deleteTestRepository(); // Comment out this line if you need to inspect the database after running a test } // Test assertions diff --git a/src/test/java/org/qortal/test/crosschain/DogecoinTests.java b/src/test/java/org/qortal/test/crosschain/DogecoinTests.java index b6d21315..2b0410c3 100644 --- a/src/test/java/org/qortal/test/crosschain/DogecoinTests.java +++ b/src/test/java/org/qortal/test/crosschain/DogecoinTests.java @@ -35,10 +35,10 @@ public class DogecoinTests extends Common { @Test public void testGetMedianBlockTime() throws BlockStoreException, ForeignBlockchainException { long before = System.currentTimeMillis(); - System.out.println(String.format("Bitcoin median blocktime: %d", dogecoin.getMedianBlockTime())); + System.out.println(String.format("Dogecoin median blocktime: %d", dogecoin.getMedianBlockTime())); long afterFirst = System.currentTimeMillis(); - System.out.println(String.format("Bitcoin median blocktime: %d", dogecoin.getMedianBlockTime())); + System.out.println(String.format("Dogecoin median blocktime: %d", dogecoin.getMedianBlockTime())); long afterSecond = System.currentTimeMillis(); long firstPeriod = afterFirst - before; @@ -64,10 +64,11 @@ public class DogecoinTests extends Common { } @Test + @Ignore(value = "No testnet nodes available, so we can't regularly test buildSpend yet") public void testBuildSpend() { - String xprv58 = "tprv8ZgxMBicQKsPdahhFSrCdvC1bsWyzHHZfTneTVqUXN6s1wEtZLwAkZXzFP6TYLg2aQMecZLXLre5bTVGajEB55L1HYJcawpdFG66STVAWPJ"; + String xprv58 = "dgpv51eADS3spNJh9drNeW1Tc1P9z2LyaQRXPBortsq6yice1k47C2u2Prvgxycr2ihNBWzKZ2LthcBBGiYkWZ69KUTVkcLVbnjq7pD8mnApEru"; - String recipient = "2N8WCg52ULCtDSMjkgVTm5mtPdCsUptkHWE"; + String recipient = "DP1iFao33xdEPa5vaArpj7sykfzKNeiJeX"; long amount = 1000L; Transaction transaction = dogecoin.buildSpend(xprv58, recipient, amount); @@ -81,7 +82,7 @@ public class DogecoinTests extends Common { @Test public void testGetWalletBalance() { - String xprv58 = "tprv8ZgxMBicQKsPdahhFSrCdvC1bsWyzHHZfTneTVqUXN6s1wEtZLwAkZXzFP6TYLg2aQMecZLXLre5bTVGajEB55L1HYJcawpdFG66STVAWPJ"; + String xprv58 = "dgpv51eADS3spNJh9drNeW1Tc1P9z2LyaQRXPBortsq6yice1k47C2u2Prvgxycr2ihNBWzKZ2LthcBBGiYkWZ69KUTVkcLVbnjq7pD8mnApEru"; Long balance = dogecoin.getWalletBalance(xprv58); @@ -102,7 +103,7 @@ public class DogecoinTests extends Common { @Test public void testGetUnusedReceiveAddress() throws ForeignBlockchainException { - String xprv58 = "tprv8ZgxMBicQKsPdahhFSrCdvC1bsWyzHHZfTneTVqUXN6s1wEtZLwAkZXzFP6TYLg2aQMecZLXLre5bTVGajEB55L1HYJcawpdFG66STVAWPJ"; + String xprv58 = "dgpv51eADS3spNJh9drNeW1Tc1P9z2LyaQRXPBortsq6yice1k47C2u2Prvgxycr2ihNBWzKZ2LthcBBGiYkWZ69KUTVkcLVbnjq7pD8mnApEru"; String address =
dogecoin.getUnusedReceiveAddress(xprv58); diff --git a/src/test/java/org/qortal/test/naming/IntegrityTests.java b/src/test/java/org/qortal/test/naming/IntegrityTests.java new file mode 100644 index 00000000..d278cf3a --- /dev/null +++ b/src/test/java/org/qortal/test/naming/IntegrityTests.java @@ -0,0 +1,345 @@ +package org.qortal.test.naming; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; +import org.qortal.data.transaction.*; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.test.common.transaction.TestTransaction; +import org.qortal.transaction.Transaction; + +import static org.junit.Assert.*; + +public class IntegrityTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testValidName() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = "{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + + // Run the database integrity check for this name + NamesDatabaseIntegrityCheck integrityCheck = new NamesDatabaseIntegrityCheck(); + assertEquals(1, integrityCheck.rebuildName(name, repository)); + + // Ensure the name still exists and the data is still correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + } + } + + @Test + public void testMissingName() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = "{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + + // Now delete the name, to simulate a database inconsistency + repository.getNameRepository().delete(name); + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Run the database integrity check for this name and check that a row was modified + NamesDatabaseIntegrityCheck integrityCheck = new NamesDatabaseIntegrityCheck(); + assertEquals(1, integrityCheck.rebuildName(name, repository)); + + // Ensure the name exists again and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + } + } + + @Test + public void testMissingNameAfterUpdate() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = 
"{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + + // Update the name + String newData = "{\"age\":31}"; + UpdateNameTransactionData updateTransactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), name, name, newData); + TransactionUtils.signAndMint(repository, updateTransactionData, alice); + + // Ensure the name still exists and the data has been updated + assertEquals(newData, repository.getNameRepository().fromName(name).getData()); + + // Now delete the name, to simulate a database inconsistency + repository.getNameRepository().delete(name); + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Run the database integrity check for this name + // We expect 2 modifications to be made - the original register name followed by the update + NamesDatabaseIntegrityCheck integrityCheck = new NamesDatabaseIntegrityCheck(); + assertEquals(2, integrityCheck.rebuildName(name, repository)); + + // Ensure the name exists and the data is correct + assertEquals(newData, repository.getNameRepository().fromName(name).getData()); + } + } + + @Test + public void testMissingNameAfterRename() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = "{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + + // Rename the name + String newName = "new-name"; + String newData = "{\"age\":31}"; + UpdateNameTransactionData updateTransactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), name, newName, newData); + TransactionUtils.signAndMint(repository, updateTransactionData, alice); + + // Ensure the new name exists and the data has been updated + assertEquals(newData, repository.getNameRepository().fromName(newName).getData()); + + // Ensure the old name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Now delete the new name, to simulate a database inconsistency + repository.getNameRepository().delete(newName); + + // Ensure the new name doesn't exist + assertNull(repository.getNameRepository().fromName(newName)); + + // Attempt to register the new name + transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), newName, data); + Transaction transaction = Transaction.fromData(repository, transactionData); + transaction.sign(alice); + + // Transaction should be invalid, because the database inconsistency was fixed by RegisterNameTransaction.preProcess() + Transaction.ValidationResult result = transaction.importAsUnconfirmed(); + assertTrue("Transaction should be invalid", Transaction.ValidationResult.OK != result); + assertTrue("Name should already be registered", Transaction.ValidationResult.NAME_ALREADY_REGISTERED == result); + } + } + + @Test + public void testRegisterMissingName() throws 
DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = "{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + + // Now delete the name, to simulate a database inconsistency + repository.getNameRepository().delete(name); + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Attempt to register the name again + String duplicateName = "TEST-nÁme"; + transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), duplicateName, data); + Transaction transaction = Transaction.fromData(repository, transactionData); + transaction.sign(alice); + + // Transaction should be invalid, because the database inconsistency was fixed by RegisterNameTransaction.preProcess() + Transaction.ValidationResult result = transaction.importAsUnconfirmed(); + assertTrue("Transaction should be invalid", Transaction.ValidationResult.OK != result); + assertTrue("Name should already be registered", Transaction.ValidationResult.NAME_ALREADY_REGISTERED == result); + } + } + + @Test + public void testUpdateMissingName() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String initialName = "test-name"; + String data = "{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(initialName).getData()); + + // Now delete the name, to simulate a database inconsistency + repository.getNameRepository().delete(initialName); + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(initialName)); + + // Attempt to update the name + String newName = "new-name"; + String newData = ""; + TransactionData updateTransactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, newName, newData); + Transaction transaction = Transaction.fromData(repository, updateTransactionData); + transaction.sign(alice); + + // Transaction should be valid, because the database inconsistency was fixed by UpdateNameTransaction.preProcess() + Transaction.ValidationResult result = transaction.importAsUnconfirmed(); + assertTrue("Transaction should be valid", Transaction.ValidationResult.OK == result); + } + } + + @Test + public void testUpdateToMissingName() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String initialName = "test-name"; + String data = "{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data 
is correct + assertEquals(data, repository.getNameRepository().fromName(initialName).getData()); + + // Register the second name that we will ultimately try and rename the first name to + String secondName = "new-missing-name"; + String secondNameData = "{\"data2\":true}"; + transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), secondName, secondNameData); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the second name exists and the data is correct + assertEquals(secondNameData, repository.getNameRepository().fromName(secondName).getData()); + + // Now delete the second name, to simulate a database inconsistency + repository.getNameRepository().delete(secondName); + + // Ensure the second name doesn't exist + assertNull(repository.getNameRepository().fromName(secondName)); + + // Attempt to rename the first name to the second name + TransactionData updateTransactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, secondName, secondNameData); + Transaction transaction = Transaction.fromData(repository, updateTransactionData); + transaction.sign(alice); + + // Transaction should be invalid, because the database inconsistency was fixed by UpdateNameTransaction.preProcess() + // Therefore the name that we are trying to rename TO already exists + Transaction.ValidationResult result = transaction.importAsUnconfirmed(); + assertTrue("Transaction should be invalid", Transaction.ValidationResult.OK != result); + assertTrue("Destination name should already exist", Transaction.ValidationResult.NAME_ALREADY_REGISTERED == result); + } + } + + @Test + public void testSellMissingName() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = "{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + + // Now delete the name, to simulate a database inconsistency + repository.getNameRepository().delete(name); + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Attempt to sell the name + TransactionData sellTransactionData = new SellNameTransactionData(TestTransaction.generateBase(alice), name, 123456); + Transaction transaction = Transaction.fromData(repository, sellTransactionData); + transaction.sign(alice); + + // Transaction should be valid, because the database inconsistency was fixed by SellNameTransaction.preProcess() + Transaction.ValidationResult result = transaction.importAsUnconfirmed(); + assertTrue("Transaction should be valid", Transaction.ValidationResult.OK == result); + } + } + + @Test + public void testBuyMissingName() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = "{\"age\":30}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure 
the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + + // Now delete the name, to simulate a database inconsistency + repository.getNameRepository().delete(name); + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Attempt to sell the name + long amount = 123456; + TransactionData sellTransactionData = new SellNameTransactionData(TestTransaction.generateBase(alice), name, amount); + TransactionUtils.signAndMint(repository, sellTransactionData, alice); + + // Ensure the name now exists + assertNotNull(repository.getNameRepository().fromName(name)); + + // Now delete the name again, to simulate another database inconsistency + repository.getNameRepository().delete(name); + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Bob now attempts to buy the name + String seller = alice.getAddress(); + PrivateKeyAccount bob = Common.getTestAccount(repository, "bob"); + TransactionData buyTransactionData = new BuyNameTransactionData(TestTransaction.generateBase(bob), name, amount, seller); + Transaction transaction = Transaction.fromData(repository, buyTransactionData); + transaction.sign(bob); + + // Transaction should be valid, because the database inconsistency was fixed by SellNameTransaction.preProcess() + Transaction.ValidationResult result = transaction.importAsUnconfirmed(); + assertTrue("Transaction should be valid", Transaction.ValidationResult.OK == result); + } + } + +} diff --git a/src/test/java/org/qortal/test/naming/MiscTests.java b/src/test/java/org/qortal/test/naming/MiscTests.java index c46cbfab..84fe3351 100644 --- a/src/test/java/org/qortal/test/naming/MiscTests.java +++ b/src/test/java/org/qortal/test/naming/MiscTests.java @@ -7,14 +7,13 @@ import java.util.List; import org.junit.Before; import org.junit.Test; import org.qortal.account.PrivateKeyAccount; -import org.qortal.data.transaction.RegisterNameTransactionData; -import org.qortal.data.transaction.TransactionData; -import org.qortal.data.transaction.UpdateNameTransactionData; +import org.qortal.controller.BlockMinter; +import org.qortal.data.transaction.*; +import org.qortal.naming.Name; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; -import org.qortal.test.common.Common; -import org.qortal.test.common.TransactionUtils; +import org.qortal.test.common.*; import org.qortal.test.common.transaction.TestTransaction; import org.qortal.transaction.Transaction; import org.qortal.transaction.Transaction.ValidationResult; @@ -32,7 +31,7 @@ public class MiscTests extends Common { // Register-name PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); String name = "initial-name"; - String data = "initial-data"; + String data = "{\"age\":30}"; RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); TransactionUtils.signAndMint(repository, transactionData, alice); @@ -51,7 +50,7 @@ public class MiscTests extends Common { // Register-name PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); String name = "test-name"; - String data = "{}"; + String data = "{\"age\":30}"; RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); TransactionUtils.signAndMint(repository, transactionData, alice); @@ -67,6 +66,30 @@ 
public class MiscTests extends Common { } } + // test trying to register same name twice (with different creator) + @Test + public void testDuplicateRegisterNameWithDifferentCreator() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + // Register-name + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = "{}"; + + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // duplicate (this time registered by Bob) + PrivateKeyAccount bob = Common.getTestAccount(repository, "bob"); + String duplicateName = "TEST-nÁme"; + transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(bob), duplicateName, data); + Transaction transaction = Transaction.fromData(repository, transactionData); + transaction.sign(bob); + + ValidationResult result = transaction.importAsUnconfirmed(); + assertTrue("Transaction should be invalid", ValidationResult.OK != result); + } + } + // test register then trying to update another name to existing name @Test public void testUpdateToExistingName() throws DataException { @@ -74,7 +97,7 @@ public class MiscTests extends Common { // Register-name PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); String name = "test-name"; - String data = "{}"; + String data = "{\"age\":30}"; TransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); TransactionUtils.signAndMint(repository, transactionData, alice); @@ -103,7 +126,7 @@ public class MiscTests extends Common { // Register-name PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); String name = alice.getAddress(); - String data = "{}"; + String data = "{\"age\":30}"; RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); Transaction transaction = Transaction.fromData(repository, transactionData); @@ -121,7 +144,7 @@ public class MiscTests extends Common { // Register-name PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); String name = "test-name"; - String data = "{}"; + String data = "{\"age\":30}"; TransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); TransactionUtils.signAndMint(repository, transactionData, alice); @@ -138,4 +161,147 @@ public class MiscTests extends Common { } } + // test registering and then orphaning + @Test + public void testRegisterNameAndOrphan() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + String name = "test-name"; + String data = "{\"age\":30}"; + + // Ensure the name doesn't exist + assertNull(repository.getNameRepository().fromName(name)); + + // Register the name + RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data); + TransactionUtils.signAndMint(repository, transactionData, alice); + + // Ensure the name exists and the data is correct + assertEquals(data, repository.getNameRepository().fromName(name).getData()); + + // Orphan the latest block + BlockUtils.orphanBlocks(repository, 1); + + // Ensure the name doesn't exist once again +
 assertNull(repository.getNameRepository().fromName(name));
+ }
+ }
+
+ @Test
+ public void testOrphanAndReregisterName() throws DataException {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
+ String name = "test-name";
+ String data = "{\"age\":30}";
+
+ // Ensure the name doesn't exist
+ assertNull(repository.getNameRepository().fromName(name));
+
+ // Register the name
+ RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data);
+ TransactionUtils.signAndMint(repository, transactionData, alice);
+
+ // Ensure the name exists and the data is correct
+ assertEquals(data, repository.getNameRepository().fromName(name).getData());
+
+ // Orphan the latest block
+ BlockUtils.orphanBlocks(repository, 1);
+
+ // Ensure the name doesn't exist once again
+ assertNull(repository.getNameRepository().fromName(name));
+
+ // Now check there is an unconfirmed transaction
+ assertEquals(1, repository.getTransactionRepository().getUnconfirmedTransactions().size());
+
+ // Re-mint the block, including the original transaction
+ BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+
+ // There should no longer be an unconfirmed transaction
+ assertEquals(0, repository.getTransactionRepository().getUnconfirmedTransactions().size());
+
+ // Orphan the latest block
+ BlockUtils.orphanBlocks(repository, 1);
+
+ // There should now be an unconfirmed transaction again
+ assertEquals(1, repository.getTransactionRepository().getUnconfirmedTransactions().size());
+
+ // Re-mint the block, including the original transaction
+ BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+
+ // Ensure there are no unconfirmed transactions
+ assertEquals(0, repository.getTransactionRepository().getUnconfirmedTransactions().size());
+ }
+ }
+
+ // test registering and then orphaning multiple times, with different versions of the transaction each time
+ // we can sometimes end up with more than one version of a transaction, if it is signed and submitted twice
+ @Test
+ public void testMultipleRegisterNameAndOrphan() throws DataException {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
+ String name = "test-name";
+ String data = "{\"age\":30}";
+
+ for (int i = 1; i <= 10; i++) {
+
+ // Ensure the name doesn't exist
+ assertNull(repository.getNameRepository().fromName(name));
+
+ // Register the name
+ RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data);
+ TransactionUtils.signAndMint(repository, transactionData, alice);
+
+ // Ensure the name exists and the data is correct
+ assertEquals(data, repository.getNameRepository().fromName(name).getData());
+
+ // The number of unconfirmed transactions should equal the number of cycles minus 1 (because one is in a block)
+ // If more than one made it into a block, this test would fail
+ assertEquals(i-1, repository.getTransactionRepository().getUnconfirmedTransactions().size());
+
+ // Orphan the latest block
+ BlockUtils.orphanBlocks(repository, 1);
+
+ // The number of unconfirmed transactions should equal the number of cycles
+ assertEquals(i, repository.getTransactionRepository().getUnconfirmedTransactions().size());
+
+ // Ensure the name doesn't exist once again
+ assertNull(repository.getNameRepository().fromName(name));
+ }
+ }
+ }
+
+ @Test
+ public void testSaveName() throws DataException {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ for (int i=0; i<10; i++) {
+
+ String name = "test-name";
+ String data = "{\"age\":30}";
+
+ PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
+ RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, data);
+
+ // Ensure the name doesn't exist
+ assertNull(repository.getNameRepository().fromName(name));
+
+ // Register the name
+ Name nameObj = new Name(repository, transactionData);
+ nameObj.register();
+
+ // Ensure the name now exists
+ assertNotNull(repository.getNameRepository().fromName(name));
+
+ // Unregister the name
+ nameObj.unregister();
+
+ // Ensure the name doesn't exist again
+ assertNull(repository.getNameRepository().fromName(name));
+
+ }
+ }
+ }
+ }
diff --git a/src/test/java/org/qortal/test/naming/UpdateTests.java b/src/test/java/org/qortal/test/naming/UpdateTests.java
index ffbf7177..a13b3138 100644
--- a/src/test/java/org/qortal/test/naming/UpdateTests.java
+++ b/src/test/java/org/qortal/test/naming/UpdateTests.java
@@ -5,6 +5,7 @@ import static org.junit.Assert.*;
 import org.junit.Before;
 import org.junit.Test;
 import org.qortal.account.PrivateKeyAccount;
+import org.qortal.data.naming.NameData;
 import org.qortal.data.transaction.RegisterNameTransactionData;
 import org.qortal.data.transaction.TransactionData;
 import org.qortal.data.transaction.UpdateNameTransactionData;
@@ -29,12 +30,21 @@ public class UpdateTests extends Common {
 // Register-name
 PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
 String initialName = "initial-name";
- String initialData = "initial-data";
+ String initialReducedName = "initia1-name";
+ String initialData = "{\"age\":30}";
 TransactionData initialTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, initialData);
 TransactionUtils.signAndMint(repository, initialTransactionData, alice);
+ // Check name, reduced name, and data exist
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ NameData nameData = repository.getNameRepository().fromName(initialName);
+ assertEquals("initia1-name", nameData.getReducedName());
+ assertEquals(initialData, nameData.getData());
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
 String newName = "new-name";
+ String newReducedName = "new-name";
 String newData = "";
 TransactionData updateTransactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, newName, newData);
 TransactionUtils.signAndMint(repository, updateTransactionData, alice);
@@ -42,20 +52,37 @@ public class UpdateTests extends Common {
 // Check old name no longer exists
 assertFalse(repository.getNameRepository().nameExists(initialName));
+ // Check reduced name no longer exists
+ assertNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
 // Check new name exists
 assertTrue(repository.getNameRepository().nameExists(newName));
+ // Check reduced name and data are correct for new name
+ NameData newNameData = repository.getNameRepository().fromName(newReducedName);
+ assertEquals(newReducedName, newNameData.getReducedName());
+ // Data should remain the same because it was empty in the UpdateNameTransactionData
+ assertEquals(initialData, newNameData.getData());
+
 // Check updated timestamp is correct
 assertEquals((Long) updateTransactionData.getTimestamp(), repository.getNameRepository().fromName(newName).getUpdated());
 // orphan and recheck
 BlockUtils.orphanLastBlock(repository);
- // Check new name no longer exists
+ // Check new name and reduced name no longer exist
 assertFalse(repository.getNameRepository().nameExists(newName));
+ assertNull(repository.getNameRepository().fromReducedName(newReducedName));
- // Check old name exists again
+ // Check old name and reduced name exist again
 assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
+ // Check data and reduced name are still present for this name
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ nameData = repository.getNameRepository().fromName(initialName);
+ assertEquals(initialReducedName, nameData.getReducedName());
+ assertEquals(initialData, nameData.getData());
 // Check updated timestamp is empty
 assertNull(repository.getNameRepository().fromName(initialName).getUpdated());
@@ -68,11 +95,17 @@ public class UpdateTests extends Common {
 // Register-name
 PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
 String initialName = "initial-name";
- String initialData = "initial-data";
+ String initialData = "{\"age\":30}";
+
+ String constantReducedName = "initia1-name";
 TransactionData initialTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, initialData);
 TransactionUtils.signAndMint(repository, initialTransactionData, alice);
+ // Check initial name exists
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(constantReducedName));
+
 String newName = "Initial-Name";
 String newData = "";
 TransactionData updateTransactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, newName, newData);
@@ -83,6 +116,7 @@ public class UpdateTests extends Common {
 // Check new name exists
 assertTrue(repository.getNameRepository().nameExists(newName));
+ assertNotNull(repository.getNameRepository().fromReducedName(constantReducedName));
 // Check updated timestamp is correct
 assertEquals((Long) updateTransactionData.getTimestamp(), repository.getNameRepository().fromName(newName).getUpdated());
@@ -95,6 +129,7 @@ public class UpdateTests extends Common {
 // Check old name exists again
 assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(constantReducedName));
 // Check updated timestamp is empty
 assertNull(repository.getNameRepository().fromName(initialName).getUpdated());
@@ -108,32 +143,43 @@ public class UpdateTests extends Common {
 // Register-name
 PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
 String initialName = "initial-name";
- String initialData = "initial-data";
+ String initialReducedName = "initia1-name";
+ String initialData = "{\"age\":30}";
 TransactionData initialTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, initialData);
 TransactionUtils.signAndMint(repository, initialTransactionData, alice);
+ // Check initial name exists
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
 String middleName = "middle-name";
+ String middleReducedName = "midd1e-name";
 String middleData = "";
 TransactionData middleTransactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, middleName, middleData);
 TransactionUtils.signAndMint(repository, middleTransactionData, alice);
 // Check old name no longer exists
 assertFalse(repository.getNameRepository().nameExists(initialName));
+ assertNull(repository.getNameRepository().fromReducedName(initialReducedName));
 // Check new name exists
 assertTrue(repository.getNameRepository().nameExists(middleName));
+ assertNotNull(repository.getNameRepository().fromReducedName(middleReducedName));
 String newestName = "newest-name";
+ String newestReducedName = "newest-name";
 String newestData = "newest-data";
 TransactionData newestTransactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), middleName, newestName, newestData);
 TransactionUtils.signAndMint(repository, newestTransactionData, alice);
 // Check previous name no longer exists
 assertFalse(repository.getNameRepository().nameExists(middleName));
+ assertNull(repository.getNameRepository().fromReducedName(middleReducedName));
 // Check newest name exists
 assertTrue(repository.getNameRepository().nameExists(newestName));
+ assertNotNull(repository.getNameRepository().fromReducedName(newestReducedName));
 // Check updated timestamp is correct
 assertEquals((Long) newestTransactionData.getTimestamp(), repository.getNameRepository().fromName(newestName).getUpdated());
@@ -143,9 +189,11 @@ public class UpdateTests extends Common {
 // Check newest name no longer exists
 assertFalse(repository.getNameRepository().nameExists(newestName));
+ assertNull(repository.getNameRepository().fromReducedName(newestReducedName));
 // Check previous name exists again
 assertTrue(repository.getNameRepository().nameExists(middleName));
+ assertNotNull(repository.getNameRepository().fromReducedName(middleReducedName));
 // Check updated timestamp is correct
 assertEquals((Long) middleTransactionData.getTimestamp(), repository.getNameRepository().fromName(middleName).getUpdated());
@@ -155,9 +203,11 @@ public class UpdateTests extends Common {
 // Check new name no longer exists
 assertFalse(repository.getNameRepository().nameExists(middleName));
+ assertNull(repository.getNameRepository().fromReducedName(middleReducedName));
 // Check original name exists again
 assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
 // Check updated timestamp is empty
 assertNull(repository.getNameRepository().fromName(initialName).getUpdated());
@@ -171,11 +221,16 @@ public class UpdateTests extends Common {
 // Register-name
 PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
 String initialName = "initial-name";
- String initialData = "initial-data";
+ String initialReducedName = "initia1-name";
+ String initialData = "{\"age\":30}";
 TransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, initialData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
+ // Check initial name exists
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
 // Don't update name, but update data.
 // This tests whether reverting a future update/sale can find the correct previous name
 String middleName = "";
@@ -185,29 +240,35 @@
 // Check old name still exists
 assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
 String newestName = "newest-name";
+ String newestReducedName = "newest-name";
 String newestData = "newest-data";
 transactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, newestName, newestData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
 // Check previous name no longer exists
 assertFalse(repository.getNameRepository().nameExists(initialName));
+ assertNull(repository.getNameRepository().fromReducedName(initialReducedName));
 // Check newest name exists
 assertTrue(repository.getNameRepository().nameExists(newestName));
+ assertNotNull(repository.getNameRepository().fromReducedName(newestReducedName));
 // orphan and recheck
 BlockUtils.orphanLastBlock(repository);
 // Check original name exists again
 assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
 // orphan and recheck
 BlockUtils.orphanLastBlock(repository);
 // Check original name still exists
 assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
 }
 }
@@ -217,11 +278,16 @@ public class UpdateTests extends Common {
 // Register-name
 PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
 String initialName = "initial-name";
- String initialData = "initial-data";
+ String initialReducedName = "initia1-name";
+ String initialData = "{\"age\":30}";
 TransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, initialData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
+ // Check initial name exists
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
 String newName = "";
 String newData = "new-data";
 transactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, newName, newData);
@@ -229,6 +295,7 @@
 // Check name still exists
 assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
 // Check data is correct
 assertEquals(newData, repository.getNameRepository().fromName(initialName).getData());
@@ -238,6 +305,7 @@
 // Check name still exists
 assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
 // Check old data restored
 assertEquals(initialData, repository.getNameRepository().fromName(initialName).getData());
@@ -251,13 +319,19 @@
 // Register-name
 PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
 String initialName = "initial-name";
- String initialData = "initial-data";
+ String initialReducedName = "initia1-name";
+ String initialData = "{\"age\":30}";
 TransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, initialData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
+ // Check initial name exists
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
 // Update data
 String middleName = "middle-name";
+ String middleReducedName = "midd1e-name";
 String middleData = "middle-data";
 transactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, middleName, middleData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
@@ -266,6 +340,7 @@
 assertEquals(middleData, repository.getNameRepository().fromName(middleName).getData());
 String newestName = "newest-name";
+ String newestReducedName = "newest-name";
 String newestData = "newest-data";
 transactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), middleName, newestName, newestData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
@@ -273,6 +348,14 @@
 // Check data is correct
 assertEquals(newestData, repository.getNameRepository().fromName(newestName).getData());
+ // Check initial name no longer exists
+ assertFalse(repository.getNameRepository().nameExists(initialName));
+ assertNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
+ // Check newest name exists
+ assertTrue(repository.getNameRepository().nameExists(newestName));
+ assertNotNull(repository.getNameRepository().fromReducedName(newestReducedName));
+
 // orphan and recheck
 BlockUtils.orphanLastBlock(repository);
@@ -284,6 +367,10 @@
 // Check data is correct
 assertEquals(initialData, repository.getNameRepository().fromName(initialName).getData());
+
+ // Check initial name exists again
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
 }
 }
@@ -294,38 +381,69 @@
 // Register-name
 PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
 String initialName = "initial-name";
- String initialData = "initial-data";
+ String initialReducedName = "initia1-name";
+ String initialData = "{\"age\":30}";
 TransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), initialName, initialData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
+ // Check initial name exists
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
 // Don't update data, but update name.
 // This tests whether reverting a future update/sale can find the correct previous data
 String middleName = "middle-name";
+ String middleReducedName = "midd1e-name";
 String middleData = "";
 transactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), initialName, middleName, middleData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
+ // Check original name no longer exists
+ assertFalse(repository.getNameRepository().nameExists(initialName));
+ assertNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
+ // Check middle name exists
+ assertTrue(repository.getNameRepository().nameExists(middleName));
+ assertNotNull(repository.getNameRepository().fromReducedName(middleReducedName));
+
 // Check data is correct
 assertEquals(initialData, repository.getNameRepository().fromName(middleName).getData());
 String newestName = "newest-name";
+ String newestReducedName = "newest-name";
 String newestData = "newest-data";
 transactionData = new UpdateNameTransactionData(TestTransaction.generateBase(alice), middleName, newestName, newestData);
 TransactionUtils.signAndMint(repository, transactionData, alice);
+ // Check middle name no longer exists
+ assertFalse(repository.getNameRepository().nameExists(middleName));
+ assertNull(repository.getNameRepository().fromReducedName(middleReducedName));
+
+ // Check newest name exists
+ assertTrue(repository.getNameRepository().nameExists(newestName));
+ assertNotNull(repository.getNameRepository().fromReducedName(newestReducedName));
+
 // Check data is correct
 assertEquals(newestData, repository.getNameRepository().fromName(newestName).getData());
 // orphan and recheck
 BlockUtils.orphanLastBlock(repository);
+ // Check middle name exists
+ assertTrue(repository.getNameRepository().nameExists(middleName));
+ assertNotNull(repository.getNameRepository().fromReducedName(middleReducedName));
+
 // Check data is correct
 assertEquals(initialData, repository.getNameRepository().fromName(middleName).getData());
 // orphan and recheck
 BlockUtils.orphanLastBlock(repository);
+ // Check initial name exists
+ assertTrue(repository.getNameRepository().nameExists(initialName));
+ assertNotNull(repository.getNameRepository().fromReducedName(initialReducedName));
+
 // Check data is correct
 assertEquals(initialData, repository.getNameRepository().fromName(initialName).getData());
 }
diff --git a/src/test/resources/test-settings-v2-bitcoin-regtest.json b/src/test/resources/test-settings-v2-bitcoin-regtest.json
index 86379ae7..687c240d 100644
--- a/src/test/resources/test-settings-v2-bitcoin-regtest.json
+++ b/src/test/resources/test-settings-v2-bitcoin-regtest.json
@@ -1,9 +1,13 @@
 {
+ "repositoryPath": "testdb",
 "bitcoinNet": "REGTEST",
 "litecoinNet": "REGTEST",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
- "minPeers": 0
+ "minPeers": 0,
+ "pruneBlockLimit": 100
 }
diff --git a/src/test/resources/test-settings-v2-block-archive.json b/src/test/resources/test-settings-v2-block-archive.json
new file mode 100644
index 00000000..c5ed1aa8
--- /dev/null
+++ b/src/test/resources/test-settings-v2-block-archive.json
@@ -0,0 +1,13 @@
+{
+ "bitcoinNet": "TEST3",
+ "litecoinNet": "TEST3",
+ "restrictedApi": false,
+ "blockchainConfig": "src/test/resources/test-chain-v2.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
+ "wipeUnconfirmedOnStart": false,
+ "testNtpOffset": 0,
+ "minPeers": 0,
+ "pruneBlockLimit": 100,
+ "repositoryPath": "dbtest"
+}
diff --git a/src/test/resources/test-settings-v2-founder-rewards.json b/src/test/resources/test-settings-v2-founder-rewards.json
index c89df187..02d71d76 100644
--- a/src/test/resources/test-settings-v2-founder-rewards.json
+++ b/src/test/resources/test-settings-v2-founder-rewards.json
@@ -1,7 +1,11 @@
 {
+ "repositoryPath": "testdb",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2-founder-rewards.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
- "minPeers": 0
+ "minPeers": 0,
+ "pruneBlockLimit": 100
 }
diff --git a/src/test/resources/test-settings-v2-leftover-reward.json b/src/test/resources/test-settings-v2-leftover-reward.json
index bdbc1d52..185bbeba 100644
--- a/src/test/resources/test-settings-v2-leftover-reward.json
+++ b/src/test/resources/test-settings-v2-leftover-reward.json
@@ -1,7 +1,11 @@
 {
+ "repositoryPath": "testdb",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2-leftover-reward.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
- "minPeers": 0
+ "minPeers": 0,
+ "pruneBlockLimit": 100
 }
diff --git a/src/test/resources/test-settings-v2-minting.json b/src/test/resources/test-settings-v2-minting.json
index 9c72c375..b5645812 100644
--- a/src/test/resources/test-settings-v2-minting.json
+++ b/src/test/resources/test-settings-v2-minting.json
@@ -1,7 +1,11 @@
 {
+ "repositoryPath": "testdb",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2-minting.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
- "minPeers": 0
+ "minPeers": 0,
+ "pruneBlockLimit": 100
 }
diff --git a/src/test/resources/test-settings-v2-qora-holder-extremes.json b/src/test/resources/test-settings-v2-qora-holder-extremes.json
index b311fbf2..e20fddf0 100644
--- a/src/test/resources/test-settings-v2-qora-holder-extremes.json
+++ b/src/test/resources/test-settings-v2-qora-holder-extremes.json
@@ -1,7 +1,11 @@
 {
+ "repositoryPath": "testdb",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2-qora-holder-extremes.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
- "minPeers": 0
+ "minPeers": 0,
+ "pruneBlockLimit": 100
 }
diff --git a/src/test/resources/test-settings-v2-qora-holder.json b/src/test/resources/test-settings-v2-qora-holder.json
index 83b23287..9d7d2567 100644
--- a/src/test/resources/test-settings-v2-qora-holder.json
+++ b/src/test/resources/test-settings-v2-qora-holder.json
@@ -1,7 +1,11 @@
 {
+ "repositoryPath": "testdb",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2-qora-holder.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
- "minPeers": 0
+ "minPeers": 0,
+ "pruneBlockLimit": 100
 }
diff --git a/src/test/resources/test-settings-v2-reward-levels.json b/src/test/resources/test-settings-v2-reward-levels.json
index 1c6862ad..3ee0179d 100644
--- a/src/test/resources/test-settings-v2-reward-levels.json
+++ b/src/test/resources/test-settings-v2-reward-levels.json
@@ -1,7 +1,11 @@
 {
+ "repositoryPath": "testdb",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2-reward-levels.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
- "minPeers": 0
+ "minPeers": 0,
+ "pruneBlockLimit": 100
 }
diff --git a/src/test/resources/test-settings-v2-reward-scaling.json b/src/test/resources/test-settings-v2-reward-scaling.json
index 262938b7..fa02ebe7 100644
--- a/src/test/resources/test-settings-v2-reward-scaling.json
+++ b/src/test/resources/test-settings-v2-reward-scaling.json
@@ -1,7 +1,11 @@
 {
+ "repositoryPath": "testdb",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2-reward-scaling.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
- "minPeers": 0
+ "minPeers": 0,
+ "pruneBlockLimit": 100
 }
diff --git a/src/test/resources/test-settings-v2.json b/src/test/resources/test-settings-v2.json
index f4052647..372fc2ed 100644
--- a/src/test/resources/test-settings-v2.json
+++ b/src/test/resources/test-settings-v2.json
@@ -1,10 +1,15 @@
 {
+ "repositoryPath": "testdb",
 "bitcoinNet": "TEST3",
 "litecoinNet": "TEST3",
 "restrictedApi": false,
 "blockchainConfig": "src/test/resources/test-chain-v2.json",
+ "exportPath": "qortal-backup-test",
+ "bootstrap": false,
 "wipeUnconfirmedOnStart": false,
 "testNtpOffset": 0,
 "minPeers": 0,
+ "pruneBlockLimit": 100,
+ "bootstrapFilenamePrefix": "test-",
 "dataPath": "data-test"
 }