mirror of
https://github.com/Qortal/qortal.git
synced 2025-02-11 17:55:50 +00:00
Merge remote-tracking branch 'qortal/master'
# Conflicts: # .gitignore # pom.xml # src/main/java/org/qortal/controller/Controller.java # src/main/java/org/qortal/gui/SysTray.java # src/main/java/org/qortal/settings/Settings.java # src/main/resources/i18n/ApiError_en.properties # src/test/java/org/qortal/test/CryptoTests.java # src/test/resources/test-settings-v2.json
This commit is contained in:
commit
f3ef112297
7
.gitignore
vendored
7
.gitignore
vendored
@ -1,4 +1,5 @@
|
||||
/db*
|
||||
/lists/
|
||||
/bin/
|
||||
/target/
|
||||
/qortal-backup/
|
||||
@ -15,8 +16,8 @@
|
||||
/settings.json
|
||||
/testnet*
|
||||
/settings*.json
|
||||
/testchain.json
|
||||
/run-testnet.sh
|
||||
/testchain*.json
|
||||
/run-testnet*.sh
|
||||
/.idea
|
||||
/qortal.iml
|
||||
.DS_Store
|
||||
@ -25,4 +26,6 @@
|
||||
/run.pid
|
||||
/run.log
|
||||
/WindowsInstaller/Install Files/qortal.jar
|
||||
/*.7z
|
||||
/tmp
|
||||
/data*
|
||||
|
@ -17,10 +17,10 @@
|
||||
<ROW Property="Manufacturer" Value="Qortal"/>
|
||||
<ROW Property="MsiLogging" MultiBuildValue="DefaultBuild:vp"/>
|
||||
<ROW Property="NTP_GOOD" Value="false"/>
|
||||
<ROW Property="ProductCode" Value="1033:{E67F58ED-2236-43A9-8895-B9AB96C60EB9} 1049:{E612D7B8-EB9F-481C-81FF-7530E0801F95} 2052:{8EC6F665-1D21-4DB8-8C55-05C2550FF1B3} 2057:{6CE725B6-BBDD-459D-8016-6D1D2FC1F4EC} " Type="16"/>
|
||||
<ROW Property="ProductCode" Value="1033:{4B61B59E-57CF-4088-892E-F0CF90F30771} 1049:{3FEC2386-884A-4688-8E4F-09A11F1E3DDD} 2052:{2D5F0C95-DDC6-45D7-858C-1D4AD2625578} 2057:{E07CCB96-A78E-4F79-9348-67E58B7C3949} " Type="16"/>
|
||||
<ROW Property="ProductLanguage" Value="2057"/>
|
||||
<ROW Property="ProductName" Value="Qortal"/>
|
||||
<ROW Property="ProductVersion" Value="1.5.6" Type="32"/>
|
||||
<ROW Property="ProductVersion" Value="2.0.0" Type="32"/>
|
||||
<ROW Property="RECONFIG_NTP" Value="true"/>
|
||||
<ROW Property="REMOVE_BLOCKCHAIN" Value="YES" Type="4"/>
|
||||
<ROW Property="REPAIR_BLOCKCHAIN" Value="YES" Type="4"/>
|
||||
@ -212,7 +212,7 @@
|
||||
<ROW Component="ADDITIONAL_LICENSE_INFO_71" ComponentId="{12A3ADBE-BB7A-496C-8869-410681E6232F}" Directory_="jdk.zipfs_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_71" Type="0"/>
|
||||
<ROW Component="ADDITIONAL_LICENSE_INFO_8" ComponentId="{D53AD95E-CF96-4999-80FC-5812277A7456}" Directory_="java.naming_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_8" Type="0"/>
|
||||
<ROW Component="ADDITIONAL_LICENSE_INFO_9" ComponentId="{6B7EA9B0-5D17-47A8-B78C-FACE86D15E01}" Directory_="java.net.http_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_9" Type="0"/>
|
||||
<ROW Component="AI_CustomARPName" ComponentId="{9DA2985C-778C-4D85-A44E-5B00D935EED2}" Directory_="APPDIR" Attributes="260" KeyPath="DisplayName" Options="1"/>
|
||||
<ROW Component="AI_CustomARPName" ComponentId="{E8EAFE6B-CB9E-4D2A-8597-9FAA2D7D98D2}" Directory_="APPDIR" Attributes="260" KeyPath="DisplayName" Options="1"/>
|
||||
<ROW Component="AI_ExePath" ComponentId="{3644948D-AE0B-41BB-9FAF-A79E70490A08}" Directory_="APPDIR" Attributes="260" KeyPath="AI_ExePath"/>
|
||||
<ROW Component="APPDIR" ComponentId="{680DFDDE-3FB4-47A5-8FF5-934F576C6F91}" Directory_="APPDIR" Attributes="0"/>
|
||||
<ROW Component="AccessBridgeCallbacks.h" ComponentId="{288055D1-1062-47A3-AA44-5601B4E38AED}" Directory_="bridge_Dir" Attributes="0" KeyPath="AccessBridgeCallbacks.h" Type="0"/>
|
||||
|
BIN
WindowsInstaller/qortal.ico
Executable file → Normal file
BIN
WindowsInstaller/qortal.ico
Executable file → Normal file
Binary file not shown.
Before Width: | Height: | Size: 250 KiB After Width: | Height: | Size: 42 KiB |
15
pom.xml
15
pom.xml
@ -14,6 +14,9 @@
|
||||
<ciyam-at.version>1.3.8</ciyam-at.version>
|
||||
<commons-net.version>3.6</commons-net.version>
|
||||
<commons-text.version>1.8</commons-text.version>
|
||||
<commons-io.version>2.6</commons-io.version>
|
||||
<commons-compress.version>1.21</commons-compress.version>
|
||||
<xz.version>1.9</xz.version>
|
||||
<dagger.version>1.2.2</dagger.version>
|
||||
<guava.version>28.1-jre</guava.version>
|
||||
<hsqldb.version>2.5.1</hsqldb.version>
|
||||
@ -454,7 +457,17 @@
|
||||
<dependency>
|
||||
<groupId>commons-io</groupId>
|
||||
<artifactId>commons-io</artifactId>
|
||||
<version>2.6</version>
|
||||
<version>${commons-io.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-compress</artifactId>
|
||||
<version>${commons-compress.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.tukaani</groupId>
|
||||
<artifactId>xz</artifactId>
|
||||
<version>${xz.version}</version>
|
||||
</dependency>
|
||||
<!-- For bitset/bitmap compression -->
|
||||
<dependency>
|
||||
|
@ -1,6 +1,7 @@
|
||||
package org.qortal;
|
||||
|
||||
import java.security.Security;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
@ -57,10 +58,10 @@ public class RepositoryMaintenance {
|
||||
|
||||
LOGGER.info("Starting repository periodic maintenance. This can take a while...");
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
repository.performPeriodicMaintenance();
|
||||
repository.performPeriodicMaintenance(null);
|
||||
|
||||
LOGGER.info("Repository periodic maintenance completed");
|
||||
} catch (DataException e) {
|
||||
} catch (DataException | TimeoutException e) {
|
||||
LOGGER.error("Repository periodic maintenance failed", e);
|
||||
}
|
||||
|
||||
|
@ -16,4 +16,8 @@ public enum ApiExceptionFactory {
|
||||
return createException(request, apiError, null);
|
||||
}
|
||||
|
||||
public ApiException createCustomException(HttpServletRequest request, ApiError apiError, String message) {
|
||||
return new ApiException(apiError.getStatus(), apiError.getCode(), message, null);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -14,6 +14,8 @@ import java.security.SecureRandom;
|
||||
import javax.net.ssl.KeyManagerFactory;
|
||||
import javax.net.ssl.SSLContext;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.eclipse.jetty.http.HttpVersion;
|
||||
import org.eclipse.jetty.rewrite.handler.RedirectPatternRule;
|
||||
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
|
||||
@ -50,6 +52,8 @@ import org.qortal.settings.Settings;
|
||||
|
||||
public class ApiService {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(ApiService.class);
|
||||
|
||||
private static ApiService instance;
|
||||
|
||||
private final ResourceConfig config;
|
||||
@ -203,6 +207,9 @@ public class ApiService {
|
||||
context.addServlet(TradeBotWebSocket.class, "/websockets/crosschain/tradebot");
|
||||
context.addServlet(PresenceWebSocket.class, "/websockets/presence");
|
||||
|
||||
// Warn about API security if needed
|
||||
this.checkApiSecurity();
|
||||
|
||||
// Start server
|
||||
this.server.start();
|
||||
} catch (Exception e) {
|
||||
@ -222,4 +229,23 @@ public class ApiService {
|
||||
this.server = null;
|
||||
}
|
||||
|
||||
private void checkApiSecurity() {
|
||||
// Warn about API security if needed
|
||||
boolean allConnectionsAllowed = false;
|
||||
if (Settings.getInstance().isApiKeyDisabled()) {
|
||||
for (String pattern : Settings.getInstance().getApiWhitelist()) {
|
||||
if (pattern.startsWith("0.0.0.0/") || pattern.startsWith("::/") || pattern.endsWith("/0")) {
|
||||
allConnectionsAllowed = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (allConnectionsAllowed) {
|
||||
LOGGER.warn("Warning: API key validation is currently disabled, and the API whitelist " +
|
||||
"is allowing all connections. This can be a security risk.");
|
||||
LOGGER.warn("To fix, set the apiKeyDisabled setting to false, or allow only specific local " +
|
||||
"IP addresses using the apiWhitelist setting.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -12,6 +12,11 @@ public abstract class Security {
|
||||
public static final String API_KEY_HEADER = "X-API-KEY";
|
||||
|
||||
public static void checkApiCallAllowed(HttpServletRequest request) {
|
||||
// If API key checking has been disabled, we will allow the request in all cases
|
||||
boolean isApiKeyDisabled = Settings.getInstance().isApiKeyDisabled();
|
||||
if (isApiKeyDisabled)
|
||||
return;
|
||||
|
||||
String expectedApiKey = Settings.getInstance().getApiKey();
|
||||
String passedApiKey = request.getHeader(API_KEY_HEADER);
|
||||
|
||||
|
18
src/main/java/org/qortal/api/model/AddressListRequest.java
Normal file
18
src/main/java/org/qortal/api/model/AddressListRequest.java
Normal file
@ -0,0 +1,18 @@
|
||||
package org.qortal.api.model;
|
||||
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import java.util.List;
|
||||
|
||||
@XmlAccessorType(XmlAccessType.FIELD)
|
||||
public class AddressListRequest {
|
||||
|
||||
@Schema(description = "A list of addresses")
|
||||
public List<String> addresses;
|
||||
|
||||
public AddressListRequest() {
|
||||
}
|
||||
|
||||
}
|
@ -22,6 +22,7 @@ import java.time.OffsetDateTime;
|
||||
import java.time.ZoneOffset;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@ -35,6 +36,7 @@ import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.core.LoggerContext;
|
||||
import org.apache.logging.log4j.core.appender.RollingFileAppender;
|
||||
import org.qortal.account.Account;
|
||||
@ -67,6 +69,8 @@ import com.google.common.collect.Lists;
|
||||
@Tag(name = "Admin")
|
||||
public class AdminResource {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(AdminResource.class);
|
||||
|
||||
private static final int MAX_LOG_LINES = 500;
|
||||
|
||||
@Context
|
||||
@ -460,6 +464,23 @@ public class AdminResource {
|
||||
if (targetHeight <= 0 || targetHeight > Controller.getInstance().getChainHeight())
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_HEIGHT);
|
||||
|
||||
// Make sure we're not orphaning as far back as the archived blocks
|
||||
// FUTURE: we could support this by first importing earlier blocks from the archive
|
||||
if (Settings.getInstance().isTopOnly() ||
|
||||
Settings.getInstance().isArchiveEnabled()) {
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Find the first unarchived block
|
||||
int oldestBlock = repository.getBlockArchiveRepository().getBlockArchiveHeight();
|
||||
// Add some extra blocks just in case we're currently archiving/pruning
|
||||
oldestBlock += 100;
|
||||
if (targetHeight <= oldestBlock) {
|
||||
LOGGER.info("Unable to orphan beyond block {} because it is archived", oldestBlock);
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_HEIGHT);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (BlockChain.orphan(targetHeight))
|
||||
return "true";
|
||||
else
|
||||
@ -554,13 +575,13 @@ public class AdminResource {
|
||||
@Path("/repository/data")
|
||||
@Operation(
|
||||
summary = "Import data into repository.",
|
||||
description = "Imports data from file on local machine. Filename is forced to 'import.json' if apiKey is not set.",
|
||||
description = "Imports data from file on local machine. Filename is forced to 'qortal-backup/TradeBotStates.json' if apiKey is not set.",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.TEXT_PLAIN,
|
||||
schema = @Schema(
|
||||
type = "string", example = "MintingAccounts.script"
|
||||
type = "string", example = "qortal-backup/TradeBotStates.json"
|
||||
)
|
||||
)
|
||||
),
|
||||
@ -578,7 +599,7 @@ public class AdminResource {
|
||||
|
||||
// Hard-coded because it's too dangerous to allow user-supplied filenames in weaker security contexts
|
||||
if (Settings.getInstance().getApiKey() == null)
|
||||
filename = "import.json";
|
||||
filename = "qortal-backup/TradeBotStates.json";
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
@ -590,6 +611,10 @@ public class AdminResource {
|
||||
repository.saveChanges();
|
||||
|
||||
return "true";
|
||||
|
||||
} catch (IOException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
|
||||
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
@ -645,14 +670,16 @@ public class AdminResource {
|
||||
blockchainLock.lockInterruptibly();
|
||||
|
||||
try {
|
||||
repository.backup(true);
|
||||
// Timeout if the database isn't ready for backing up after 60 seconds
|
||||
long timeout = 60 * 1000L;
|
||||
repository.backup(true, "backup", timeout);
|
||||
repository.saveChanges();
|
||||
|
||||
return "true";
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
} catch (InterruptedException | TimeoutException e) {
|
||||
// We couldn't lock blockchain to perform backup
|
||||
return "false";
|
||||
} catch (DataException e) {
|
||||
@ -677,13 +704,15 @@ public class AdminResource {
|
||||
blockchainLock.lockInterruptibly();
|
||||
|
||||
try {
|
||||
repository.performPeriodicMaintenance();
|
||||
// Timeout if the database isn't ready to start after 60 seconds
|
||||
long timeout = 60 * 1000L;
|
||||
repository.performPeriodicMaintenance(timeout);
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// No big deal
|
||||
} catch (DataException e) {
|
||||
} catch (DataException | TimeoutException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
@ -15,6 +15,8 @@ import java.math.BigDecimal;
|
||||
import java.math.BigInteger;
|
||||
import java.math.RoundingMode;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
@ -33,11 +35,13 @@ import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.model.BlockMintingInfo;
|
||||
import org.qortal.api.model.BlockSignerSummary;
|
||||
import org.qortal.block.Block;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.block.BlockSummaryData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.BlockArchiveReader;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
@ -81,11 +85,19 @@ public class BlocksResource {
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Check the database first
|
||||
BlockData blockData = repository.getBlockRepository().fromSignature(signature);
|
||||
if (blockData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
if (blockData != null) {
|
||||
return blockData;
|
||||
}
|
||||
|
||||
return blockData;
|
||||
// Not found, so try the block archive
|
||||
blockData = repository.getBlockArchiveRepository().fromSignature(signature);
|
||||
if (blockData != null) {
|
||||
return blockData;
|
||||
}
|
||||
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@ -116,16 +128,24 @@ public class BlocksResource {
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Check the database first
|
||||
BlockData blockData = repository.getBlockRepository().fromSignature(signature);
|
||||
if (blockData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
if (blockData != null) {
|
||||
Block block = new Block(repository, blockData);
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
|
||||
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
|
||||
bytes.write(BlockTransformer.toBytes(block));
|
||||
return Base58.encode(bytes.toByteArray());
|
||||
}
|
||||
|
||||
Block block = new Block(repository, blockData);
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
|
||||
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
|
||||
bytes.write(BlockTransformer.toBytes(block));
|
||||
return Base58.encode(bytes.toByteArray());
|
||||
// Not found, so try the block archive
|
||||
byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
|
||||
if (bytes != null) {
|
||||
return Base58.encode(bytes);
|
||||
}
|
||||
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
} catch (TransformationException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA, e);
|
||||
} catch (DataException | IOException e) {
|
||||
@ -170,8 +190,12 @@ public class BlocksResource {
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
if (repository.getBlockRepository().getHeightFromSignature(signature) == 0)
|
||||
// Check if the block exists in either the database or archive
|
||||
if (repository.getBlockRepository().getHeightFromSignature(signature) == 0 &&
|
||||
repository.getBlockArchiveRepository().getHeightFromSignature(signature) == 0) {
|
||||
// Not found in either the database or archive
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
}
|
||||
|
||||
return repository.getBlockRepository().getTransactionsFromSignature(signature, limit, offset, reverse);
|
||||
} catch (DataException e) {
|
||||
@ -200,7 +224,19 @@ public class BlocksResource {
|
||||
})
|
||||
public BlockData getFirstBlock() {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getBlockRepository().fromHeight(1);
|
||||
// Check the database first
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(1);
|
||||
if (blockData != null) {
|
||||
return blockData;
|
||||
}
|
||||
|
||||
// Try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromHeight(1);
|
||||
if (blockData != null) {
|
||||
return blockData;
|
||||
}
|
||||
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@ -262,17 +298,28 @@ public class BlocksResource {
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
BlockData childBlockData = null;
|
||||
|
||||
// Check if block exists in database
|
||||
BlockData blockData = repository.getBlockRepository().fromSignature(signature);
|
||||
if (blockData != null) {
|
||||
return repository.getBlockRepository().fromReference(signature);
|
||||
}
|
||||
|
||||
// Check block exists
|
||||
if (blockData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
|
||||
BlockData childBlockData = repository.getBlockRepository().fromReference(signature);
|
||||
// Not found, so try the archive
|
||||
// This also checks that the parent block exists
|
||||
// It will return null if either the parent or child don't exit
|
||||
childBlockData = repository.getBlockArchiveRepository().fromReference(signature);
|
||||
|
||||
// Check child block exists
|
||||
if (childBlockData == null)
|
||||
if (childBlockData == null) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
}
|
||||
|
||||
// Check child block's reference matches the supplied signature
|
||||
if (!Arrays.equals(childBlockData.getReference(), signature)) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
}
|
||||
|
||||
return childBlockData;
|
||||
} catch (DataException e) {
|
||||
@ -338,13 +385,20 @@ public class BlocksResource {
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Firstly check the database
|
||||
BlockData blockData = repository.getBlockRepository().fromSignature(signature);
|
||||
if (blockData != null) {
|
||||
return blockData.getHeight();
|
||||
}
|
||||
|
||||
// Check block exists
|
||||
if (blockData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
// Not found, so try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromSignature(signature);
|
||||
if (blockData != null) {
|
||||
return blockData.getHeight();
|
||||
}
|
||||
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
|
||||
return blockData.getHeight();
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@ -371,11 +425,20 @@ public class BlocksResource {
|
||||
})
|
||||
public BlockData getByHeight(@PathParam("height") int height) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Firstly check the database
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(height);
|
||||
if (blockData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
if (blockData != null) {
|
||||
return blockData;
|
||||
}
|
||||
|
||||
// Not found, so try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromHeight(height);
|
||||
if (blockData != null) {
|
||||
return blockData;
|
||||
}
|
||||
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
|
||||
return blockData;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@ -402,12 +465,31 @@ public class BlocksResource {
|
||||
})
|
||||
public BlockMintingInfo getBlockMintingInfoByHeight(@PathParam("height") int height) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Try the database
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(height);
|
||||
if (blockData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
if (blockData == null) {
|
||||
|
||||
// Not found, so try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromHeight(height);
|
||||
if (blockData == null) {
|
||||
|
||||
// Still not found
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
}
|
||||
}
|
||||
|
||||
Block block = new Block(repository, blockData);
|
||||
BlockData parentBlockData = repository.getBlockRepository().fromSignature(blockData.getReference());
|
||||
if (parentBlockData == null) {
|
||||
// Parent block not found - try the archive
|
||||
parentBlockData = repository.getBlockArchiveRepository().fromSignature(blockData.getReference());
|
||||
if (parentBlockData == null) {
|
||||
|
||||
// Still not found
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
}
|
||||
}
|
||||
|
||||
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
|
||||
if (minterLevel == 0)
|
||||
// This may be unavailable when requesting a trimmed block
|
||||
@ -454,13 +536,26 @@ public class BlocksResource {
|
||||
})
|
||||
public BlockData getByTimestamp(@PathParam("timestamp") long timestamp) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int height = repository.getBlockRepository().getHeightFromTimestamp(timestamp);
|
||||
if (height == 0)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
BlockData blockData = null;
|
||||
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(height);
|
||||
if (blockData == null)
|
||||
// Try the Blocks table
|
||||
int height = repository.getBlockRepository().getHeightFromTimestamp(timestamp);
|
||||
if (height > 0) {
|
||||
// Found match in Blocks table
|
||||
return repository.getBlockRepository().fromHeight(height);
|
||||
}
|
||||
|
||||
// Not found in Blocks table, so try the archive
|
||||
height = repository.getBlockArchiveRepository().getHeightFromTimestamp(timestamp);
|
||||
if (height > 0) {
|
||||
// Found match in archive
|
||||
blockData = repository.getBlockArchiveRepository().fromHeight(height);
|
||||
}
|
||||
|
||||
// Ensure block exists
|
||||
if (blockData == null) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
|
||||
}
|
||||
|
||||
return blockData;
|
||||
} catch (DataException e) {
|
||||
@ -497,9 +592,14 @@ public class BlocksResource {
|
||||
|
||||
for (/* count already set */; count > 0; --count, ++height) {
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(height);
|
||||
if (blockData == null)
|
||||
// Run out of blocks!
|
||||
break;
|
||||
if (blockData == null) {
|
||||
// Not found - try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromHeight(height);
|
||||
if (blockData == null) {
|
||||
// Run out of blocks!
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
blocks.add(blockData);
|
||||
}
|
||||
@ -544,7 +644,29 @@ public class BlocksResource {
|
||||
if (accountData == null || accountData.getPublicKey() == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.PUBLIC_KEY_NOT_FOUND);
|
||||
|
||||
return repository.getBlockRepository().getBlockSummariesBySigner(accountData.getPublicKey(), limit, offset, reverse);
|
||||
|
||||
List<BlockSummaryData> summaries = repository.getBlockRepository()
|
||||
.getBlockSummariesBySigner(accountData.getPublicKey(), limit, offset, reverse);
|
||||
|
||||
// Add any from the archive
|
||||
List<BlockSummaryData> archivedSummaries = repository.getBlockArchiveRepository()
|
||||
.getBlockSummariesBySigner(accountData.getPublicKey(), limit, offset, reverse);
|
||||
if (archivedSummaries != null && !archivedSummaries.isEmpty()) {
|
||||
summaries.addAll(archivedSummaries);
|
||||
}
|
||||
else {
|
||||
summaries = archivedSummaries;
|
||||
}
|
||||
|
||||
// Sort the results (because they may have been obtained from two places)
|
||||
if (reverse != null && reverse) {
|
||||
summaries.sort((s1, s2) -> Integer.valueOf(s2.getHeight()).compareTo(Integer.valueOf(s1.getHeight())));
|
||||
}
|
||||
else {
|
||||
summaries.sort(Comparator.comparing(s -> Integer.valueOf(s.getHeight())));
|
||||
}
|
||||
|
||||
return summaries;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@ -580,7 +702,8 @@ public class BlocksResource {
|
||||
if (!Crypto.isValidAddress(address))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
return repository.getBlockRepository().getBlockSigners(addresses, limit, offset, reverse);
|
||||
// This method pulls data from both Blocks and BlockArchive, so no need to query serparately
|
||||
return repository.getBlockArchiveRepository().getBlockSigners(addresses, limit, offset, reverse);
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
@ -620,7 +743,76 @@ public class BlocksResource {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
return repository.getBlockRepository().getBlockSummaries(startHeight, endHeight, count);
|
||||
|
||||
/*
|
||||
* start end count result
|
||||
* 10 40 null blocks 10 to 39 (excludes end block, ignore count)
|
||||
*
|
||||
* null null null blocks 1 to 50 (assume count=50, maybe start=1)
|
||||
* 30 null null blocks 30 to 79 (assume count=50)
|
||||
* 30 null 10 blocks 30 to 39
|
||||
*
|
||||
* null null 50 last 50 blocks? so if max(blocks.height) is 200, then blocks 151 to 200
|
||||
* null 200 null blocks 150 to 199 (excludes end block, assume count=50)
|
||||
* null 200 10 blocks 190 to 199 (excludes end block)
|
||||
*/
|
||||
|
||||
List<BlockSummaryData> blockSummaries = new ArrayList<>();
|
||||
|
||||
// Use the latest X blocks if only a count is specified
|
||||
if (startHeight == null && endHeight == null && count != null) {
|
||||
BlockData chainTip = repository.getBlockRepository().getLastBlock();
|
||||
startHeight = chainTip.getHeight() - count;
|
||||
endHeight = chainTip.getHeight();
|
||||
}
|
||||
|
||||
// ... otherwise default the start height to 1
|
||||
if (startHeight == null && endHeight == null) {
|
||||
startHeight = 1;
|
||||
}
|
||||
|
||||
// Default the count to 50
|
||||
if (count == null) {
|
||||
count = 50;
|
||||
}
|
||||
|
||||
// If both a start and end height exist, ignore the count
|
||||
if (startHeight != null && endHeight != null) {
|
||||
if (startHeight > 0 && endHeight > 0) {
|
||||
count = Integer.MAX_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
// Derive start height from end height if missing
|
||||
if (startHeight == null || startHeight == 0) {
|
||||
if (endHeight != null && endHeight > 0) {
|
||||
if (count != null) {
|
||||
startHeight = endHeight - count;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (/* count already set */; count > 0; --count, ++startHeight) {
|
||||
if (endHeight != null && startHeight >= endHeight) {
|
||||
break;
|
||||
}
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(startHeight);
|
||||
if (blockData == null) {
|
||||
// Not found - try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromHeight(startHeight);
|
||||
if (blockData == null) {
|
||||
// Run out of blocks!
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (blockData != null) {
|
||||
BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
|
||||
blockSummaries.add(blockSummaryData);
|
||||
}
|
||||
}
|
||||
|
||||
return blockSummaries;
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
|
92
src/main/java/org/qortal/api/resource/BootstrapResource.java
Normal file
92
src/main/java/org/qortal/api/resource/BootstrapResource.java
Normal file
@ -0,0 +1,92 @@
|
||||
package org.qortal.api.resource;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.Security;
|
||||
import org.qortal.repository.Bootstrap;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.*;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import java.io.IOException;
|
||||
|
||||
|
||||
@Path("/bootstrap")
|
||||
@Tag(name = "Bootstrap")
|
||||
public class BootstrapResource {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(BootstrapResource.class);
|
||||
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@POST
|
||||
@Path("/create")
|
||||
@Operation(
|
||||
summary = "Create bootstrap",
|
||||
description = "Builds a bootstrap file for distribution",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "path to file on success, an exception on failure",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
|
||||
)
|
||||
}
|
||||
)
|
||||
public String createBootstrap() {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
Bootstrap bootstrap = new Bootstrap(repository);
|
||||
try {
|
||||
bootstrap.checkRepositoryState();
|
||||
} catch (DataException e) {
|
||||
LOGGER.info("Not ready to create bootstrap: {}", e.getMessage());
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
|
||||
}
|
||||
bootstrap.validateBlockchain();
|
||||
return bootstrap.create();
|
||||
|
||||
} catch (DataException | InterruptedException | IOException e) {
|
||||
LOGGER.info("Unable to create bootstrap", e);
|
||||
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/validate")
|
||||
@Operation(
|
||||
summary = "Validate blockchain",
|
||||
description = "Useful to check database integrity prior to creating or after installing a bootstrap. " +
|
||||
"This process is intensive and can take over an hour to run.",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "true if valid, false if invalid",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
|
||||
)
|
||||
}
|
||||
)
|
||||
public boolean validateBootstrap() {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
Bootstrap bootstrap = new Bootstrap(repository);
|
||||
return bootstrap.validateCompleteBlockchain();
|
||||
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE);
|
||||
}
|
||||
}
|
||||
}
|
@ -11,6 +11,7 @@ import java.util.List;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.PathParam;
|
||||
import javax.ws.rs.core.Context;
|
||||
@ -173,7 +174,7 @@ public class CrossChainHtlcResource {
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@POST
|
||||
@Path("/redeem/{ataddress}")
|
||||
@Operation(
|
||||
summary = "Redeems HTLC associated with supplied AT",
|
||||
@ -231,7 +232,7 @@ public class CrossChainHtlcResource {
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@POST
|
||||
@Path("/redeemAll")
|
||||
@Operation(
|
||||
summary = "Redeems HTLC for all applicable ATs in tradebot data",
|
||||
@ -415,7 +416,7 @@ public class CrossChainHtlcResource {
|
||||
return false;
|
||||
}
|
||||
|
||||
@GET
|
||||
@POST
|
||||
@Path("/refund/{ataddress}")
|
||||
@Operation(
|
||||
summary = "Refunds HTLC associated with supplied AT",
|
||||
@ -463,7 +464,7 @@ public class CrossChainHtlcResource {
|
||||
}
|
||||
|
||||
|
||||
@GET
|
||||
@POST
|
||||
@Path("/refundAll")
|
||||
@Operation(
|
||||
summary = "Refunds HTLC for all applicable ATs in tradebot data",
|
||||
@ -478,8 +479,6 @@ public class CrossChainHtlcResource {
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN})
|
||||
public boolean refundAllHtlc() {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Security.checkApiCallAllowed(request);
|
||||
boolean success = false;
|
||||
|
||||
@ -568,6 +567,13 @@ public class CrossChainHtlcResource {
|
||||
if (crossChainTradeData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
||||
// If the AT is "finished" then it will have a zero balance
|
||||
// In these cases we should avoid HTLC refunds if tbe QORT haven't been returned to the seller
|
||||
if (atData.getIsFinished() && crossChainTradeData.mode != AcctMode.REFUNDED && crossChainTradeData.mode != AcctMode.CANCELLED) {
|
||||
LOGGER.info(String.format("Skipping AT %s because the QORT has already been redemed", atAddress));
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
|
||||
List<TradeBotData> allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData();
|
||||
TradeBotData tradeBotData = allTradeBotData.stream().filter(tradeBotDataItem -> tradeBotDataItem.getAtAddress().equals(atAddress)).findFirst().orElse(null);
|
||||
if (tradeBotData == null)
|
||||
|
298
src/main/java/org/qortal/api/resource/ListsResource.java
Normal file
298
src/main/java/org/qortal/api/resource/ListsResource.java
Normal file
@ -0,0 +1,298 @@
|
||||
package org.qortal.api.resource;
|
||||
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.media.ArraySchema;
|
||||
import io.swagger.v3.oas.annotations.media.Content;
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
|
||||
import org.qortal.api.*;
|
||||
import org.qortal.api.model.AddressListRequest;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.account.AccountData;
|
||||
import org.qortal.list.ResourceListManager;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.*;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
|
||||
@Path("/lists")
|
||||
@Tag(name = "Lists")
|
||||
public class ListsResource {
|
||||
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@POST
|
||||
@Path("/blacklist/address/{address}")
|
||||
@Operation(
|
||||
summary = "Add a QORT address to the local blacklist",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "Returns true on success, or an exception on failure",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
|
||||
public String addAddressToBlacklist(@PathParam("address") String address) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (!Crypto.isValidAddress(address))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
AccountData accountData = repository.getAccountRepository().getAccount(address);
|
||||
// Not found?
|
||||
if (accountData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
|
||||
|
||||
// Valid address, so go ahead and blacklist it
|
||||
boolean success = ResourceListManager.getInstance().addAddressToBlacklist(address, true);
|
||||
|
||||
return success ? "true" : "false";
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path("/blacklist/addresses")
|
||||
@Operation(
|
||||
summary = "Add one or more QORT addresses to the local blacklist",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
schema = @Schema(
|
||||
implementation = AddressListRequest.class
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "Returns true if all addresses were processed, false if any couldn't be " +
|
||||
"processed, or an exception on failure. If false or an exception is returned, " +
|
||||
"the list will not be updated, and the request will need to be re-issued.",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
|
||||
public String addAddressesToBlacklist(AddressListRequest addressListRequest) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (addressListRequest == null || addressListRequest.addresses == null) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
|
||||
int successCount = 0;
|
||||
int errorCount = 0;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
for (String address : addressListRequest.addresses) {
|
||||
|
||||
if (!Crypto.isValidAddress(address)) {
|
||||
errorCount++;
|
||||
continue;
|
||||
}
|
||||
|
||||
AccountData accountData = repository.getAccountRepository().getAccount(address);
|
||||
// Not found?
|
||||
if (accountData == null) {
|
||||
errorCount++;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Valid address, so go ahead and blacklist it
|
||||
boolean success = ResourceListManager.getInstance().addAddressToBlacklist(address, false);
|
||||
if (success) {
|
||||
successCount++;
|
||||
}
|
||||
else {
|
||||
errorCount++;
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
|
||||
if (successCount > 0 && errorCount == 0) {
|
||||
// All were successful, so save the blacklist
|
||||
ResourceListManager.getInstance().saveBlacklist();
|
||||
return "true";
|
||||
}
|
||||
else {
|
||||
// Something went wrong, so revert
|
||||
ResourceListManager.getInstance().revertBlacklist();
|
||||
return "false";
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@DELETE
|
||||
@Path("/blacklist/address/{address}")
|
||||
@Operation(
|
||||
summary = "Remove a QORT address from the local blacklist",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "Returns true on success, or an exception on failure",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
|
||||
public String removeAddressFromBlacklist(@PathParam("address") String address) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (!Crypto.isValidAddress(address))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
AccountData accountData = repository.getAccountRepository().getAccount(address);
|
||||
// Not found?
|
||||
if (accountData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
|
||||
|
||||
// Valid address, so go ahead and blacklist it
|
||||
boolean success = ResourceListManager.getInstance().removeAddressFromBlacklist(address, true);
|
||||
|
||||
return success ? "true" : "false";
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
@DELETE
|
||||
@Path("/blacklist/addresses")
|
||||
@Operation(
|
||||
summary = "Remove one or more QORT addresses from the local blacklist",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
mediaType = MediaType.APPLICATION_JSON,
|
||||
schema = @Schema(
|
||||
implementation = AddressListRequest.class
|
||||
)
|
||||
)
|
||||
),
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "Returns true if all addresses were processed, false if any couldn't be " +
|
||||
"processed, or an exception on failure. If false or an exception is returned, " +
|
||||
"the list will not be updated, and the request will need to be re-issued.",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
|
||||
public String removeAddressesFromBlacklist(AddressListRequest addressListRequest) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (addressListRequest == null || addressListRequest.addresses == null) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
}
|
||||
|
||||
int successCount = 0;
|
||||
int errorCount = 0;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
for (String address : addressListRequest.addresses) {
|
||||
|
||||
if (!Crypto.isValidAddress(address)) {
|
||||
errorCount++;
|
||||
continue;
|
||||
}
|
||||
|
||||
AccountData accountData = repository.getAccountRepository().getAccount(address);
|
||||
// Not found?
|
||||
if (accountData == null) {
|
||||
errorCount++;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Valid address, so go ahead and blacklist it
|
||||
// Don't save as we will do this at the end of the process
|
||||
boolean success = ResourceListManager.getInstance().removeAddressFromBlacklist(address, false);
|
||||
if (success) {
|
||||
successCount++;
|
||||
}
|
||||
else {
|
||||
errorCount++;
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
|
||||
if (successCount > 0 && errorCount == 0) {
|
||||
// All were successful, so save the blacklist
|
||||
ResourceListManager.getInstance().saveBlacklist();
|
||||
return "true";
|
||||
}
|
||||
else {
|
||||
// Something went wrong, so revert
|
||||
ResourceListManager.getInstance().revertBlacklist();
|
||||
return "false";
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/blacklist/addresses")
|
||||
@Operation(
|
||||
summary = "Fetch the list of blacklisted addresses",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "A JSON array of addresses",
|
||||
content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = String.class)))
|
||||
)
|
||||
}
|
||||
)
|
||||
public String getAddressBlacklist() {
|
||||
Security.checkApiCallAllowed(request);
|
||||
return ResourceListManager.getInstance().getBlacklistJSONString();
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/blacklist/address/{address}")
|
||||
@Operation(
|
||||
summary = "Check if an address is present in the local blacklist",
|
||||
responses = {
|
||||
@ApiResponse(
|
||||
description = "Returns true or false if the list was queried, or an exception on failure",
|
||||
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
|
||||
)
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
|
||||
public String checkAddressInBlacklist(@PathParam("address") String address) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
if (!Crypto.isValidAddress(address))
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
AccountData accountData = repository.getAccountRepository().getAccount(address);
|
||||
// Not found?
|
||||
if (accountData == null)
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
|
||||
|
||||
// Valid address, so go ahead and blacklist it
|
||||
boolean blacklisted = ResourceListManager.getInstance().isAddressInBlacklist(address);
|
||||
|
||||
return blacklisted ? "true" : "false";
|
||||
} catch (DataException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -1,5 +1,7 @@
|
||||
package org.qortal.at;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import org.ciyam.at.MachineState;
|
||||
@ -56,12 +58,12 @@ public class AT {
|
||||
|
||||
this.atData = new ATData(atAddress, creatorPublicKey, creation, machineState.version, assetId, codeBytes, codeHash,
|
||||
machineState.isSleeping(), machineState.getSleepUntilHeight(), machineState.isFinished(), machineState.hadFatalError(),
|
||||
machineState.isFrozen(), machineState.getFrozenBalance());
|
||||
machineState.isFrozen(), machineState.getFrozenBalance(), null);
|
||||
|
||||
byte[] stateData = machineState.toBytes();
|
||||
byte[] stateHash = Crypto.digest(stateData);
|
||||
|
||||
this.atStateData = new ATStateData(atAddress, height, stateData, stateHash, 0L, true);
|
||||
this.atStateData = new ATStateData(atAddress, height, stateData, stateHash, 0L, true, null);
|
||||
}
|
||||
|
||||
// Getters / setters
|
||||
@ -84,13 +86,28 @@ public class AT {
|
||||
this.repository.getATRepository().delete(this.atData.getATAddress());
|
||||
}
|
||||
|
||||
/**
|
||||
* Potentially execute AT.
|
||||
* <p>
|
||||
* Note that sleep-until-message support might set/reset
|
||||
* sleep-related flags/values.
|
||||
* <p>
|
||||
* {@link #getATStateData()} will return null if nothing happened.
|
||||
* <p>
|
||||
* @param blockHeight
|
||||
* @param blockTimestamp
|
||||
* @return AT-generated transactions, possibly empty
|
||||
* @throws DataException
|
||||
*/
|
||||
public List<AtTransaction> run(int blockHeight, long blockTimestamp) throws DataException {
|
||||
String atAddress = this.atData.getATAddress();
|
||||
|
||||
QortalATAPI api = new QortalATAPI(repository, this.atData, blockTimestamp);
|
||||
QortalAtLoggerFactory loggerFactory = QortalAtLoggerFactory.getInstance();
|
||||
|
||||
byte[] codeBytes = this.atData.getCodeBytes();
|
||||
if (!api.willExecute(blockHeight))
|
||||
// this.atStateData will be null
|
||||
return Collections.emptyList();
|
||||
|
||||
// Fetch latest ATStateData for this AT
|
||||
ATStateData latestAtStateData = this.repository.getATRepository().getLatestATState(atAddress);
|
||||
@ -100,8 +117,10 @@ public class AT {
|
||||
throw new IllegalStateException("No previous AT state data found");
|
||||
|
||||
// [Re]create AT machine state using AT state data or from scratch as applicable
|
||||
byte[] codeBytes = this.atData.getCodeBytes();
|
||||
MachineState state = MachineState.fromBytes(api, loggerFactory, latestAtStateData.getStateData(), codeBytes);
|
||||
try {
|
||||
api.preExecute(state);
|
||||
state.execute();
|
||||
} catch (Exception e) {
|
||||
throw new DataException(String.format("Uncaught exception while running AT '%s'", atAddress), e);
|
||||
@ -109,9 +128,18 @@ public class AT {
|
||||
|
||||
byte[] stateData = state.toBytes();
|
||||
byte[] stateHash = Crypto.digest(stateData);
|
||||
long atFees = api.calcFinalFees(state);
|
||||
|
||||
this.atStateData = new ATStateData(atAddress, blockHeight, stateData, stateHash, atFees, false);
|
||||
// Nothing happened?
|
||||
if (state.getSteps() == 0 && Arrays.equals(stateHash, latestAtStateData.getStateHash()))
|
||||
// We currently want to execute frozen ATs, to maintain backwards support.
|
||||
if (state.isFrozen() == false)
|
||||
// this.atStateData will be null
|
||||
return Collections.emptyList();
|
||||
|
||||
long atFees = api.calcFinalFees(state);
|
||||
Long sleepUntilMessageTimestamp = this.atData.getSleepUntilMessageTimestamp();
|
||||
|
||||
this.atStateData = new ATStateData(atAddress, blockHeight, stateData, stateHash, atFees, false, sleepUntilMessageTimestamp);
|
||||
|
||||
return api.getTransactions();
|
||||
}
|
||||
@ -130,6 +158,10 @@ public class AT {
|
||||
this.atData.setHadFatalError(state.hadFatalError());
|
||||
this.atData.setIsFrozen(state.isFrozen());
|
||||
this.atData.setFrozenBalance(state.getFrozenBalance());
|
||||
|
||||
// Special sleep-until-message support
|
||||
this.atData.setSleepUntilMessageTimestamp(this.atStateData.getSleepUntilMessageTimestamp());
|
||||
|
||||
this.repository.getATRepository().save(this.atData);
|
||||
}
|
||||
|
||||
@ -157,6 +189,10 @@ public class AT {
|
||||
this.atData.setHadFatalError(state.hadFatalError());
|
||||
this.atData.setIsFrozen(state.isFrozen());
|
||||
this.atData.setFrozenBalance(state.getFrozenBalance());
|
||||
|
||||
// Special sleep-until-message support
|
||||
this.atData.setSleepUntilMessageTimestamp(previousStateData.getSleepUntilMessageTimestamp());
|
||||
|
||||
this.repository.getATRepository().save(this.atData);
|
||||
}
|
||||
|
||||
|
@ -32,6 +32,7 @@ import org.qortal.group.Group;
|
||||
import org.qortal.repository.ATRepository;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.ATRepository.NextTransactionInfo;
|
||||
import org.qortal.transaction.AtTransaction;
|
||||
import org.qortal.transaction.Transaction.TransactionType;
|
||||
import org.qortal.utils.Base58;
|
||||
@ -74,8 +75,45 @@ public class QortalATAPI extends API {
|
||||
return this.transactions;
|
||||
}
|
||||
|
||||
public long calcFinalFees(MachineState state) {
|
||||
return state.getSteps() * this.ciyamAtSettings.feePerStep;
|
||||
public boolean willExecute(int blockHeight) throws DataException {
|
||||
// Sleep-until-message/height checking
|
||||
Long sleepUntilMessageTimestamp = this.atData.getSleepUntilMessageTimestamp();
|
||||
|
||||
if (sleepUntilMessageTimestamp != null) {
|
||||
// Quicker to check height, if sleep-until-height also active
|
||||
Integer sleepUntilHeight = this.atData.getSleepUntilHeight();
|
||||
|
||||
boolean wakeDueToHeight = sleepUntilHeight != null && sleepUntilHeight != 0 && blockHeight >= sleepUntilHeight;
|
||||
|
||||
boolean wakeDueToMessage = false;
|
||||
if (!wakeDueToHeight) {
|
||||
// No avoiding asking repository
|
||||
Timestamp previousTxTimestamp = new Timestamp(sleepUntilMessageTimestamp);
|
||||
NextTransactionInfo nextTransactionInfo = this.repository.getATRepository().findNextTransaction(this.atData.getATAddress(),
|
||||
previousTxTimestamp.blockHeight,
|
||||
previousTxTimestamp.transactionSequence);
|
||||
|
||||
wakeDueToMessage = nextTransactionInfo != null;
|
||||
}
|
||||
|
||||
// Can we skip?
|
||||
if (!wakeDueToHeight && !wakeDueToMessage)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
public void preExecute(MachineState state) {
|
||||
// Sleep-until-message/height checking
|
||||
Long sleepUntilMessageTimestamp = this.atData.getSleepUntilMessageTimestamp();
|
||||
|
||||
if (sleepUntilMessageTimestamp != null) {
|
||||
// We've passed checks, so clear sleep-related flags/values
|
||||
this.setIsSleeping(state, false);
|
||||
this.setSleepUntilHeight(state, 0);
|
||||
this.atData.setSleepUntilMessageTimestamp(null);
|
||||
}
|
||||
}
|
||||
|
||||
// Inherited methods from CIYAM AT API
|
||||
@ -412,6 +450,10 @@ public class QortalATAPI extends API {
|
||||
|
||||
// Utility methods
|
||||
|
||||
public long calcFinalFees(MachineState state) {
|
||||
return state.getSteps() * this.ciyamAtSettings.feePerStep;
|
||||
}
|
||||
|
||||
/** Returns partial transaction signature, used to verify we're operating on the same transaction and not naively using block height & sequence. */
|
||||
public static byte[] partialSignature(byte[] fullSignature) {
|
||||
return Arrays.copyOfRange(fullSignature, 8, 32);
|
||||
@ -460,6 +502,15 @@ public class QortalATAPI extends API {
|
||||
}
|
||||
}
|
||||
|
||||
/*package*/ void sleepUntilMessageOrHeight(MachineState state, long txTimestamp, Long sleepUntilHeight) {
|
||||
this.setIsSleeping(state, true);
|
||||
|
||||
this.atData.setSleepUntilMessageTimestamp(txTimestamp);
|
||||
|
||||
if (sleepUntilHeight != null)
|
||||
this.setSleepUntilHeight(state, sleepUntilHeight.intValue());
|
||||
}
|
||||
|
||||
/** Returns AT's account */
|
||||
/* package */ Account getATAccount() {
|
||||
return new Account(this.repository, this.atData.getATAddress());
|
||||
|
@@ -84,6 +84,43 @@ public enum QortalFunctionCode {
api.setB(state, bBytes);
}
},
/**
* Sleep AT until a new message arrives after 'tx-timestamp'.<br>
* <tt>0x0503 tx-timestamp</tt>
*/
SLEEP_UNTIL_MESSAGE(0x0503, 1, false) {
@Override
protected void postCheckExecute(FunctionData functionData, MachineState state, short rawFunctionCode) throws ExecutionException {
if (functionData.value1 <= 0)
return;

long txTimestamp = functionData.value1;

QortalATAPI api = (QortalATAPI) state.getAPI();
api.sleepUntilMessageOrHeight(state, txTimestamp, null);
}
},
/**
* Sleep AT until a new message arrives, after 'tx-timestamp', or height reached.<br>
* <tt>0x0504 tx-timestamp height</tt>
*/
SLEEP_UNTIL_MESSAGE_OR_HEIGHT(0x0504, 2, false) {
@Override
protected void postCheckExecute(FunctionData functionData, MachineState state, short rawFunctionCode) throws ExecutionException {
if (functionData.value1 <= 0)
return;

long txTimestamp = functionData.value1;

if (functionData.value2 <= 0)
return;

long sleepUntilHeight = functionData.value2;

QortalATAPI api = (QortalATAPI) state.getAPI();
api.sleepUntilMessageOrHeight(state, txTimestamp, sleepUntilHeight);
}
},
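Taken together with the willExecute()/preExecute() changes in QortalATAPI above, these two new function codes give an AT a conditional sleep. Below is a minimal standalone sketch, not part of this commit, of the wake-up decision willExecute() applies at each block; hasNewerMessage stands in for the repository lookup via findNextTransaction().

final class SleepWakeSketch {
	// Returns true if the AT should execute at this block height.
	static boolean shouldWake(Long sleepUntilMessageTimestamp, Integer sleepUntilHeight,
			int blockHeight, boolean hasNewerMessage) {
		if (sleepUntilMessageTimestamp == null)
			return true; // not sleeping on a message, so execute as normal

		// Height check is cheaper, so it is tried first (as in willExecute() above)
		boolean wakeDueToHeight = sleepUntilHeight != null && sleepUntilHeight != 0 && blockHeight >= sleepUntilHeight;
		boolean wakeDueToMessage = !wakeDueToHeight && hasNewerMessage;

		return wakeDueToHeight || wakeDueToMessage;
	}
}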
|
||||
/**
|
||||
* Convert address in B to 20-byte value in LSB of B1, and all of B2 & B3.<br>
|
||||
* <tt>0x0510</tt>
|
||||
|
@ -1104,9 +1104,14 @@ public class Block {
|
||||
// Create repository savepoint here so we can rollback to it after testing transactions
|
||||
repository.setSavepoint();
|
||||
|
||||
if (this.blockData.getHeight() == 212937)
|
||||
if (this.blockData.getHeight() == 212937) {
|
||||
// Apply fix for block 212937 but fix will be rolled back before we exit method
|
||||
Block212937.processFix(this);
|
||||
}
|
||||
else if (InvalidNameRegistrationBlocks.isAffectedBlock(this.blockData.getHeight())) {
|
||||
// Apply fix for affected name registration blocks, but fix will be rolled back before we exit method
|
||||
InvalidNameRegistrationBlocks.processFix(this);
|
||||
}
|
||||
|
||||
for (Transaction transaction : this.getTransactions()) {
|
||||
TransactionData transactionData = transaction.getTransactionData();
|
||||
@ -1145,7 +1150,7 @@ public class Block {
|
||||
// Check transaction can even be processed
|
||||
validationResult = transaction.isProcessable();
|
||||
if (validationResult != Transaction.ValidationResult.OK) {
|
||||
LOGGER.debug(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
|
||||
LOGGER.info(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
|
||||
return ValidationResult.TRANSACTION_INVALID;
|
||||
}
|
||||
|
||||
@ -1259,12 +1264,13 @@ public class Block {
|
||||
for (ATData atData : executableATs) {
|
||||
AT at = new AT(this.repository, atData);
|
||||
List<AtTransaction> atTransactions = at.run(this.blockData.getHeight(), this.blockData.getTimestamp());
|
||||
ATStateData atStateData = at.getATStateData();
|
||||
// Didn't execute? (e.g. sleeping)
|
||||
if (atStateData == null)
|
||||
continue;
|
||||
|
||||
allAtTransactions.addAll(atTransactions);
|
||||
|
||||
ATStateData atStateData = at.getATStateData();
|
||||
this.ourAtStates.add(atStateData);
|
||||
|
||||
this.ourAtFees += atStateData.getFees();
|
||||
}
|
||||
|
||||
@ -1293,6 +1299,21 @@ public class Block {
|
||||
return mintingAccount.canMint();
|
||||
}
|
||||
|
||||
/**
* Pre-process the block and its transactions.
* This allows for any database integrity checks prior to validation.
* This is called before isValid() and process().
*
* @throws DataException
*/
public void preProcess() throws DataException {
List<Transaction> blocksTransactions = this.getTransactions();

for (Transaction transaction : blocksTransactions) {
transaction.preProcess();
}
}
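Elsewhere in this commit, BlockMinter and Synchronizer both call preProcess() immediately before isValid(). A hedged sketch of the intended call order (block and repository are assumed to be an already-built Block and an open Repository, and ValidationResult is assumed to be the nested Block enum):

block.preProcess();                               // integrity checks/repairs before validation
Block.ValidationResult result = block.isValid();
if (result == Block.ValidationResult.OK) {
	block.process();                              // add block and its transactions to the blockchain
	repository.saveChanges();
}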
|
||||
|
||||
/**
|
||||
* Process block, and its transactions, adding them to the blockchain.
|
||||
*
|
||||
|
@ -4,10 +4,7 @@ import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.InputStream;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
@ -27,11 +24,9 @@ import org.eclipse.persistence.jaxb.UnmarshallerProperties;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.network.Network;
|
||||
import org.qortal.repository.BlockRepository;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.repository.*;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.StringLongMapXmlAdapter;
|
||||
|
||||
/**
|
||||
@ -506,29 +501,105 @@ public class BlockChain {
|
||||
* @throws SQLException
|
||||
*/
|
||||
public static void validate() throws DataException {
|
||||
// Check first block is Genesis Block
|
||||
if (!isGenesisBlockValid())
|
||||
rebuildBlockchain();
|
||||
|
||||
boolean isTopOnly = Settings.getInstance().isTopOnly();
|
||||
boolean archiveEnabled = Settings.getInstance().isArchiveEnabled();
|
||||
boolean canBootstrap = Settings.getInstance().getBootstrap();
|
||||
boolean needsArchiveRebuild = false;
|
||||
BlockData chainTip;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
chainTip = repository.getBlockRepository().getLastBlock();
|
||||
|
||||
// Ensure archive is (at least partially) intact, and force a bootstrap if it isn't
|
||||
if (!isTopOnly && archiveEnabled && canBootstrap) {
|
||||
needsArchiveRebuild = (repository.getBlockArchiveRepository().fromHeight(2) == null);
|
||||
if (needsArchiveRebuild) {
|
||||
LOGGER.info("Couldn't retrieve block 2 from archive. Bootstrapping...");
|
||||
|
||||
// If there are minting accounts, make sure to back them up
|
||||
// Don't backup if there are no minting accounts, as this can cause problems
|
||||
if (!repository.getAccountRepository().getMintingAccounts().isEmpty()) {
|
||||
Controller.getInstance().exportRepositoryData();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
boolean hasBlocks = (chainTip != null && chainTip.getHeight() > 1);
|
||||
|
||||
if (isTopOnly && hasBlocks) {
|
||||
// Top-only mode is enabled and we have blocks, so it's possible that the genesis block has been pruned
|
||||
// It's best not to validate it, and there's no real need to
|
||||
} else {
|
||||
// Check first block is Genesis Block
|
||||
if (!isGenesisBlockValid() || needsArchiveRebuild) {
|
||||
try {
|
||||
rebuildBlockchain();
|
||||
|
||||
} catch (InterruptedException e) {
|
||||
throw new DataException(String.format("Interrupted when trying to rebuild blockchain: %s", e.getMessage()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We need to create a new connection, as the previous repository and its connections may have been
// closed by rebuildBlockchain() if a bootstrap was applied
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
repository.checkConsistency();
|
||||
|
||||
int startHeight = Math.max(repository.getBlockRepository().getBlockchainHeight() - 1440, 1);
|
||||
// Set the number of blocks to validate based on the pruned state of the chain
|
||||
// If pruned, subtract an extra 10 to allow room for error
|
||||
int blocksToValidate = (isTopOnly || archiveEnabled) ? Settings.getInstance().getPruneBlockLimit() - 10 : 1440;
|
||||
|
||||
int startHeight = Math.max(repository.getBlockRepository().getBlockchainHeight() - blocksToValidate, 1);
|
||||
BlockData detachedBlockData = repository.getBlockRepository().getDetachedBlockSignature(startHeight);
|
||||
|
||||
if (detachedBlockData != null) {
|
||||
LOGGER.error(String.format("Block %d's reference does not match any block's signature", detachedBlockData.getHeight()));
|
||||
LOGGER.error(String.format("Block %d's reference does not match any block's signature",
|
||||
detachedBlockData.getHeight()));
|
||||
LOGGER.error(String.format("Your chain may be invalid and you should consider bootstrapping" +
|
||||
" or re-syncing from genesis."));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for blockchain lock (whereas orphan() only tries to get lock)
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
blockchainLock.lock();
|
||||
try {
|
||||
LOGGER.info(String.format("Orphaning back to block %d", detachedBlockData.getHeight() - 1));
|
||||
orphan(detachedBlockData.getHeight() - 1);
|
||||
} finally {
|
||||
blockchainLock.unlock();
|
||||
/**
* More thorough blockchain validation method. Useful for validating bootstraps.
* A DataException is thrown if anything is invalid.
*
* @throws DataException
*/
public static void validateAllBlocks() throws DataException {
try (final Repository repository = RepositoryManager.getRepository()) {
BlockData chainTip = repository.getBlockRepository().getLastBlock();
final int chainTipHeight = chainTip.getHeight();
final int oldestBlock = 1; // TODO: increase if in pruning mode
byte[] lastReference = null;

for (int height = chainTipHeight; height > oldestBlock; height--) {
BlockData blockData = repository.getBlockRepository().fromHeight(height);
if (blockData == null) {
blockData = repository.getBlockArchiveRepository().fromHeight(height);
}

if (blockData == null) {
String error = String.format("Missing block at height %d", height);
LOGGER.error(error);
throw new DataException(error);
}

if (height != chainTipHeight) {
// Check reference
if (!Arrays.equals(blockData.getSignature(), lastReference)) {
String error = String.format("Invalid reference for block at height %d: %s (should be %s)",
height, Base58.encode(blockData.getReference()), Base58.encode(lastReference));
LOGGER.error(error);
throw new DataException(error);
}
}

lastReference = blockData.getReference();
}
}
}
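A hypothetical invocation of the new method, e.g. after importing a bootstrap; this call site is not part of the commit, and LOGGER is assumed to be a log4j2 Logger in the calling class:

try {
	BlockChain.validateAllBlocks();
	LOGGER.info("Full chain validation passed");
} catch (DataException e) {
	LOGGER.error("Chain failed thorough validation: {}", e.getMessage());
}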
|
||||
@ -551,7 +622,15 @@ public class BlockChain {
|
||||
}
|
||||
}
|
||||
|
||||
private static void rebuildBlockchain() throws DataException {
|
||||
private static void rebuildBlockchain() throws DataException, InterruptedException {
|
||||
boolean shouldBootstrap = Settings.getInstance().getBootstrap();
|
||||
if (shouldBootstrap) {
|
||||
// Settings indicate that we should apply a bootstrap rather than rebuilding and syncing from genesis
|
||||
Bootstrap bootstrap = new Bootstrap();
|
||||
bootstrap.startImport();
|
||||
return;
|
||||
}
|
||||
|
||||
// (Re)build repository
|
||||
if (!RepositoryManager.wasPristineAtOpen())
|
||||
RepositoryManager.rebuild();
|
||||
|
@ -0,0 +1,114 @@
|
||||
package org.qortal.block;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.naming.Name;
|
||||
import org.qortal.repository.DataException;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Invalid Name Registration Blocks
|
||||
* <p>
|
||||
* A node minted a version of block 535658 that contained one transaction:
|
||||
* a REGISTER_NAME transaction that attempted to register a name that was already registered.
|
||||
* <p>
|
||||
* This invalid transaction made block 535658 (rightly) invalid to several nodes,
|
||||
* which refused to use that block.
|
||||
* However, it seems there were no other nodes minting an alternative, valid block at that time
|
||||
* and so the chain stalled for several nodes in the network.
|
||||
* <p>
|
||||
* Additionally, the invalid block 535658 affected all new installations, regardless of whether
|
||||
* they synchronized from scratch (block 1) or used an 'official release' bootstrap.
|
||||
* <p>
|
||||
* The diagnosis found the following:
|
||||
* - The original problem occurred in block 535205 where for some unknown reason many nodes didn't
|
||||
* add the name from a REGISTER_NAME transaction to their Names table.
|
||||
* - As a result, those nodes had a corrupt db, because they weren't holding a record of the name.
|
||||
* - This invalid db then caused them to treat a candidate for block 535658 as valid when it
|
||||
* should have been invalid.
|
||||
* - As such, the chain continued on with a technically invalid block in it, for a subset of the network
|
||||
* <p>
|
||||
* As with block 212937, there were three options, but the only feasible one was to apply edits to block
|
||||
* 535658 to make it valid. There were several cross-chain trades completed after this block, so doing
|
||||
* any kind of rollback was out of the question.
|
||||
* <p>
|
||||
* To complicate things further, a custom data field was used for the first REGISTER_NAME transaction,
|
||||
* and the default data field was used for the second. So it was important that all nodes ended up with
|
||||
* the exact same data regardless of how they arrived there.
|
||||
* <p>
|
||||
* The invalid block 535658 signature is: <tt>3oiuDhok...NdXvCLEV</tt>.
|
||||
* <p>
|
||||
* The invalid transaction in block 535658 is:
|
||||
* <p>
|
||||
* <code><pre>
|
||||
{
|
||||
"type": "REGISTER_NAME",
|
||||
"timestamp": 1630739437517,
|
||||
"reference": "4peRechwSPxP6UkRj9Y8ox9YxkWb34sWk5zyMc1WyMxEsACxD4Gmm7LZVsQ6Skpze8QCSBMZasvEZg6RgdqkyADW",
|
||||
"fee": "0.00100000",
|
||||
"signature": "2t1CryCog8KPDBarzY5fDCKu499nfnUcGrz4Lz4w5wNb5nWqm7y126P48dChYY7huhufcBV3RJPkgKP4Ywxc1gXx",
|
||||
"txGroupId": 0,
|
||||
"blockHeight": 535658,
|
||||
"approvalStatus": "NOT_REQUIRED",
|
||||
"creatorAddress": "Qbx9ojxv7XNi1xDMWzzw7xDvd1zYW6SKFB",
|
||||
"registrantPublicKey": "HJqGEf6cW695Xun4ydhkB2excGFwsDxznhNCRHZStyyx",
|
||||
"name": "Qplay",
|
||||
"data": "Registered Name on the Qortal Chain"
|
||||
}
|
||||
</pre></code>
|
||||
* <p>
|
||||
* Account <tt>Qbx9ojxv7XNi1xDMWzzw7xDvd1zYW6SKFB</tt> attempted to register the name <tt>Qplay</tt>
|
||||
* when they had already registered it 12 hours before in block <tt>535205</tt>.
|
||||
* <p>
|
||||
* However, on the broken DB nodes, their Names table was missing a record for the `Qplay` name
|
||||
* which was sufficient to make the transaction valid.
|
||||
*
|
||||
* This problem then occurred two more times, in blocks 536140 and 541334.
* To reduce duplication, all three block fixes have been combined into this single class.
*
|
||||
*/
|
||||
public final class InvalidNameRegistrationBlocks {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(InvalidNameRegistrationBlocks.class);
|
||||
|
||||
public static Map<Integer, String> invalidBlocksNamesMap = new HashMap<Integer, String>()
|
||||
{
|
||||
{
|
||||
put(535658, "Qplay");
|
||||
put(536140, "Qweb");
|
||||
put(541334, "Qithub");
|
||||
}
|
||||
};
|
||||
|
||||
private InvalidNameRegistrationBlocks() {
|
||||
/* Do not instantiate */
|
||||
}
|
||||
|
||||
public static boolean isAffectedBlock(int height) {
|
||||
return (invalidBlocksNamesMap.containsKey(height));
|
||||
}
|
||||
|
||||
public static void processFix(Block block) throws DataException {
Integer blockHeight = block.getBlockData().getHeight();
String invalidName = invalidBlocksNamesMap.get(blockHeight);
if (invalidName == null) {
throw new DataException(String.format("Unable to lookup invalid name for block height %d", blockHeight));
}

// Unregister the existing name record if it exists
// This ensures that the duplicate name is considered valid, and therefore
// the second (i.e. duplicate) REGISTER_NAME transaction data is applied.
// Both were issued by the same user account, so there is no conflict.
Name name = new Name(block.repository, invalidName);
name.unregister();

LOGGER.debug("Applied name registration patch for block {}", blockHeight);
}

// Note:
// There is no need to write an orphanFix() method, as we do not have
// the necessary ATStatesData to orphan back this far anyway

}
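For reference, this class is consulted from the block validation path shown in the Block.java hunk earlier in this diff; the fix runs inside a repository savepoint and is rolled back before validation returns. Restated from that hunk (not additional code):

if (InvalidNameRegistrationBlocks.isAffectedBlock(this.blockData.getHeight())) {
	// Apply fix for affected name registration blocks, but fix will be rolled back before we exit method
	InvalidNameRegistrationBlocks.processFix(this);
}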
|
@ -14,6 +14,7 @@ import java.security.NoSuchAlgorithmException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
@ -215,8 +216,17 @@ public class AutoUpdate extends Thread {
|
||||
}
|
||||
|
||||
// Give repository a chance to backup in case things go badly wrong (if enabled)
|
||||
if (Settings.getInstance().getRepositoryBackupInterval() > 0)
|
||||
RepositoryManager.backup(true);
|
||||
if (Settings.getInstance().getRepositoryBackupInterval() > 0) {
|
||||
try {
|
||||
// Timeout if the database isn't ready for backing up after 60 seconds
|
||||
long timeout = 60 * 1000L;
|
||||
RepositoryManager.backup(true, "backup", timeout);
|
||||
|
||||
} catch (TimeoutException e) {
|
||||
LOGGER.info("Attempt to backup repository failed due to timeout: {}", e.getMessage());
|
||||
// Continue with the auto update anyway...
|
||||
}
|
||||
}
|
||||
|
||||
// Call ApplyUpdate to end this process (unlocking current JAR so it can be replaced)
|
||||
String javaHome = System.getProperty("java.home");
|
||||
|
@ -44,6 +44,9 @@ public class BlockMinter extends Thread {
|
||||
private static Long lastLogTimestamp;
|
||||
private static Long logTimeout;
|
||||
|
||||
// Recovery
|
||||
public static final long INVALID_BLOCK_RECOVERY_TIMEOUT = 10 * 60 * 1000L; // ms
|
||||
|
||||
// Constructors
|
||||
|
||||
public BlockMinter() {
|
||||
@ -144,9 +147,25 @@ public class BlockMinter extends Thread {
|
||||
if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
|
||||
continue;
|
||||
|
||||
// If we are stuck on an invalid block, we should allow an alternative to be minted
|
||||
boolean recoverInvalidBlock = false;
|
||||
if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
|
||||
// We've had at least one invalid block
|
||||
long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
|
||||
long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
|
||||
if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
|
||||
// Last valid block was more than 10 mins ago, but we've had an invalid block since then
|
||||
// Assume that the chain has stalled because there is no alternative valid candidate
|
||||
// Enter recovery mode to allow alternative, valid candidates to be minted
|
||||
recoverInvalidBlock = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
|
||||
if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
|
||||
if (Controller.getInstance().getRecoveryMode() == false)
|
||||
if (Controller.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false)
|
||||
continue;
|
||||
|
||||
// There are enough peers with a recent block and our latest block is recent
|
||||
@ -230,6 +249,8 @@ public class BlockMinter extends Thread {
|
||||
if (testBlock.isTimestampValid() != ValidationResult.OK)
|
||||
continue;
|
||||
|
||||
testBlock.preProcess();
|
||||
|
||||
// Is new block valid yet? (Before adding unconfirmed transactions)
|
||||
ValidationResult result = testBlock.isValid();
|
||||
if (result != ValidationResult.OK) {
|
||||
@ -421,7 +442,8 @@ public class BlockMinter extends Thread {
|
||||
|
||||
// Add to blockchain
|
||||
newBlock.process();
|
||||
LOGGER.info(String.format("Minted new test block: %d", newBlock.getBlockData().getHeight()));
|
||||
LOGGER.info(String.format("Minted new test block: %d sig: %.8s",
|
||||
newBlock.getBlockData().getHeight(), Base58.encode(newBlock.getBlockData().getSignature())));
|
||||
|
||||
repository.saveChanges();
|
||||
|
||||
|
@ -1,10 +1,47 @@
|
||||
package org.qortal.controller;
|
||||
|
||||
import com.google.common.primitives.Longs;
|
||||
import java.awt.TrayIcon.MessageType;
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.security.SecureRandom;
|
||||
import java.security.Security;
|
||||
import java.time.LocalDateTime;
|
||||
import java.time.ZoneOffset;
|
||||
import java.time.format.DateTimeFormatter;
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Deque;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.bouncycastle.jce.provider.BouncyCastleProvider;
|
||||
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
|
||||
import com.google.common.primitives.Longs;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.account.PublicKeyAccount;
|
||||
@ -13,9 +50,11 @@ import org.qortal.api.DomainMapService;
|
||||
import org.qortal.block.Block;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.block.BlockChain.BlockTimingByHeight;
|
||||
import org.qortal.controller.Synchronizer.SynchronizationResult;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataCleanupManager;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataManager;
|
||||
import org.qortal.controller.Synchronizer.SynchronizationResult;
|
||||
import org.qortal.controller.repository.PruneManager;
|
||||
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
|
||||
import org.qortal.controller.tradebot.TradeBot;
|
||||
import org.qortal.data.account.MintingAccountData;
|
||||
import org.qortal.data.account.RewardShareData;
|
||||
@ -34,10 +73,7 @@ import org.qortal.gui.SysTray;
|
||||
import org.qortal.network.Network;
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.network.message.*;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryFactory;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.repository.*;
|
||||
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transaction.Transaction;
|
||||
@ -123,6 +159,7 @@ public class Controller extends Thread {
|
||||
};
|
||||
|
||||
private long repositoryBackupTimestamp = startTime; // ms
|
||||
private long repositoryMaintenanceTimestamp = startTime; // ms
|
||||
private long repositoryCheckpointTimestamp = startTime; // ms
|
||||
private long ntpCheckTimestamp = startTime; // ms
|
||||
private long deleteExpiredTimestamp = startTime + DELETE_EXPIRED_INTERVAL; // ms
|
||||
@ -291,6 +328,10 @@ public class Controller extends Thread {
|
||||
return this.buildVersion;
|
||||
}
|
||||
|
||||
public String getVersionStringWithoutPrefix() {
|
||||
return this.buildVersion.replaceFirst(VERSION_PREFIX, "");
|
||||
}
|
||||
|
||||
/** Returns current blockchain height, or 0 if it's not available. */
|
||||
public int getChainHeight() {
|
||||
synchronized (this.latestBlocks) {
|
||||
@ -334,7 +375,7 @@ public class Controller extends Thread {
|
||||
return this.savedArgs;
|
||||
}
|
||||
|
||||
/* package */ public static boolean isStopping() {
|
||||
public static boolean isStopping() {
|
||||
return isStopping;
|
||||
}
|
||||
|
||||
@ -392,6 +433,12 @@ public class Controller extends Thread {
|
||||
try {
|
||||
RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
|
||||
RepositoryManager.setRepositoryFactory(repositoryFactory);
|
||||
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
RepositoryManager.archive(repository);
|
||||
RepositoryManager.prune(repository);
|
||||
}
|
||||
} catch (DataException e) {
|
||||
// If exception has no cause then repository is in use by some other process.
|
||||
if (e.getCause() == null) {
|
||||
@ -405,6 +452,11 @@ public class Controller extends Thread {
|
||||
return; // Not System.exit() so that GUI can display error
|
||||
}
|
||||
|
||||
// Rebuild Names table and check database integrity
|
||||
NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck();
|
||||
namesDatabaseIntegrityCheck.rebuildAllNames();
|
||||
namesDatabaseIntegrityCheck.runIntegrityCheck();
|
||||
|
||||
LOGGER.info("Validating blockchain");
|
||||
try {
|
||||
BlockChain.validate();
|
||||
@ -417,6 +469,12 @@ public class Controller extends Thread {
|
||||
return; // Not System.exit() so that GUI can display error
|
||||
}
|
||||
|
||||
// Import current trade bot states and minting accounts if they exist
|
||||
Controller.importRepositoryData();
|
||||
|
||||
// Add the initial peers to the repository if we don't have any
|
||||
Controller.installInitialPeers();
|
||||
|
||||
LOGGER.info("Starting controller");
|
||||
Controller.getInstance().start();
|
||||
|
||||
@ -500,10 +558,10 @@ public class Controller extends Thread {
|
||||
|
||||
final long repositoryBackupInterval = Settings.getInstance().getRepositoryBackupInterval();
|
||||
final long repositoryCheckpointInterval = Settings.getInstance().getRepositoryCheckpointInterval();
|
||||
long repositoryMaintenanceInterval = getRandomRepositoryMaintenanceInterval();
|
||||
|
||||
ExecutorService trimExecutor = Executors.newCachedThreadPool(new DaemonThreadFactory());
|
||||
trimExecutor.execute(new AtStatesTrimmer());
|
||||
trimExecutor.execute(new OnlineAccountsSignaturesTrimmer());
|
||||
// Start executor service for trimming or pruning
|
||||
PruneManager.getInstance().start();
|
||||
|
||||
try {
|
||||
while (!isStopping) {
|
||||
@ -562,7 +620,39 @@ public class Controller extends Thread {
|
||||
Translator.INSTANCE.translate("SysTray", "CREATING_BACKUP_OF_DB_FILES"),
|
||||
MessageType.INFO);
|
||||
|
||||
RepositoryManager.backup(true);
|
||||
try {
|
||||
// Timeout if the database isn't ready for backing up after 60 seconds
|
||||
long timeout = 60 * 1000L;
|
||||
RepositoryManager.backup(true, "backup", timeout);
|
||||
|
||||
} catch (TimeoutException e) {
|
||||
LOGGER.info("Attempt to backup repository failed due to timeout: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// Give repository a chance to perform maintenance (if enabled)
|
||||
if (repositoryMaintenanceInterval > 0 && now >= repositoryMaintenanceTimestamp + repositoryMaintenanceInterval) {
|
||||
repositoryMaintenanceTimestamp = now + repositoryMaintenanceInterval;
|
||||
|
||||
if (Settings.getInstance().getShowMaintenanceNotification())
|
||||
SysTray.getInstance().showMessage(Translator.INSTANCE.translate("SysTray", "DB_MAINTENANCE"),
|
||||
Translator.INSTANCE.translate("SysTray", "PERFORMING_DB_MAINTENANCE"),
|
||||
MessageType.INFO);
|
||||
|
||||
LOGGER.info("Starting scheduled repository maintenance. This can take a while...");
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Timeout if the database isn't ready for maintenance after 60 seconds
|
||||
long timeout = 60 * 1000L;
|
||||
repository.performPeriodicMaintenance(timeout);
|
||||
|
||||
LOGGER.info("Scheduled repository maintenance completed");
|
||||
} catch (DataException | TimeoutException e) {
|
||||
LOGGER.error("Scheduled repository maintenance failed", e);
|
||||
}
|
||||
|
||||
// Get a new random interval
|
||||
repositoryMaintenanceInterval = getRandomRepositoryMaintenanceInterval();
|
||||
}
|
||||
|
||||
// Prune stuck/slow/old peers
|
||||
@ -589,13 +679,68 @@ public class Controller extends Thread {
|
||||
Thread.interrupted();
|
||||
// Fall-through to exit
|
||||
} finally {
|
||||
trimExecutor.shutdownNow();
|
||||
PruneManager.getInstance().stop();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Import current trade bot states and minting accounts.
|
||||
* This is needed because the user may have bootstrapped, or there could be a database inconsistency
|
||||
* if the core crashed when computing the nonce during the start of the trade process.
|
||||
*/
|
||||
private static void importRepositoryData() {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
String exportPath = Settings.getInstance().getExportPath();
|
||||
try {
|
||||
Path importPath = Paths.get(exportPath, "TradeBotStates.json");
|
||||
repository.importDataFromFile(importPath.toString());
|
||||
} catch (FileNotFoundException e) {
|
||||
// Do nothing, as the files will only exist in certain cases
|
||||
}
|
||||
|
||||
try {
|
||||
trimExecutor.awaitTermination(2L, TimeUnit.SECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
// We tried...
|
||||
Path importPath = Paths.get(exportPath, "MintingAccounts.json");
|
||||
repository.importDataFromFile(importPath.toString());
|
||||
} catch (FileNotFoundException e) {
|
||||
// Do nothing, as the files will only exist in certain cases
|
||||
}
|
||||
repository.saveChanges();
|
||||
}
|
||||
catch (DataException | IOException e) {
|
||||
LOGGER.info("Unable to import data into repository: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private static void installInitialPeers() {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
if (repository.getNetworkRepository().getAllPeers().isEmpty()) {
|
||||
Network.installInitialPeers(repository);
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
// Fail silently as this is an optional step
|
||||
}
|
||||
}
|
||||
|
||||
private long getRandomRepositoryMaintenanceInterval() {
final long minInterval = Settings.getInstance().getRepositoryMaintenanceMinInterval();
final long maxInterval = Settings.getInstance().getRepositoryMaintenanceMaxInterval();
if (maxInterval == 0) {
return 0;
}
return (new Random().nextLong() % (maxInterval - minInterval)) + minInterval;
}
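Editorial note, not part of the commit: Random.nextLong() % range can be negative, so the value returned above may fall below minInterval. A minimal sketch of a variant that stays within [minInterval, maxInterval), assuming maxInterval > minInterval and using java.util.concurrent.ThreadLocalRandom:

long range = maxInterval - minInterval;
long interval = minInterval + ThreadLocalRandom.current().nextLong(range); // nextLong(bound) is always non-negative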
|
||||
|
||||
/**
|
||||
* Export current trade bot states and minting accounts.
|
||||
*/
|
||||
public void exportRepositoryData() {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
repository.exportNodeLocalData();
|
||||
|
||||
} catch (DataException e) {
|
||||
// Fail silently as this is an optional step
|
||||
}
|
||||
}
|
||||
|
||||
@ -878,7 +1023,7 @@ public class Controller extends Thread {
|
||||
}
|
||||
}
|
||||
|
||||
String tooltip = String.format("%s - %d %s - %s %d", actionText, numberOfPeers, connectionsText, heightText, height) + "\n" + String.format("Build version: %s", this.buildVersion);
|
||||
String tooltip = String.format("%s - %d %s - %s %d", actionText, numberOfPeers, connectionsText, heightText, height) + "\n" + String.format("%s: %s", Translator.INSTANCE.translate("SysTray", "BUILD_VERSION"), this.buildVersion);
|
||||
SysTray.getInstance().setToolTipText(tooltip);
|
||||
|
||||
this.callbackExecutor.execute(() -> {
|
||||
@ -951,6 +1096,10 @@ public class Controller extends Thread {
|
||||
}
|
||||
}
|
||||
|
||||
// Export local data
|
||||
LOGGER.info("Backing up local data");
|
||||
this.exportRepositoryData();
|
||||
|
||||
LOGGER.info("Shutting down networking");
|
||||
Network.getInstance().shutdown();
|
||||
|
||||
@ -1291,6 +1440,34 @@ public class Controller extends Thread {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
BlockData blockData = repository.getBlockRepository().fromSignature(signature);
|
||||
|
||||
if (blockData != null) {
|
||||
if (PruneManager.getInstance().isBlockPruned(blockData.getHeight())) {
|
||||
// If this is a pruned block, we likely only have partial data, so best not to send it
|
||||
blockData = null;
|
||||
}
|
||||
}
|
||||
|
||||
// If we have no block data, we should check the archive in case it's there
|
||||
if (blockData == null) {
|
||||
if (Settings.getInstance().isArchiveEnabled()) {
|
||||
byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository);
|
||||
if (bytes != null) {
|
||||
CachedBlockMessage blockMessage = new CachedBlockMessage(bytes);
|
||||
blockMessage.setId(message.getId());
|
||||
|
||||
// This call also causes the other needed data to be pulled in from repository
|
||||
if (!peer.sendMessage(blockMessage)) {
|
||||
peer.disconnect("failed to send block");
|
||||
// Don't fall-through to caching because failure to send might be from failure to build message
|
||||
return;
|
||||
}
|
||||
|
||||
// Sent successfully from archive, so nothing more to do
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (blockData == null) {
|
||||
// We don't have this block
|
||||
this.stats.getBlockMessageStats.unknownBlocks.getAndIncrement();
|
||||
@ -1459,12 +1636,29 @@ public class Controller extends Thread {
|
||||
int numberRequested = Math.min(Network.MAX_BLOCK_SUMMARIES_PER_REPLY, getBlockSummariesMessage.getNumberRequested());
|
||||
|
||||
BlockData blockData = repository.getBlockRepository().fromReference(parentSignature);
|
||||
if (blockData == null) {
|
||||
// Try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromReference(parentSignature);
|
||||
}
|
||||
|
||||
if (blockData != null) {
|
||||
if (PruneManager.getInstance().isBlockPruned(blockData.getHeight())) {
|
||||
// If this request contains a pruned block, we likely only have partial data, so best not to send anything
|
||||
// We always prune from the oldest first, so it's fine to just check the first block requested
|
||||
blockData = null;
|
||||
}
|
||||
}
|
||||
|
||||
while (blockData != null && blockSummaries.size() < numberRequested) {
|
||||
BlockSummaryData blockSummary = new BlockSummaryData(blockData);
|
||||
blockSummaries.add(blockSummary);
|
||||
|
||||
blockData = repository.getBlockRepository().fromReference(blockData.getSignature());
|
||||
byte[] previousSignature = blockData.getSignature();
|
||||
blockData = repository.getBlockRepository().fromReference(previousSignature);
|
||||
if (blockData == null) {
|
||||
// Try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromReference(previousSignature);
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while sending block summaries after %s to peer %s", Base58.encode(parentSignature), peer), e);
|
||||
@ -1513,11 +1707,20 @@ public class Controller extends Thread {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int numberRequested = getSignaturesMessage.getNumberRequested();
|
||||
BlockData blockData = repository.getBlockRepository().fromReference(parentSignature);
|
||||
if (blockData == null) {
|
||||
// Try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromReference(parentSignature);
|
||||
}
|
||||
|
||||
while (blockData != null && signatures.size() < numberRequested) {
|
||||
signatures.add(blockData.getSignature());
|
||||
|
||||
blockData = repository.getBlockRepository().fromReference(blockData.getSignature());
|
||||
byte[] previousSignature = blockData.getSignature();
|
||||
blockData = repository.getBlockRepository().fromReference(previousSignature);
|
||||
if (blockData == null) {
|
||||
// Try the archive
|
||||
blockData = repository.getBlockArchiveRepository().fromReference(previousSignature);
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while sending V2 signatures after %s to peer %s", Base58.encode(parentSignature), peer), e);
|
||||
|
@ -3,12 +3,9 @@ package org.qortal.controller;
|
||||
import java.math.BigInteger;
|
||||
import java.text.DecimalFormat;
|
||||
import java.text.NumberFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.Iterator;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
@ -71,6 +68,11 @@ public class Synchronizer {
|
||||
// Keep track of the size of the last re-org, so it can be logged
|
||||
private int lastReorgSize;
|
||||
|
||||
// Keep track of invalid blocks so that we don't keep trying to sync them
|
||||
private Map<String, Long> invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>());
|
||||
public Long timeValidBlockLastReceived = null;
|
||||
public Long timeInvalidBlockLastReceived = null;
|
||||
|
||||
private static Synchronizer instance;
|
||||
|
||||
public enum SynchronizationResult {
|
||||
@ -346,6 +348,12 @@ public class Synchronizer {
|
||||
}
|
||||
}
|
||||
|
||||
// Ignore this peer if it holds an invalid block
|
||||
if (this.containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) {
|
||||
LOGGER.debug("Ignoring peer %s because it holds an invalid block", peer);
|
||||
peers.remove(peer);
|
||||
}
|
||||
|
||||
// Reduce minChainLength if needed. If we don't have any blocks, this peer will be excluded from chain weight comparisons later in the process, so we shouldn't update minChainLength
|
||||
List <BlockSummaryData> peerBlockSummaries = peer.getCommonBlockData().getBlockSummariesAfterCommonBlock();
|
||||
if (peerBlockSummaries != null && peerBlockSummaries.size() > 0)
|
||||
@ -489,6 +497,71 @@ public class Synchronizer {
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Invalid block signature tracking */
|
||||
|
||||
private void addInvalidBlockSignature(byte[] signature) {
Long now = NTP.getTime();
if (now == null) {
return;
}

// Add or update existing entry
String sig58 = Base58.encode(signature);
invalidBlockSignatures.put(sig58, now);
}
private void deleteOlderInvalidSignatures(Long now) {
if (now == null) {
return;
}

// Delete signatures with older timestamps
Iterator it = invalidBlockSignatures.entrySet().iterator();
while (it.hasNext()) {
Map.Entry pair = (Map.Entry)it.next();
Long lastSeen = (Long) pair.getValue();

// Remove signature if we haven't seen it for more than 1 hour
if (now - lastSeen > 60 * 60 * 1000L) {
it.remove();
}
}
}
|
||||
private boolean containsInvalidBlockSummary(List<BlockSummaryData> blockSummaries) {
|
||||
if (blockSummaries == null || invalidBlockSignatures == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Loop through our known invalid blocks and check each one against supplied block summaries
|
||||
for (String invalidSignature58 : invalidBlockSignatures.keySet()) {
|
||||
byte[] invalidSignature = Base58.decode(invalidSignature58);
|
||||
for (BlockSummaryData blockSummary : blockSummaries) {
|
||||
byte[] signature = blockSummary.getSignature();
|
||||
if (Arrays.equals(signature, invalidSignature)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
private boolean containsInvalidBlockSignature(List<byte[]> blockSignatures) {
|
||||
if (blockSignatures == null || invalidBlockSignatures == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Loop through our known invalid blocks and check each one against supplied block signatures
|
||||
for (String invalidSignature58 : invalidBlockSignatures.keySet()) {
|
||||
byte[] invalidSignature = Base58.decode(invalidSignature58);
|
||||
for (byte[] signature : blockSignatures) {
|
||||
if (Arrays.equals(signature, invalidSignature)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
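A minimal standalone sketch, not the Synchronizer code itself, of the same pattern used above: remember invalid block signatures keyed by their Base58 string, and expire entries not refreshed within an hour.

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class InvalidSignatureCache {
	private static final long EXPIRY_MS = 60 * 60 * 1000L; // 1 hour, matching the code above

	private final Map<String, Long> lastSeen = new ConcurrentHashMap<>();

	void markInvalid(String signature58, long now) {
		lastSeen.put(signature58, now); // add or refresh the entry
	}

	boolean isKnownInvalid(String signature58) {
		return lastSeen.containsKey(signature58);
	}

	void expire(long now) {
		Iterator<Map.Entry<String, Long>> it = lastSeen.entrySet().iterator();
		while (it.hasNext()) {
			if (now - it.next().getValue() > EXPIRY_MS)
				it.remove();
		}
	}
}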
|
||||
|
||||
|
||||
/**
|
||||
* Attempt to synchronize blockchain with peer.
|
||||
* <p>
|
||||
@ -535,6 +608,15 @@ public class Synchronizer {
|
||||
// Reset last re-org size as we are starting a new sync round
|
||||
this.lastReorgSize = 0;
|
||||
|
||||
// Set the initial value of timeValidBlockLastReceived if it's null
|
||||
Long now = NTP.getTime();
|
||||
if (this.timeValidBlockLastReceived == null) {
|
||||
this.timeValidBlockLastReceived = now;
|
||||
}
|
||||
|
||||
// Delete invalid signatures with older timestamps
|
||||
this.deleteOlderInvalidSignatures(now);
|
||||
|
||||
List<BlockSummaryData> peerBlockSummaries = new ArrayList<>();
|
||||
SynchronizationResult findCommonBlockResult = fetchSummariesFromCommonBlock(repository, peer, ourInitialHeight, force, peerBlockSummaries, true);
|
||||
if (findCommonBlockResult != SynchronizationResult.OK) {
|
||||
@ -883,6 +965,12 @@ public class Synchronizer {
|
||||
break;
|
||||
}
|
||||
|
||||
// Catch a block with an invalid signature before orphaning, so that we retain our existing valid candidate
|
||||
if (this.containsInvalidBlockSignature(peerBlockSignatures)) {
|
||||
LOGGER.info(String.format("Peer %s sent invalid block signature: %.8s", peer, Base58.encode(latestPeerSignature)));
|
||||
return SynchronizationResult.INVALID_DATA;
|
||||
}
|
||||
|
||||
byte[] nextPeerSignature = peerBlockSignatures.get(0);
|
||||
int nextHeight = height + 1;
|
||||
|
||||
@ -985,13 +1073,20 @@ public class Synchronizer {
|
||||
if (Controller.isStopping())
|
||||
return SynchronizationResult.SHUTTING_DOWN;
|
||||
|
||||
newBlock.preProcess();
|
||||
|
||||
ValidationResult blockResult = newBlock.isValid();
|
||||
if (blockResult != ValidationResult.OK) {
|
||||
LOGGER.info(String.format("Peer %s sent invalid block for height %d, sig %.8s: %s", peer,
|
||||
newBlock.getBlockData().getHeight(), Base58.encode(newBlock.getSignature()), blockResult.name()));
|
||||
this.addInvalidBlockSignature(newBlock.getSignature());
|
||||
this.timeInvalidBlockLastReceived = NTP.getTime();
|
||||
return SynchronizationResult.INVALID_DATA;
|
||||
}
|
||||
|
||||
// Block is valid
|
||||
this.timeValidBlockLastReceived = NTP.getTime();
|
||||
|
||||
// Save transactions attached to this block
|
||||
for (Transaction transaction : newBlock.getTransactions()) {
|
||||
TransactionData transactionData = transaction.getTransactionData();
|
||||
@ -1173,13 +1268,20 @@ public class Synchronizer {
|
||||
for (Transaction transaction : newBlock.getTransactions())
|
||||
transaction.setInitialApprovalStatus();
|
||||
|
||||
newBlock.preProcess();
|
||||
|
||||
ValidationResult blockResult = newBlock.isValid();
|
||||
if (blockResult != ValidationResult.OK) {
|
||||
LOGGER.info(String.format("Peer %s sent invalid block for height %d, sig %.8s: %s", peer,
|
||||
ourHeight, Base58.encode(latestPeerSignature), blockResult.name()));
|
||||
this.addInvalidBlockSignature(newBlock.getSignature());
|
||||
this.timeInvalidBlockLastReceived = NTP.getTime();
|
||||
return SynchronizationResult.INVALID_DATA;
|
||||
}
|
||||
|
||||
// Block is valid
|
||||
this.timeValidBlockLastReceived = NTP.getTime();
|
||||
|
||||
// Save transactions attached to this block
|
||||
for (Transaction transaction : newBlock.getTransactions()) {
|
||||
TransactionData transactionData = transaction.getTransactionData();
|
||||
|
@ -0,0 +1,109 @@
|
||||
package org.qortal.controller.repository;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
public class AtStatesPruner implements Runnable {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(AtStatesPruner.class);
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
Thread.currentThread().setName("AT States pruner");
|
||||
|
||||
boolean archiveMode = false;
|
||||
if (!Settings.getInstance().isTopOnly()) {
|
||||
// Top-only mode isn't enabled, but we might want to prune for the purposes of archiving
|
||||
if (!Settings.getInstance().isArchiveEnabled()) {
|
||||
// No pruning or archiving, so we must not prune anything
|
||||
return;
|
||||
}
|
||||
else {
|
||||
// We're allowed to prune blocks that have already been archived
|
||||
archiveMode = true;
|
||||
}
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int pruneStartHeight = repository.getATRepository().getAtPruneHeight();
|
||||
|
||||
repository.discardChanges();
|
||||
repository.getATRepository().rebuildLatestAtStates();
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
repository.discardChanges();
|
||||
|
||||
Thread.sleep(Settings.getInstance().getAtStatesPruneInterval());
|
||||
|
||||
BlockData chainTip = Controller.getInstance().getChainTip();
|
||||
if (chainTip == null || NTP.getTime() == null)
|
||||
continue;
|
||||
|
||||
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
|
||||
if (Controller.getInstance().isSynchronizing())
|
||||
continue;
|
||||
|
||||
// Prune AT states for all blocks up until our latest minus pruneBlockLimit
|
||||
final int ourLatestHeight = chainTip.getHeight();
|
||||
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
|
||||
|
||||
// In archive mode we are only allowed to trim blocks that have already been archived
|
||||
if (archiveMode) {
|
||||
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
|
||||
|
||||
// TODO: validate that the actual archived data exists before pruning it?
|
||||
}
|
||||
|
||||
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getAtStatesPruneBatchSize();
|
||||
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
|
||||
|
||||
if (pruneStartHeight >= upperPruneHeight)
|
||||
continue;
|
||||
|
||||
LOGGER.debug(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
|
||||
|
||||
int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight);
|
||||
repository.saveChanges();
|
||||
int numAtStateDataRowsTrimmed = repository.getATRepository().trimAtStates(
|
||||
pruneStartHeight, upperPruneHeight, Settings.getInstance().getAtStatesTrimLimit());
|
||||
repository.saveChanges();
|
||||
|
||||
if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) {
|
||||
final int finalPruneStartHeight = pruneStartHeight;
|
||||
LOGGER.debug(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
|
||||
numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""),
|
||||
finalPruneStartHeight, upperPruneHeight));
|
||||
} else {
|
||||
// Can we move onto next batch?
|
||||
if (upperPrunableHeight > upperBatchHeight) {
|
||||
pruneStartHeight = upperBatchHeight;
|
||||
repository.getATRepository().setAtPruneHeight(pruneStartHeight);
|
||||
repository.getATRepository().rebuildLatestAtStates();
|
||||
repository.saveChanges();
|
||||
|
||||
final int finalPruneStartHeight = pruneStartHeight;
|
||||
LOGGER.debug(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
|
||||
}
|
||||
else {
|
||||
// We've pruned up to the upper prunable height
|
||||
// Back off for a while to save CPU for syncing
|
||||
repository.discardChanges();
|
||||
Thread.sleep(5*60*1000L);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.warn(String.format("Repository issue trying to prune AT states: %s", e.getMessage()));
|
||||
} catch (InterruptedException e) {
|
||||
// Time to exit
|
||||
}
|
||||
}
|
||||
|
||||
}
|
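PruneManager itself is not included in this diff, but based on the Controller change above ("PruneManager.getInstance().start()") it presumably schedules this runnable alongside the other new repository workers (AtStatesTrimmer, plus BlockPruner and BlockArchiver below). A hedged sketch only; the executor setup and the idea that PruneManager simply submits these runnables are assumptions, not the actual implementation:

// Assumes java.util.concurrent.ExecutorService/Executors and the existing DaemonThreadFactory
ExecutorService executor = Executors.newCachedThreadPool(new DaemonThreadFactory());
executor.execute(new AtStatesTrimmer());
executor.execute(new AtStatesPruner());
executor.execute(new BlockPruner());
executor.execute(new BlockArchiver());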
@ -1,7 +1,8 @@
|
||||
package org.qortal.controller;
|
||||
package org.qortal.controller.repository;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
@ -20,8 +21,8 @@ public class AtStatesTrimmer implements Runnable {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int trimStartHeight = repository.getATRepository().getAtTrimHeight();
|
||||
|
||||
repository.getATRepository().prepareForAtStateTrimming();
|
||||
repository.saveChanges();
|
||||
repository.discardChanges();
|
||||
repository.getATRepository().rebuildLatestAtStates();
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
repository.discardChanges();
|
||||
@ -62,7 +63,7 @@ public class AtStatesTrimmer implements Runnable {
|
||||
if (upperTrimmableHeight > upperBatchHeight) {
|
||||
trimStartHeight = upperBatchHeight;
|
||||
repository.getATRepository().setAtTrimHeight(trimStartHeight);
|
||||
repository.getATRepository().prepareForAtStateTrimming();
|
||||
repository.getATRepository().rebuildLatestAtStates();
|
||||
repository.saveChanges();
|
||||
|
||||
final int finalTrimStartHeight = trimStartHeight;
|
@ -0,0 +1,113 @@
|
||||
package org.qortal.controller.repository;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.repository.*;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class BlockArchiver implements Runnable {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(BlockArchiver.class);
|
||||
|
||||
private static final long INITIAL_SLEEP_PERIOD = 0L; // TODO: 5 * 60 * 1000L + 1234L; // ms
|
||||
|
||||
public void run() {
|
||||
Thread.currentThread().setName("Block archiver");
|
||||
|
||||
if (!Settings.getInstance().isArchiveEnabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
// Don't even start building until initial rush has ended
|
||||
Thread.sleep(INITIAL_SLEEP_PERIOD);
|
||||
|
||||
int startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
|
||||
|
||||
// Don't attempt to archive if we have no ATStatesHeightIndex, as it will be too slow
|
||||
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
|
||||
if (!hasAtStatesHeightIndex) {
|
||||
LOGGER.info("Unable to start block archiver due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
|
||||
repository.discardChanges();
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.info("Starting block archiver...");
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
repository.discardChanges();
|
||||
|
||||
Thread.sleep(Settings.getInstance().getArchiveInterval());
|
||||
|
||||
BlockData chainTip = Controller.getInstance().getChainTip();
|
||||
if (chainTip == null || NTP.getTime() == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
|
||||
if (Controller.getInstance().isSynchronizing()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't attempt to archive if we're not synced yet
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
// Build cache of blocks
|
||||
try {
|
||||
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
|
||||
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
|
||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
|
||||
switch (result) {
|
||||
case OK:
|
||||
// Increment block archive height
|
||||
startHeight += writer.getWrittenCount();
|
||||
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
|
||||
repository.saveChanges();
|
||||
break;
|
||||
|
||||
case STOPPING:
|
||||
return;
|
||||
|
||||
// We've reached the limit of the blocks we can archive
|
||||
// Sleep for a while to allow more to become available
|
||||
case NOT_ENOUGH_BLOCKS:
|
||||
// We didn't reach our file size target, so that must mean that we don't have enough blocks
|
||||
// yet or something went wrong. Sleep for a while and then try again.
|
||||
repository.discardChanges();
|
||||
Thread.sleep(60 * 60 * 1000L); // 1 hour
|
||||
break;
|
||||
|
||||
case BLOCK_NOT_FOUND:
|
||||
// We tried to archive a block that didn't exist. This is a major failure and likely means
|
||||
// that a bootstrap or re-sync is needed. Try again every minute until then.
|
||||
LOGGER.info("Error: block not found when building archive. If this error persists, " +
|
||||
"a bootstrap or re-sync may be needed.");
|
||||
repository.discardChanges();
|
||||
Thread.sleep( 60 * 1000L); // 1 minute
|
||||
break;
|
||||
}
|
||||
|
||||
} catch (IOException | TransformationException e) {
|
||||
LOGGER.info("Caught exception when creating block cache", e);
|
||||
}
|
||||
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.info("Caught exception when creating block cache", e);
|
||||
} catch (InterruptedException e) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
src/main/java/org/qortal/controller/repository/BlockPruner.java (new file, 114 lines)
@@ -0,0 +1,114 @@
|
||||
package org.qortal.controller.repository;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
public class BlockPruner implements Runnable {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(BlockPruner.class);
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
Thread.currentThread().setName("Block pruner");
|
||||
|
||||
boolean archiveMode = false;
|
||||
if (!Settings.getInstance().isTopOnly()) {
|
||||
// Top-only mode isn't enabled, but we might want to prune for the purposes of archiving
|
||||
if (!Settings.getInstance().isArchiveEnabled()) {
|
||||
// No pruning or archiving, so we must not prune anything
|
||||
return;
|
||||
}
|
||||
else {
|
||||
// We're allowed to prune blocks that have already been archived
|
||||
archiveMode = true;
|
||||
}
|
||||
}
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
int pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
|
||||
|
||||
// Don't attempt to prune if we have no ATStatesHeightIndex, as it will be too slow
|
||||
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
|
||||
if (!hasAtStatesHeightIndex) {
|
||||
LOGGER.info("Unable to start block pruner due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
|
||||
return;
|
||||
}
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
repository.discardChanges();
|
||||
|
||||
Thread.sleep(Settings.getInstance().getBlockPruneInterval());
|
||||
|
||||
BlockData chainTip = Controller.getInstance().getChainTip();
|
||||
if (chainTip == null || NTP.getTime() == null)
|
||||
continue;
|
||||
|
||||
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
|
||||
if (Controller.getInstance().isSynchronizing()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't attempt to prune if we're not synced yet
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Prune all blocks up until our latest minus pruneBlockLimit
|
||||
final int ourLatestHeight = chainTip.getHeight();
|
||||
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
|
||||
|
||||
// In archive mode we are only allowed to trim blocks that have already been archived
|
||||
if (archiveMode) {
|
||||
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
|
||||
}
|
||||
|
||||
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
|
||||
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
|
||||
|
||||
if (pruneStartHeight >= upperPruneHeight) {
|
||||
continue;
|
||||
}
|
||||
|
||||
LOGGER.debug(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
|
||||
|
||||
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
|
||||
repository.saveChanges();
|
||||
|
||||
if (numBlocksPruned > 0) {
|
||||
LOGGER.debug(String.format("Pruned %d block%s between %d and %d",
|
||||
numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
|
||||
pruneStartHeight, upperPruneHeight));
|
||||
} else {
|
||||
final int nextPruneHeight = upperPruneHeight + 1;
|
||||
repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
|
||||
repository.saveChanges();
|
||||
LOGGER.debug(String.format("Bumping block base prune height to %d", pruneStartHeight));
|
||||
|
||||
// Can we move onto next batch?
|
||||
if (upperPrunableHeight > nextPruneHeight) {
|
||||
pruneStartHeight = nextPruneHeight;
|
||||
}
|
||||
else {
|
||||
// We've pruned up to the upper prunable height
|
||||
// Back off for a while to save CPU for syncing
|
||||
repository.discardChanges();
|
||||
Thread.sleep(10*60*1000L);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.warn(String.format("Repository issue trying to prune blocks: %s", e.getMessage()));
|
||||
} catch (InterruptedException e) {
|
||||
// Time to exit
|
||||
}
|
||||
}
|
||||
|
||||
}
|
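For reference, the height arithmetic driving each prune batch above can be exercised in isolation. A minimal, self-contained sketch (not part of this commit; all numbers are invented for illustration and stand in for the Settings-driven values):

// Illustrative only: mirrors the batching arithmetic in BlockPruner.run()
public class PruneHeightExample {
	public static void main(String[] args) {
		int pruneStartHeight = 1_000;       // hypothetical current prune base height
		int ourLatestHeight = 250_000;      // hypothetical chain tip height
		int pruneBlockLimit = 1_450;        // stand-in for Settings.getPruneBlockLimit()
		int blockPruneBatchSize = 10_000;   // stand-in for Settings.getBlockPruneBatchSize()

		int upperPrunableHeight = ourLatestHeight - pruneBlockLimit;
		int upperBatchHeight = pruneStartHeight + blockPruneBatchSize;
		int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);

		// Only prune when the batch actually advances past the current base
		if (pruneStartHeight < upperPruneHeight) {
			System.out.printf("Would prune blocks %d to %d%n", pruneStartHeight, upperPruneHeight);
		}
	}
}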
@@ -0,0 +1,410 @@
package org.qortal.controller.repository;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.account.PublicKeyAccount;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.*;
import org.qortal.naming.Name;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.transaction.Transaction.TransactionType;
import org.qortal.utils.Unicode;

import java.util.*;

public class NamesDatabaseIntegrityCheck {

	private static final Logger LOGGER = LogManager.getLogger(NamesDatabaseIntegrityCheck.class);

	private static final List<TransactionType> ALL_NAME_TX_TYPE = Arrays.asList(
			TransactionType.REGISTER_NAME,
			TransactionType.UPDATE_NAME,
			TransactionType.BUY_NAME,
			TransactionType.SELL_NAME
	);

	private List<TransactionData> nameTransactions = new ArrayList<>();

	public int rebuildName(String name, Repository repository) {
		int modificationCount = 0;
		try {
			List<TransactionData> transactions = this.fetchAllTransactionsInvolvingName(name, repository);
			if (transactions.isEmpty()) {
				// This name was never registered, so there's nothing to do
				return modificationCount;
			}

			// Loop through each past transaction and re-apply it to the Names table
			for (TransactionData currentTransaction : transactions) {

				// Process REGISTER_NAME transactions
				if (currentTransaction.getType() == TransactionType.REGISTER_NAME) {
					RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) currentTransaction;
					Name nameObj = new Name(repository, registerNameTransactionData);
					nameObj.register();
					modificationCount++;
					LOGGER.trace("Processed REGISTER_NAME transaction for name {}", name);
				}

				// Process UPDATE_NAME transactions
				if (currentTransaction.getType() == TransactionType.UPDATE_NAME) {
					UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) currentTransaction;

					if (Objects.equals(updateNameTransactionData.getNewName(), name) &&
							!Objects.equals(updateNameTransactionData.getName(), updateNameTransactionData.getNewName())) {
						// This renames an existing name, so we need to process that instead
						this.rebuildName(updateNameTransactionData.getName(), repository);
					}
					else {
						Name nameObj = new Name(repository, name);
						if (nameObj != null && nameObj.getNameData() != null) {
							nameObj.update(updateNameTransactionData);
							modificationCount++;
							LOGGER.trace("Processed UPDATE_NAME transaction for name {}", name);
						} else {
							// Something went wrong
							throw new DataException(String.format("Name data not found for name %s", updateNameTransactionData.getName()));
						}
					}
				}

				// Process SELL_NAME transactions
				if (currentTransaction.getType() == TransactionType.SELL_NAME) {
					SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) currentTransaction;
					Name nameObj = new Name(repository, sellNameTransactionData.getName());
					if (nameObj != null && nameObj.getNameData() != null) {
						nameObj.sell(sellNameTransactionData);
						modificationCount++;
						LOGGER.trace("Processed SELL_NAME transaction for name {}", name);
					}
					else {
						// Something went wrong
						throw new DataException(String.format("Name data not found for name %s", sellNameTransactionData.getName()));
					}
				}

				// Process BUY_NAME transactions
				if (currentTransaction.getType() == TransactionType.BUY_NAME) {
					BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) currentTransaction;
					Name nameObj = new Name(repository, buyNameTransactionData.getName());
					if (nameObj != null && nameObj.getNameData() != null) {
						nameObj.buy(buyNameTransactionData);
						modificationCount++;
						LOGGER.trace("Processed BUY_NAME transaction for name {}", name);
					}
					else {
						// Something went wrong
						throw new DataException(String.format("Name data not found for name %s", buyNameTransactionData.getName()));
					}
				}
			}

		} catch (DataException e) {
			LOGGER.info("Unable to run integrity check for name {}: {}", name, e.getMessage());
		}

		return modificationCount;
	}

	public int rebuildAllNames() {
		int modificationCount = 0;
		try (final Repository repository = RepositoryManager.getRepository()) {
			List<String> names = this.fetchAllNames(repository);
			for (String name : names) {
				modificationCount += this.rebuildName(name, repository);
			}
			repository.saveChanges();
		}
		catch (DataException e) {
			LOGGER.info("Error when running integrity check for all names: {}", e.getMessage());
		}

		//LOGGER.info("modificationCount: {}", modificationCount);
		return modificationCount;
	}

	public void runIntegrityCheck() {
		boolean integrityCheckFailed = false;
		try (final Repository repository = RepositoryManager.getRepository()) {

			// Fetch all the (confirmed) REGISTER_NAME transactions
			List<RegisterNameTransactionData> registerNameTransactions = this.fetchRegisterNameTransactions();

			// Loop through each REGISTER_NAME txn signature and request the full transaction data
			for (RegisterNameTransactionData registerNameTransactionData : registerNameTransactions) {
				String registeredName = registerNameTransactionData.getName();
				NameData nameData = repository.getNameRepository().fromName(registeredName);

				// Check to see if this name has been updated or bought at any point
				TransactionData latestUpdate = this.fetchLatestModificationTransactionInvolvingName(registeredName, repository);
				if (latestUpdate == null) {
					// Name was never updated once registered
					// We expect this name to still be registered to this transaction's creator

					if (nameData == null) {
						LOGGER.info("Error: registered name {} doesn't exist in Names table. Adding...", registeredName);
						integrityCheckFailed = true;
					}
					else {
						LOGGER.trace("Registered name {} is correctly registered", registeredName);
					}

					// Check the owner is correct
					PublicKeyAccount creator = new PublicKeyAccount(repository, registerNameTransactionData.getCreatorPublicKey());
					if (!Objects.equals(creator.getAddress(), nameData.getOwner())) {
						LOGGER.info("Error: registered name {} is owned by {}, but it should be {}",
								registeredName, nameData.getOwner(), creator.getAddress());
						integrityCheckFailed = true;
					}
					else {
						LOGGER.trace("Registered name {} has the correct owner", registeredName);
					}
				}
				else {
					// Check if owner is correct after update

					// Check for name updates
					if (latestUpdate.getType() == TransactionType.UPDATE_NAME) {
						UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) latestUpdate;
						PublicKeyAccount creator = new PublicKeyAccount(repository, updateNameTransactionData.getCreatorPublicKey());

						// When this name is the "new name", we expect the current owner to match the txn creator
						if (Objects.equals(updateNameTransactionData.getNewName(), registeredName)) {
							if (!Objects.equals(creator.getAddress(), nameData.getOwner())) {
								LOGGER.info("Error: registered name {} is owned by {}, but it should be {}",
										registeredName, nameData.getOwner(), creator.getAddress());
								integrityCheckFailed = true;
							}
							else {
								LOGGER.trace("Registered name {} has the correct owner after being updated", registeredName);
							}
						}

						// When this name is the old name, we expect the "new name"'s owner to match the txn creator
						// The old name will then be unregistered, or re-registered.
						// FUTURE: check database integrity for names that have been updated and then the original name re-registered
						else if (Objects.equals(updateNameTransactionData.getName(), registeredName)) {
							NameData newNameData = repository.getNameRepository().fromName(updateNameTransactionData.getNewName());
							if (!Objects.equals(creator.getAddress(), newNameData.getOwner())) {
								LOGGER.info("Error: registered name {} is owned by {}, but it should be {}",
										updateNameTransactionData.getNewName(), newNameData.getOwner(), creator.getAddress());
								integrityCheckFailed = true;
							}
							else {
								LOGGER.trace("Registered name {} has the correct owner after being updated", updateNameTransactionData.getNewName());
							}
						}

						else {
							LOGGER.info("Unhandled update case for name {}", registeredName);
						}
					}

					// Check for name buys
					else if (latestUpdate.getType() == TransactionType.BUY_NAME) {
						BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) latestUpdate;
						PublicKeyAccount creator = new PublicKeyAccount(repository, buyNameTransactionData.getCreatorPublicKey());
						if (!Objects.equals(creator.getAddress(), nameData.getOwner())) {
							LOGGER.info("Error: registered name {} is owned by {}, but it should be {}",
									registeredName, nameData.getOwner(), creator.getAddress());
							integrityCheckFailed = true;
						}
						else {
							LOGGER.trace("Registered name {} has the correct owner after being bought", registeredName);
						}
					}

					// Check for name sells
					else if (latestUpdate.getType() == TransactionType.SELL_NAME) {
						SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) latestUpdate;
						PublicKeyAccount creator = new PublicKeyAccount(repository, sellNameTransactionData.getCreatorPublicKey());
						if (!Objects.equals(creator.getAddress(), nameData.getOwner())) {
							LOGGER.info("Error: registered name {} is owned by {}, but it should be {}",
									registeredName, nameData.getOwner(), creator.getAddress());
							integrityCheckFailed = true;
						}
						else {
							LOGGER.trace("Registered name {} has the correct owner after being listed for sale", registeredName);
						}
					}

					else {
						LOGGER.info("Unhandled case for name {}", registeredName);
					}

				}

			}

		} catch (DataException e) {
			LOGGER.warn(String.format("Repository issue while running registered names integrity check: %s", e.getMessage()));
			integrityCheckFailed = true;
		}

		if (integrityCheckFailed) {
			LOGGER.info("Registered names database integrity check failed. Bootstrapping is recommended.");
		} else {
			LOGGER.info("Registered names database integrity check passed.");
		}
	}

	private List<RegisterNameTransactionData> fetchRegisterNameTransactions() {
		List<RegisterNameTransactionData> registerNameTransactions = new ArrayList<>();

		for (TransactionData transactionData : this.nameTransactions) {
			if (transactionData.getType() == TransactionType.REGISTER_NAME) {
				RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) transactionData;
				registerNameTransactions.add(registerNameTransactionData);
			}
		}
		return registerNameTransactions;
	}

	private List<UpdateNameTransactionData> fetchUpdateNameTransactions() {
		List<UpdateNameTransactionData> updateNameTransactions = new ArrayList<>();

		for (TransactionData transactionData : this.nameTransactions) {
			if (transactionData.getType() == TransactionType.UPDATE_NAME) {
				UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) transactionData;
				updateNameTransactions.add(updateNameTransactionData);
			}
		}
		return updateNameTransactions;
	}

	private List<SellNameTransactionData> fetchSellNameTransactions() {
		List<SellNameTransactionData> sellNameTransactions = new ArrayList<>();

		for (TransactionData transactionData : this.nameTransactions) {
			if (transactionData.getType() == TransactionType.SELL_NAME) {
				SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData;
				sellNameTransactions.add(sellNameTransactionData);
			}
		}
		return sellNameTransactions;
	}

	private List<BuyNameTransactionData> fetchBuyNameTransactions() {
		List<BuyNameTransactionData> buyNameTransactions = new ArrayList<>();

		for (TransactionData transactionData : this.nameTransactions) {
			if (transactionData.getType() == TransactionType.BUY_NAME) {
				BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData;
				buyNameTransactions.add(buyNameTransactionData);
			}
		}
		return buyNameTransactions;
	}

	private void fetchAllNameTransactions(Repository repository) throws DataException {
		List<TransactionData> nameTransactions = new ArrayList<>();

		// Fetch all the confirmed REGISTER_NAME transaction signatures
		List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(
				null, null, null, ALL_NAME_TX_TYPE, null, null,
				ConfirmationStatus.CONFIRMED, null, null, false);

		for (byte[] signature : signatures) {
			TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
			nameTransactions.add(transactionData);
		}
		this.nameTransactions = nameTransactions;
	}

	private List<TransactionData> fetchAllTransactionsInvolvingName(String name, Repository repository) throws DataException {
		List<TransactionData> transactions = new ArrayList<>();
		String reducedName = Unicode.sanitize(name);

		// Fetch all the confirmed name-modification transactions
		if (this.nameTransactions.isEmpty()) {
			this.fetchAllNameTransactions(repository);
		}

		for (TransactionData transactionData : this.nameTransactions) {

			if ((transactionData instanceof RegisterNameTransactionData)) {
				RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) transactionData;
				if (Objects.equals(registerNameTransactionData.getReducedName(), reducedName)) {
					transactions.add(transactionData);
				}
			}
			if ((transactionData instanceof UpdateNameTransactionData)) {
				UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) transactionData;
				if (Objects.equals(updateNameTransactionData.getName(), name) ||
						Objects.equals(updateNameTransactionData.getReducedNewName(), reducedName)) {
					transactions.add(transactionData);
				}
			}
			if ((transactionData instanceof BuyNameTransactionData)) {
				BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData;
				if (Objects.equals(buyNameTransactionData.getName(), name)) {
					transactions.add(transactionData);
				}
			}
			if ((transactionData instanceof SellNameTransactionData)) {
				SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData;
				if (Objects.equals(sellNameTransactionData.getName(), name)) {
					transactions.add(transactionData);
				}
			}
		}
		return transactions;
	}

	private TransactionData fetchLatestModificationTransactionInvolvingName(String registeredName, Repository repository) throws DataException {
		List<TransactionData> transactionsInvolvingName = this.fetchAllTransactionsInvolvingName(registeredName, repository);

		// Get the latest update for this name (excluding REGISTER_NAME transactions)
		TransactionData latestUpdateToName = transactionsInvolvingName.stream()
				.filter(txn -> txn.getType() != TransactionType.REGISTER_NAME)
				.max(Comparator.comparing(TransactionData::getTimestamp))
				.orElse(null);

		return latestUpdateToName;
	}

	private List<String> fetchAllNames(Repository repository) throws DataException {
		List<String> names = new ArrayList<>();

		// Fetch all the confirmed name transactions
		if (this.nameTransactions.isEmpty()) {
			this.fetchAllNameTransactions(repository);
		}

		for (TransactionData transactionData : this.nameTransactions) {

			if ((transactionData instanceof RegisterNameTransactionData)) {
				RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) transactionData;
				if (!names.contains(registerNameTransactionData.getName())) {
					names.add(registerNameTransactionData.getName());
				}
			}
			if ((transactionData instanceof UpdateNameTransactionData)) {
				UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) transactionData;
				if (!names.contains(updateNameTransactionData.getName())) {
					names.add(updateNameTransactionData.getName());
				}
				if (!names.contains(updateNameTransactionData.getNewName())) {
					names.add(updateNameTransactionData.getNewName());
				}
			}
			if ((transactionData instanceof BuyNameTransactionData)) {
				BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData;
				if (!names.contains(buyNameTransactionData.getName())) {
					names.add(buyNameTransactionData.getName());
				}
			}
			if ((transactionData instanceof SellNameTransactionData)) {
				SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData;
				if (!names.contains(sellNameTransactionData.getName())) {
					names.add(sellNameTransactionData.getName());
				}
			}
		}
		return names;
	}

}
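A minimal usage fragment (not part of this commit) showing how the public entry points above might be invoked; it relies only on the methods declared in this class:

// Illustrative only: exercises the public API of NamesDatabaseIntegrityCheck
NamesDatabaseIntegrityCheck integrityCheck = new NamesDatabaseIntegrityCheck();

// Report-only pass: logs any ownership mismatches it finds
integrityCheck.runIntegrityCheck();

// Repair pass: replays name transactions into the Names table and
// returns how many modifications were applied
int modifications = integrityCheck.rebuildAllNames();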
@@ -1,8 +1,9 @@
package org.qortal.controller;
package org.qortal.controller.repository;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.block.BlockChain;
import org.qortal.controller.Controller;
import org.qortal.data.block.BlockData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
src/main/java/org/qortal/controller/repository/PruneManager.java (new file, 160 lines)
@@ -0,0 +1,160 @@
package org.qortal.controller.repository;

import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;

import org.qortal.data.block.BlockData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.settings.Settings;
import org.qortal.utils.DaemonThreadFactory;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PruneManager {

	private static final Logger LOGGER = LogManager.getLogger(PruneManager.class);

	private static PruneManager instance;

	private boolean isTopOnly = Settings.getInstance().isTopOnly();
	private int pruneBlockLimit = Settings.getInstance().getPruneBlockLimit();

	private ExecutorService executorService;

	private PruneManager() {

	}

	public static synchronized PruneManager getInstance() {
		if (instance == null)
			instance = new PruneManager();

		return instance;
	}

	public void start() {
		this.executorService = Executors.newCachedThreadPool(new DaemonThreadFactory());

		if (Settings.getInstance().isTopOnly()) {
			// Top-only-sync
			this.startTopOnlySyncMode();
		}
		else if (Settings.getInstance().isArchiveEnabled()) {
			// Full node with block archive
			this.startFullNodeWithBlockArchive();
		}
		else {
			// Full node with full SQL support
			this.startFullSQLNode();
		}
	}

	/**
	 * Top-only-sync
	 * In this mode, we delete (prune) all blocks except
	 * a small number of recent ones. There is no need for
	 * trimming or archiving, because all relevant blocks
	 * are deleted.
	 */
	private void startTopOnlySyncMode() {
		this.startPruning();

		// We don't need the block archive in top-only mode
		this.deleteArchive();
	}

	/**
	 * Full node with block archive
	 * In this mode we archive trimmed blocks, and then
	 * prune archived blocks to keep the database small
	 */
	private void startFullNodeWithBlockArchive() {
		this.startTrimming();
		this.startArchiving();
		this.startPruning();
	}

	/**
	 * Full node with full SQL support
	 * In this mode we trim the database but don't prune
	 * or archive any data, because we want to maintain
	 * full SQL support of old blocks. This mode will not
	 * be actively maintained but can be used by those who
	 * need to perform SQL analysis on older blocks.
	 */
	private void startFullSQLNode() {
		this.startTrimming();
	}


	private void startPruning() {
		this.executorService.execute(new AtStatesPruner());
		this.executorService.execute(new BlockPruner());
	}

	private void startTrimming() {
		this.executorService.execute(new AtStatesTrimmer());
		this.executorService.execute(new OnlineAccountsSignaturesTrimmer());
	}

	private void startArchiving() {
		this.executorService.execute(new BlockArchiver());
	}

	private void deleteArchive() {
		if (!Settings.getInstance().isTopOnly()) {
			LOGGER.error("Refusing to delete archive when not in top-only mode");
		}

		try {
			Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive");
			if (archivePath.toFile().exists()) {
				LOGGER.info("Deleting block archive because we are in top-only mode...");
				FileUtils.deleteDirectory(archivePath.toFile());
			}

		} catch (IOException e) {
			LOGGER.info("Couldn't delete archive: {}", e.getMessage());
		}
	}

	public void stop() {
		this.executorService.shutdownNow();

		try {
			this.executorService.awaitTermination(2L, TimeUnit.SECONDS);
		} catch (InterruptedException e) {
			// We tried...
		}
	}

	public boolean isBlockPruned(int height) throws DataException {
		if (!this.isTopOnly) {
			return false;
		}

		BlockData chainTip = Controller.getInstance().getChainTip();
		if (chainTip == null) {
			throw new DataException("Unable to determine chain tip when checking if a block is pruned");
		}

		if (height == 1) {
			// We don't prune the genesis block
			return false;
		}

		final int ourLatestHeight = chainTip.getHeight();
		final int latestUnprunedHeight = ourLatestHeight - this.pruneBlockLimit;

		return (height < latestUnprunedHeight);
	}

}
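A minimal wiring fragment (not part of this commit) showing how a node might drive PruneManager over its lifecycle, using only the methods shown above; where and when Controller makes these calls is an assumption:

// Illustrative only: start the background pruning/trimming/archiving threads at
// node startup and shut them down at exit
PruneManager pruneManager = PruneManager.getInstance();
pruneManager.start();    // picks top-only, block-archive, or full-SQL mode from Settings

// ... node runs ...

// Before serving a block request, a caller can check whether the block still exists locally
boolean pruned = pruneManager.isBlockPruned(someHeight);   // someHeight is hypothetical; may throw DataException

pruneManager.stop();     // shuts down the executor and waits briefly for termination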
@@ -360,6 +360,7 @@ public class BitcoinACCTv1TradeBot implements AcctTradeBot {
			case BOB_DONE:
			case ALICE_REFUNDED:
			case BOB_REFUNDED:
			case ALICE_REFUNDING_A:
				return true;

			default:

@@ -353,6 +353,7 @@ public class DogecoinACCTv1TradeBot implements AcctTradeBot {
			case BOB_DONE:
			case ALICE_REFUNDED:
			case BOB_REFUNDED:
			case ALICE_REFUNDING_A:
				return true;

			default:

@@ -364,6 +364,7 @@ public class LitecoinACCTv1TradeBot implements AcctTradeBot {
			case BOB_DONE:
			case ALICE_REFUNDED:
			case BOB_REFUNDED:
			case ALICE_REFUNDING_A:
				return true;

			default:
@@ -245,17 +245,17 @@ public class TradeBot implements Listener {
		}
	}

	/*package*/ static byte[] generateTradePrivateKey() {
	public static byte[] generateTradePrivateKey() {
		// The private key is used for both Curve25519 and secp256k1 so needs to be valid for both.
		// Curve25519 accepts any seed, so generate a valid secp256k1 key and use that.
		return new ECKey().getPrivKeyBytes();
	}

	/*package*/ static byte[] deriveTradeNativePublicKey(byte[] privateKey) {
	public static byte[] deriveTradeNativePublicKey(byte[] privateKey) {
		return PrivateKeyAccount.toPublicKey(privateKey);
	}

	/*package*/ static byte[] deriveTradeForeignPublicKey(byte[] privateKey) {
	public static byte[] deriveTradeForeignPublicKey(byte[] privateKey) {
		return ECKey.fromPrivate(privateKey).getPubKey();
	}

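With these helpers now public, external callers can derive both key flavours from a single seed. A short fragment (not part of this commit), using only the three methods shown above:

// Illustrative only: one private key yields both a Qortal (Curve25519) and a
// foreign-chain (secp256k1) public key
byte[] tradePrivateKey = TradeBot.generateTradePrivateKey();
byte[] nativePublicKey = TradeBot.deriveTradeNativePublicKey(tradePrivateKey);
byte[] foreignPublicKey = TradeBot.deriveTradeForeignPublicKey(tradePrivateKey);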
@@ -406,14 +406,24 @@ public abstract class Bitcoiny implements ForeignBlockchain {
	protected SimpleTransaction convertToSimpleTransaction(BitcoinyTransaction t, Set<String> keySet) {
		long amount = 0;
		long total = 0L;
		long totalInputAmount = 0L;
		long totalOutputAmount = 0L;
		List<SimpleTransaction.Input> inputs = new ArrayList<>();
		List<SimpleTransaction.Output> outputs = new ArrayList<>();

		for (BitcoinyTransaction.Input input : t.inputs) {
			try {
				BitcoinyTransaction t2 = getTransaction(input.outputTxHash);
				List<String> senders = t2.outputs.get(input.outputVout).addresses;
				long inputAmount = t2.outputs.get(input.outputVout).value;
				totalInputAmount += inputAmount;
				for (String sender : senders) {
					boolean addressInWallet = false;
					if (keySet.contains(sender)) {
						total += t2.outputs.get(input.outputVout).value;
						total += inputAmount;
						addressInWallet = true;
					}
					inputs.add(new SimpleTransaction.Input(sender, inputAmount, addressInWallet));
				}
			} catch (ForeignBlockchainException e) {
				LOGGER.trace("Failed to retrieve transaction information {}", input.outputTxHash);

@@ -422,17 +432,22 @@ public abstract class Bitcoiny implements ForeignBlockchain {
		if (t.outputs != null && !t.outputs.isEmpty()) {
			for (BitcoinyTransaction.Output output : t.outputs) {
				for (String address : output.addresses) {
					boolean addressInWallet = false;
					if (keySet.contains(address)) {
						if (total > 0L) {
							amount -= (total - output.value);
						} else {
							amount += output.value;
						}
						addressInWallet = true;
					}
					outputs.add(new SimpleTransaction.Output(address, output.value, addressInWallet));
				}
				totalOutputAmount += output.value;
			}
		}
		return new SimpleTransaction(t.txHash, t.timestamp, amount);
		long fee = totalInputAmount - totalOutputAmount;
		return new SimpleTransaction(t.txHash, t.timestamp, amount, fee, inputs, outputs);
	}

	/**
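The fee now reported by convertToSimpleTransaction is simply the summed input values minus the summed output values. A worked fragment (illustrative figures only, not part of this commit):

// Illustrative only: mirrors the fee calculation above (values in the chain's smallest unit)
long totalInputAmount = 150_000L;   // hypothetical sum of all input values
long totalOutputAmount = 149_000L;  // hypothetical sum of all output values
long fee = totalInputAmount - totalOutputAmount;   // 1_000 units paid as the mining fee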
@@ -653,18 +653,27 @@ public class ElectrumX extends BitcoinyBlockchainProvider {

		Object errorObj = responseJson.get("error");
		if (errorObj != null) {
			if (errorObj instanceof String)
				throw new ForeignBlockchainException.NetworkException(String.format("Unexpected error message from ElectrumX RPC %s: %s", method, (String) errorObj), this.currentServer);
			if (errorObj instanceof String) {
				LOGGER.debug(String.format("Unexpected error message from ElectrumX server %s for RPC method %s: %s", this.currentServer, method, (String) errorObj));
				// Try another server
				return null;
			}

			if (!(errorObj instanceof JSONObject))
				throw new ForeignBlockchainException.NetworkException(String.format("Unexpected error response from ElectrumX RPC %s", method), this.currentServer);
			if (!(errorObj instanceof JSONObject)) {
				LOGGER.debug(String.format("Unexpected error response from ElectrumX server %s for RPC method %s", this.currentServer, method));
				// Try another server
				return null;
			}

			JSONObject errorJson = (JSONObject) errorObj;

			Object messageObj = errorJson.get("message");

			if (!(messageObj instanceof String))
				throw new ForeignBlockchainException.NetworkException(String.format("Missing/invalid message in error response from ElectrumX RPC %s", method), this.currentServer);
			if (!(messageObj instanceof String)) {
				LOGGER.debug(String.format("Missing/invalid message in error response from ElectrumX server %s for RPC method %s", this.currentServer, method));
				// Try another server
				return null;
			}

			String message = (String) messageObj;

@@ -21,6 +21,8 @@ public class Litecoin extends Bitcoiny {

	private static final Coin DEFAULT_FEE_PER_KB = Coin.valueOf(10000); // 0.0001 LTC per 1000 bytes

	private static final long MINIMUM_ORDER_AMOUNT = 1000000; // 0.01 LTC minimum order, to avoid dust errors

	// Temporary values until a dynamic fee system is written.
	private static final long MAINNET_FEE = 1000L;
	private static final long NON_MAINNET_FEE = 1000L; // enough for TESTNET3 and should be OK for REGTEST

@@ -164,6 +166,11 @@ public class Litecoin extends Bitcoiny {
		return DEFAULT_FEE_PER_KB;
	}

	@Override
	public long getMinimumOrderAmount() {
		return MINIMUM_ORDER_AMOUNT;
	}

	/**
	 * Returns estimated LTC fee, in sats per 1000bytes, optionally for historic timestamp.
	 *
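For context, the new minimum order amount is expressed in litoshis (1 LTC = 100,000,000 litoshis), so the constant matches the 0.01 LTC mentioned in its comment. A one-line check (illustrative only, not part of this commit):

// Illustrative only: 1_000_000 litoshis / 100_000_000 litoshis-per-LTC = 0.01 LTC
double minimumOrderLtc = 1_000_000 / 100_000_000.0;   // 0.01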
@@ -2,20 +2,85 @@ package org.qortal.crosschain;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import java.util.List;

@XmlAccessorType(XmlAccessType.FIELD)
public class SimpleTransaction {
	private String txHash;
	private Integer timestamp;
	private long totalAmount;
	private long feeAmount;
	private List<Input> inputs;
	private List<Output> outputs;


	@XmlAccessorType(XmlAccessType.FIELD)
	public static class Input {
		private String address;
		private long amount;
		private boolean addressInWallet;

		public Input() {
		}

		public Input(String address, long amount, boolean addressInWallet) {
			this.address = address;
			this.amount = amount;
			this.addressInWallet = addressInWallet;
		}

		public String getAddress() {
			return address;
		}

		public long getAmount() {
			return amount;
		}

		public boolean getAddressInWallet() {
			return addressInWallet;
		}
	}

	@XmlAccessorType(XmlAccessType.FIELD)
	public static class Output {
		private String address;
		private long amount;
		private boolean addressInWallet;

		public Output() {
		}

		public Output(String address, long amount, boolean addressInWallet) {
			this.address = address;
			this.amount = amount;
			this.addressInWallet = addressInWallet;
		}

		public String getAddress() {
			return address;
		}

		public long getAmount() {
			return amount;
		}

		public boolean getAddressInWallet() {
			return addressInWallet;
		}
	}


	public SimpleTransaction() {
	}

	public SimpleTransaction(String txHash, Integer timestamp, long totalAmount) {
	public SimpleTransaction(String txHash, Integer timestamp, long totalAmount, long feeAmount, List<Input> inputs, List<Output> outputs) {
		this.txHash = txHash;
		this.timestamp = timestamp;
		this.totalAmount = totalAmount;
		this.feeAmount = feeAmount;
		this.inputs = inputs;
		this.outputs = outputs;
	}

	public String getTxHash() {

@@ -29,4 +94,16 @@ public class SimpleTransaction {
	public long getTotalAmount() {
		return totalAmount;
	}
}

	public long getFeeAmount() {
		return feeAmount;
	}

	public List<Input> getInputs() {
		return this.inputs;
	}

	public List<Output> getOutputs() {
		return this.outputs;
	}
}

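A construction fragment (not part of this commit) showing how the expanded constructor and the nested Input/Output types fit together; all addresses and figures are invented, and java.util.ArrayList/List imports are assumed:

// Illustrative only: building a SimpleTransaction the way Bitcoiny now does
List<SimpleTransaction.Input> inputs = new ArrayList<>();
inputs.add(new SimpleTransaction.Input("sender-address-placeholder", 150_000L, true));

List<SimpleTransaction.Output> outputs = new ArrayList<>();
outputs.add(new SimpleTransaction.Output("recipient-address-placeholder", 149_000L, false));

long amount = -149_000L;           // net change from this wallet's point of view
long fee = 150_000L - 149_000L;    // totalInputAmount - totalOutputAmount

SimpleTransaction tx = new SimpleTransaction("tx-hash-placeholder", 1620000000, amount, fee, inputs, outputs);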
@@ -1,5 +1,8 @@
package org.qortal.crypto;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

@@ -75,12 +78,74 @@ public abstract class Crypto {
		return digest(digest(input));
	}

	/**
	 * Returns 32-byte SHA-256 digest of the file passed in.
	 *
	 * @param file
	 *            the file to digest
	 * @return byte[32] digest
	 *
	 * @throws IOException if the file cannot be read
	 */
	public static byte[] digest(File file) throws IOException {
		return Crypto.digest(file, 8192);
	}

	/**
	 * Returns 32-byte SHA-256 digest of the file passed in, in hex format.
	 *
	 * @param file
	 *            the file to digest
	 * @return digest as a lowercase hexadecimal string
	 *
	 * @throws IOException if the file cannot be read
	 */
	public static String digestHexString(File file, int bufferSize) throws IOException {
		byte[] digest = Crypto.digest(file, bufferSize);

		// Convert to hex
		StringBuilder stringBuilder = new StringBuilder();
		for (byte b : digest) {
			stringBuilder.append(String.format("%02x", b));
		}
		return stringBuilder.toString();
	}

	/**
	 * Returns 32-byte SHA-256 digest of the file passed in.
	 *
	 * @param file
	 *            the file to digest
	 * @param bufferSize
	 *            the number of bytes to load into memory at a time
	 * @return byte[32] digest
	 *
	 * @throws IOException if the file cannot be read
	 */
	public static byte[] digest(File file, int bufferSize) throws IOException {
		try {
			MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
			FileInputStream fileInputStream = new FileInputStream(file);
			byte[] bytes = new byte[bufferSize];
			int count;

			while ((count = fileInputStream.read(bytes)) != -1) {
				sha256.update(bytes, 0, count);
			}
			fileInputStream.close();

			return sha256.digest();

		} catch (NoSuchAlgorithmException e) {
			throw new RuntimeException("SHA-256 message digest not available");
		}
	}

	/**
	 * Returns 64-byte duplicated digest of message passed in input.
	 * <p>
	 * Effectively <tt>Bytes.concat(digest(input), digest(input)).
	 *
	 * @param addressVersion
	 *
	 * @param input
	 */
	public static byte[] dupDigest(byte[] input) {

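A usage fragment (not part of this commit) for the new file-digest helpers; the file path is a placeholder:

// Illustrative only: hash a local file with the default 8 KiB buffer,
// then again with an explicit buffer size, rendering the second as hex.
// Both calls may throw IOException if the file cannot be read.
File file = new File("/tmp/example.dat");              // placeholder path
byte[] sha256 = Crypto.digest(file);                   // uses the 8192-byte buffer overload
String sha256Hex = Crypto.digestHexString(file, 8192);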
@@ -4,10 +4,12 @@ import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlTransient;

import org.json.JSONObject;
import org.qortal.crypto.Crypto;

import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.media.Schema.AccessMode;
import org.qortal.utils.Base58;

// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)

@@ -61,4 +63,21 @@ public class MintingAccountData {
		return this.publicKey;
	}


	// JSON

	public JSONObject toJson() {
		JSONObject jsonObject = new JSONObject();
		jsonObject.put("privateKey", Base58.encode(this.getPrivateKey()));
		jsonObject.put("publicKey", Base58.encode(this.getPublicKey()));
		return jsonObject;
	}

	public static MintingAccountData fromJson(JSONObject json) {
		return new MintingAccountData(
				json.isNull("privateKey") ? null : Base58.decode(json.getString("privateKey")),
				json.isNull("publicKey") ? null : Base58.decode(json.getString("publicKey"))
		);
	}

}

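A round-trip fragment (not part of this commit) for the new JSON helpers; the key byte arrays are placeholders, and the two-argument constructor is assumed only because fromJson() above relies on it:

// Illustrative only: serialize a minting account to JSON and read it back
MintingAccountData original = new MintingAccountData(privateKeyBytes, publicKeyBytes); // placeholder byte arrays
JSONObject json = original.toJson();            // Base58-encoded "privateKey" / "publicKey" fields
MintingAccountData restored = MintingAccountData.fromJson(json);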
@@ -23,6 +23,7 @@ public class ATData {
	private boolean isFrozen;
	@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
	private Long frozenBalance;
	private Long sleepUntilMessageTimestamp;

	// Constructors

@@ -31,7 +32,8 @@ public class ATData {
	}

	public ATData(String ATAddress, byte[] creatorPublicKey, long creation, int version, long assetId, byte[] codeBytes, byte[] codeHash,
			boolean isSleeping, Integer sleepUntilHeight, boolean isFinished, boolean hadFatalError, boolean isFrozen, Long frozenBalance) {
			boolean isSleeping, Integer sleepUntilHeight, boolean isFinished, boolean hadFatalError, boolean isFrozen, Long frozenBalance,
			Long sleepUntilMessageTimestamp) {
		this.ATAddress = ATAddress;
		this.creatorPublicKey = creatorPublicKey;
		this.creation = creation;

@@ -45,6 +47,7 @@ public class ATData {
		this.hadFatalError = hadFatalError;
		this.isFrozen = isFrozen;
		this.frozenBalance = frozenBalance;
		this.sleepUntilMessageTimestamp = sleepUntilMessageTimestamp;
	}

	/** For constructing skeleton ATData with bare minimum info. */

@@ -133,4 +136,12 @@ public class ATData {
		this.frozenBalance = frozenBalance;
	}

	public Long getSleepUntilMessageTimestamp() {
		return this.sleepUntilMessageTimestamp;
	}

	public void setSleepUntilMessageTimestamp(Long sleepUntilMessageTimestamp) {
		this.sleepUntilMessageTimestamp = sleepUntilMessageTimestamp;
	}

}

@@ -10,35 +10,32 @@ public class ATStateData {
	private Long fees;
	private boolean isInitial;

	// Qortal-AT-specific
	private Long sleepUntilMessageTimestamp;

	// Constructors

	/** Create new ATStateData */
	public ATStateData(String ATAddress, Integer height, byte[] stateData, byte[] stateHash, Long fees, boolean isInitial) {
	public ATStateData(String ATAddress, Integer height, byte[] stateData, byte[] stateHash, Long fees,
			boolean isInitial, Long sleepUntilMessageTimestamp) {
		this.ATAddress = ATAddress;
		this.height = height;
		this.stateData = stateData;
		this.stateHash = stateHash;
		this.fees = fees;
		this.isInitial = isInitial;
		this.sleepUntilMessageTimestamp = sleepUntilMessageTimestamp;
	}

	/** For recreating per-block ATStateData from repository where not all info is needed */
	public ATStateData(String ATAddress, int height, byte[] stateHash, Long fees, boolean isInitial) {
		this(ATAddress, height, null, stateHash, fees, isInitial);
	}

	/** For creating ATStateData from serialized bytes when we don't have all the info */
	public ATStateData(String ATAddress, byte[] stateHash) {
		// This won't ever be initial AT state from deployment as that's never serialized over the network,
		// but generated when the DeployAtTransaction is processed locally.
		this(ATAddress, null, null, stateHash, null, false);
		this(ATAddress, height, null, stateHash, fees, isInitial, null);
	}

	/** For creating ATStateData from serialized bytes when we don't have all the info */
	public ATStateData(String ATAddress, byte[] stateHash, Long fees) {
		// This won't ever be initial AT state from deployment as that's never serialized over the network,
		// but generated when the DeployAtTransaction is processed locally.
		this(ATAddress, null, null, stateHash, fees, false);
		// This won't ever be initial AT state from deployment, as that's never serialized over the network.
		this(ATAddress, null, null, stateHash, fees, false, null);
	}

	// Getters / setters

@@ -72,4 +69,12 @@ public class ATStateData {
		return this.isInitial;
	}

	public Long getSleepUntilMessageTimestamp() {
		return this.sleepUntilMessageTimestamp;
	}

	public void setSleepUntilMessageTimestamp(Long sleepUntilMessageTimestamp) {
		this.sleepUntilMessageTimestamp = sleepUntilMessageTimestamp;
	}

}

src/main/java/org/qortal/data/block/BlockArchiveData.java (new file, 47 lines)
@@ -0,0 +1,47 @@
package org.qortal.data.block;

import org.qortal.block.Block;

public class BlockArchiveData {

	// Properties
	private byte[] signature;
	private Integer height;
	private Long timestamp;
	private byte[] minterPublicKey;

	// Constructors

	public BlockArchiveData(byte[] signature, Integer height, long timestamp, byte[] minterPublicKey) {
		this.signature = signature;
		this.height = height;
		this.timestamp = timestamp;
		this.minterPublicKey = minterPublicKey;
	}

	public BlockArchiveData(BlockData blockData) {
		this.signature = blockData.getSignature();
		this.height = blockData.getHeight();
		this.timestamp = blockData.getTimestamp();
		this.minterPublicKey = blockData.getMinterPublicKey();
	}

	// Getters/setters

	public byte[] getSignature() {
		return this.signature;
	}

	public Integer getHeight() {
		return this.height;
	}

	public Long getTimestamp() {
		return this.timestamp;
	}

	public byte[] getMinterPublicKey() {
		return this.minterPublicKey;
	}

}
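A small fragment (not part of this commit) showing the two ways an archive index entry can be built; all variables are placeholders:

// Illustrative only: either copy the fields from an existing BlockData,
// or supply them directly
BlockArchiveData fromBlock = new BlockArchiveData(blockData);   // blockData obtained elsewhere
BlockArchiveData manual = new BlockArchiveData(signatureBytes, 123456, 1620000000000L, minterPublicKeyBytes);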
@@ -26,7 +26,7 @@ public class RegisterNameTransactionData extends TransactionData {
	@Schema(description = "requested name", example = "my-name")
	private String name;

	@Schema(description = "simple name-related info in JSON format", example = "{ \"age\": 30 }")
	@Schema(description = "simple name-related info in JSON or text format", example = "Registered Name on the Qortal Chain")
	private String data;

	// For internal use

@@ -26,7 +26,7 @@ public class UpdateNameTransactionData extends TransactionData {
	@Schema(description = "new name", example = "my-new-name")
	private String newName;

	@Schema(description = "replacement simple name-related info in JSON format", example = "{ \"age\": 30 }")
	@Schema(description = "replacement simple name-related info in JSON or text format", example = "Registered Name on the Qortal Chain")
	private String newData;

	// For internal use

@@ -23,17 +23,21 @@ public class Gui {
	private SysTray sysTray = null;

	private Gui() {
		this.isHeadless = GraphicsEnvironment.isHeadless();
		try {
			this.isHeadless = GraphicsEnvironment.isHeadless();

		if (!this.isHeadless) {
			try {
				UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
			} catch (ClassNotFoundException | InstantiationException | IllegalAccessException
					| UnsupportedLookAndFeelException e) {
				// Use whatever look-and-feel comes by default then
			if (!this.isHeadless) {
				try {
					UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
				} catch (ClassNotFoundException | InstantiationException | IllegalAccessException
						| UnsupportedLookAndFeelException e) {
					// Use whatever look-and-feel comes by default then
				}

				showSplash();
			}

			showSplash();
		} catch (Exception e) {
			LOGGER.info("Unable to initialize GUI: {}", e.getMessage());
		}
	}

@@ -6,9 +6,11 @@ import java.util.List;
import java.awt.image.BufferedImage;

import javax.swing.*;
import javax.swing.border.EmptyBorder;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;

public class SplashFrame {

@@ -16,6 +18,7 @@ public class SplashFrame {

	private static SplashFrame instance;
	private JFrame splashDialog;
	private SplashPanel splashPanel;

	@SuppressWarnings("serial")
	public static class SplashPanel extends JPanel {

@@ -23,26 +26,53 @@ public class SplashFrame {

		private String defaultSplash = "Qlogo_512.png";

		private JLabel statusLabel;

		public SplashPanel() {
			image = Gui.loadImage(defaultSplash);

			setOpaque(false);
			setLayout(new GridBagLayout());
		}
			setOpaque(true);
			setLayout(new BoxLayout(this, BoxLayout.Y_AXIS));
			setBorder(new EmptyBorder(10, 10, 10, 10));
			setBackground(Color.BLACK);

		@Override
		protected void paintComponent(Graphics g) {
			super.paintComponent(g);
			g.drawImage(image, 0, 0, getWidth(), getHeight(), this);
			// Add logo
			JLabel imageLabel = new JLabel(new ImageIcon(image));
			imageLabel.setSize(new Dimension(300, 300));
			add(imageLabel);

			// Add spacing
			add(Box.createRigidArea(new Dimension(0, 16)));

			// Add status label
			String text = String.format("Starting Qortal Core v%s...", Controller.getInstance().getVersionStringWithoutPrefix());
			statusLabel = new JLabel(text, JLabel.CENTER);
			statusLabel.setMaximumSize(new Dimension(500, 50));
			statusLabel.setFont(new Font("Verdana", Font.PLAIN, 20));
			statusLabel.setBackground(Color.BLACK);
			statusLabel.setForeground(new Color(255, 255, 255, 255));
			statusLabel.setOpaque(true);
			statusLabel.setBorder(null);
			add(statusLabel);
		}

		@Override
		public Dimension getPreferredSize() {
			return new Dimension(500, 500);
			return new Dimension(500, 580);
		}

		public void updateStatus(String text) {
			if (statusLabel != null) {
				statusLabel.setText(text);
			}
		}
	}

	private SplashFrame() {
		if (GraphicsEnvironment.isHeadless()) {
			return;
		}

		this.splashDialog = new JFrame();

		List<Image> icons = new ArrayList<>();

@@ -55,12 +85,13 @@ public class SplashFrame {
		icons.add(Gui.loadImage("icons/Qlogo_128.png"));
		this.splashDialog.setIconImages(icons);

		this.splashDialog.getContentPane().add(new SplashPanel());
		this.splashPanel = new SplashPanel();
		this.splashDialog.getContentPane().add(this.splashPanel);
		this.splashDialog.setDefaultCloseOperation(JFrame.DO_NOTHING_ON_CLOSE);
		this.splashDialog.setUndecorated(true);
		this.splashDialog.pack();
		this.splashDialog.setLocationRelativeTo(null);
		this.splashDialog.setBackground(new Color(0,0,0,0));
		this.splashDialog.setBackground(Color.BLACK);
		this.splashDialog.setVisible(true);
	}

@@ -79,4 +110,10 @@ public class SplashFrame {
		this.splashDialog.dispose();
	}

	public void updateStatus(String text) {
		if (this.splashPanel != null) {
			this.splashPanel.updateStatus(text);
		}
	}

}

157
src/main/java/org/qortal/list/ResourceList.java
Normal file
157
src/main/java/org/qortal/list/ResourceList.java
Normal file
@ -0,0 +1,157 @@
|
||||
package org.qortal.list;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.json.JSONArray;
|
||||
import org.qortal.settings.Settings;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
public class ResourceList {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(ResourceList.class);
|
||||
|
||||
private String category;
|
||||
private String resourceName;
|
||||
private List<String> list;
|
||||
|
||||
/**
|
||||
* ResourceList
|
||||
* Creates or updates a list for the purpose of tracking resources on the Qortal network
|
||||
* This can be used for local blocking, or even for curating and sharing content lists
|
||||
* Lists are backed off to JSON files (in the lists folder) to ease sharing between nodes and users
|
||||
*
|
||||
* @param category - for instance "blacklist", "whitelist", or "userlist"
|
||||
* @param resourceName - for instance "address", "poll", or "group"
|
||||
* @throws IOException
|
||||
*/
|
||||
public ResourceList(String category, String resourceName) throws IOException {
|
||||
this.category = category;
|
||||
this.resourceName = resourceName;
|
||||
this.list = new ArrayList<>();
|
||||
this.load();
|
||||
}
|
||||
|
||||
|
||||
/* Filesystem */
|
||||
|
||||
private Path getFilePath() {
|
||||
String pathString = String.format("%s%s%s_%s.json", Settings.getInstance().getListsPath(),
|
||||
File.separator, this.resourceName, this.category);
|
||||
return Paths.get(pathString);
|
||||
}
|
||||
|
||||
public void save() throws IOException {
|
||||
if (this.resourceName == null) {
|
||||
throw new IllegalStateException("Can't save list with missing resource name");
|
||||
}
|
||||
if (this.category == null) {
|
||||
throw new IllegalStateException("Can't save list with missing category");
|
||||
}
|
||||
String jsonString = ResourceList.listToJSONString(this.list);
|
||||
Path filePath = this.getFilePath();
|
||||
|
||||
// Create parent directory if needed
|
||||
try {
|
||||
Files.createDirectories(filePath.getParent());
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException("Unable to create lists directory");
|
||||
}
|
||||
|
||||
BufferedWriter writer = new BufferedWriter(new FileWriter(filePath.toString()));
|
||||
writer.write(jsonString);
|
||||
writer.close();
|
||||
}
|
||||
|
||||
private boolean load() throws IOException {
|
||||
Path path = this.getFilePath();
|
||||
File resourceListFile = new File(path.toString());
|
||||
if (!resourceListFile.exists()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
String jsonString = new String(Files.readAllBytes(path));
|
||||
this.list = ResourceList.listFromJSONString(jsonString);
|
||||
} catch (IOException e) {
|
||||
throw new IOException(String.format("Couldn't read contents from file %s", path.toString()));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
public boolean revert() {
|
||||
try {
|
||||
return this.load();
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to revert {} {}", this.resourceName, this.category);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/* List management */
|
||||
|
||||
public void add(String resource) {
|
||||
if (resource == null || this.list == null) {
|
||||
return;
|
||||
}
|
||||
if (!this.contains(resource)) {
|
||||
this.list.add(resource);
|
||||
}
|
||||
}
|
||||
|
||||
public void remove(String resource) {
|
||||
if (resource == null || this.list == null) {
|
||||
return;
|
||||
}
|
||||
this.list.remove(resource);
|
||||
}
|
||||
|
||||
public boolean contains(String resource) {
|
||||
if (resource == null || this.list == null) {
|
||||
return false;
|
||||
}
|
||||
return this.list.contains(resource);
|
||||
}
|
||||
|
||||
|
||||
/* Utils */
|
||||
|
||||
public static String listToJSONString(List<String> list) {
|
||||
if (list == null) {
|
||||
return null;
|
||||
}
|
||||
JSONArray items = new JSONArray();
|
||||
for (String item : list) {
|
||||
items.put(item);
|
||||
}
|
||||
return items.toString(4);
|
||||
}
|
||||
|
||||
private static List<String> listFromJSONString(String jsonString) {
|
||||
if (jsonString == null) {
|
||||
return null;
|
||||
}
|
||||
JSONArray jsonList = new JSONArray(jsonString);
|
||||
List<String> resourceList = new ArrayList<>();
|
||||
for (int i=0; i<jsonList.length(); i++) {
|
||||
String item = (String)jsonList.get(i);
|
||||
resourceList.add(item);
|
||||
}
|
||||
return resourceList;
|
||||
}
|
||||
|
||||
public String getJSONString() {
|
||||
return ResourceList.listToJSONString(this.list);
|
||||
}
|
||||
|
||||
}
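As a reading aid, a minimal usage sketch of the class above (hypothetical snippet, not part of this commit; the entry string is a placeholder, and IOException handling is omitted):

// Hypothetical sketch: an address blacklist backed by <listsPath>/address_blacklist.json
ResourceList blacklist = new ResourceList("blacklist", "address");
blacklist.add("EXAMPLE-ADDRESS");   // ignored if already present
blacklist.save();                   // writes the pretty-printed JSON array to disk
boolean blocked = blacklist.contains("EXAMPLE-ADDRESS");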
|
95
src/main/java/org/qortal/list/ResourceListManager.java
Normal file
@ -0,0 +1,95 @@
|
||||
package org.qortal.list;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
public class ResourceListManager {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(ResourceListManager.class);
|
||||
|
||||
private static ResourceListManager instance;
|
||||
private ResourceList addressBlacklist;
|
||||
|
||||
public ResourceListManager() {
|
||||
try {
|
||||
this.addressBlacklist = new ResourceList("blacklist", "address");
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Error while loading address blacklist. Blocking is currently unavailable.");
|
||||
}
|
||||
}
|
||||
|
||||
public static synchronized ResourceListManager getInstance() {
|
||||
if (instance == null) {
|
||||
instance = new ResourceListManager();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
public boolean addAddressToBlacklist(String address, boolean save) {
|
||||
try {
|
||||
this.addressBlacklist.add(address);
|
||||
if (save) {
|
||||
this.addressBlacklist.save();
|
||||
}
|
||||
return true;
|
||||
|
||||
} catch (IllegalStateException | IOException e) {
|
||||
LOGGER.info("Unable to add address to blacklist", e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean removeAddressFromBlacklist(String address, boolean save) {
|
||||
try {
|
||||
this.addressBlacklist.remove(address);
|
||||
|
||||
if (save) {
|
||||
this.addressBlacklist.save();
|
||||
}
|
||||
return true;
|
||||
|
||||
} catch (IllegalStateException | IOException e) {
|
||||
LOGGER.info("Unable to remove address from blacklist", e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isAddressInBlacklist(String address) {
|
||||
if (this.addressBlacklist == null) {
|
||||
return false;
|
||||
}
|
||||
return this.addressBlacklist.contains(address);
|
||||
}
|
||||
|
||||
public void saveBlacklist() {
|
||||
if (this.addressBlacklist == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
this.addressBlacklist.save();
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to save blacklist - reverting back to last saved state");
|
||||
this.addressBlacklist.revert();
|
||||
}
|
||||
}
|
||||
|
||||
public void revertBlacklist() {
|
||||
if (this.addressBlacklist == null) {
|
||||
return;
|
||||
}
|
||||
this.addressBlacklist.revert();
|
||||
}
|
||||
|
||||
public String getBlacklistJSONString() {
|
||||
if (this.addressBlacklist == null) {
|
||||
return null;
|
||||
}
|
||||
return this.addressBlacklist.getJSONString();
|
||||
}
|
||||
|
||||
}
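A hedged caller-side sketch of the singleton above (hypothetical code, not part of this commit; the address is a placeholder):

// Hypothetical usage, e.g. from an API resource or content filter
ResourceListManager listManager = ResourceListManager.getInstance();
listManager.addAddressToBlacklist("EXAMPLE-ADDRESS", true);   // true = persist to disk immediately
if (listManager.isAddressInBlacklist("EXAMPLE-ADDRESS")) {
// skip processing content from this address
}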
|
@ -78,9 +78,10 @@ public class Name {
|
||||
// Set name's last-updated timestamp
|
||||
this.nameData.setUpdated(updateNameTransactionData.getTimestamp());
|
||||
|
||||
// Update name and data where appropriate
|
||||
// Update name, reduced name, and data where appropriate
|
||||
if (!updateNameTransactionData.getNewName().isEmpty()) {
|
||||
this.nameData.setName(updateNameTransactionData.getNewName());
|
||||
this.nameData.setReducedName(updateNameTransactionData.getReducedNewName());
|
||||
|
||||
// If we're changing the name, we need to delete old entry
|
||||
this.repository.getNameRepository().delete(updateNameTransactionData.getName());
|
||||
@ -106,6 +107,9 @@ public class Name {
|
||||
// We can find previous 'name' from update transaction
|
||||
this.nameData.setName(updateNameTransactionData.getName());
|
||||
|
||||
// We can derive the previous 'reduced name' from the previous name
|
||||
this.nameData.setReducedName(Unicode.sanitize(updateNameTransactionData.getName()));
|
||||
|
||||
// We might need to hunt for previous data value
|
||||
if (!updateNameTransactionData.getNewData().isEmpty())
|
||||
this.nameData.setData(findPreviousData(nameReference));
|
||||
@ -261,4 +265,8 @@ public class Name {
|
||||
return previousTransactionData.getTimestamp();
|
||||
}
|
||||
|
||||
public NameData getNameData() {
|
||||
return this.nameData;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -688,7 +688,7 @@ public class Network {
|
||||
|
||||
if (peersToDisconnect != null && peersToDisconnect.size() > 0) {
|
||||
for (Peer peer : peersToDisconnect) {
|
||||
LOGGER.info("Forcing disconnection of peer {} because connection age ({} ms) " +
|
||||
LOGGER.debug("Forcing disconnection of peer {} because connection age ({} ms) " +
|
||||
"has reached the maximum ({} ms)", peer, peer.getConnectionAge(), peer.getMaxConnectionAge());
|
||||
peer.disconnect("Connection age too old");
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ public class CachedBlockMessage extends Message {
|
||||
this.block = block;
|
||||
}
|
||||
|
||||
private CachedBlockMessage(byte[] cachedBytes) {
|
||||
public CachedBlockMessage(byte[] cachedBytes) {
|
||||
super(MessageType.BLOCK);
|
||||
|
||||
this.block = null;
|
||||
|
@ -1,5 +1,7 @@
|
||||
package org.qortal.repository;
|
||||
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
@ -103,7 +105,7 @@ public interface ATRepository {
|
||||
/**
|
||||
* Returns all ATStateData for a given block height.
|
||||
* <p>
|
||||
* Unlike <tt>getATState</tt>, only returns ATStateData saved at the given height.
|
||||
* Unlike <tt>getATState</tt>, only returns <i>partial</i> ATStateData saved at the given height.
|
||||
*
|
||||
* @param height
|
||||
* - block height
|
||||
@ -112,6 +114,14 @@ public interface ATRepository {
|
||||
*/
|
||||
public List<ATStateData> getBlockATStatesAtHeight(int height) throws DataException;
|
||||
|
||||
|
||||
/** Rebuild the latest AT states cache, necessary for AT state trimming/pruning.
|
||||
* <p>
|
||||
* NOTE: performs implicit <tt>repository.saveChanges()</tt>.
|
||||
*/
|
||||
public void rebuildLatestAtStates() throws DataException;
|
||||
|
||||
|
||||
/** Returns height of first trimmable AT state. */
|
||||
public int getAtTrimHeight() throws DataException;
|
||||
|
||||
@ -121,12 +131,27 @@ public interface ATRepository {
|
||||
*/
|
||||
public void setAtTrimHeight(int trimHeight) throws DataException;
|
||||
|
||||
/** Hook to allow repository to prepare/cache info for AT state trimming. */
|
||||
public void prepareForAtStateTrimming() throws DataException;
|
||||
|
||||
/** Trims full AT state data between passed heights. Returns number of trimmed rows. */
|
||||
public int trimAtStates(int minHeight, int maxHeight, int limit) throws DataException;
|
||||
|
||||
|
||||
/** Returns height of first prunable AT state. */
|
||||
public int getAtPruneHeight() throws DataException;
|
||||
|
||||
/** Sets new base height for AT state pruning.
|
||||
* <p>
|
||||
* NOTE: performs implicit <tt>repository.saveChanges()</tt>.
|
||||
*/
|
||||
public void setAtPruneHeight(int pruneHeight) throws DataException;
|
||||
|
||||
/** Prunes full AT state data between passed heights. Returns number of pruned rows. */
|
||||
public int pruneAtStates(int minHeight, int maxHeight) throws DataException;
|
||||
|
||||
|
||||
/** Checks for the presence of the ATStatesHeightIndex in repository */
|
||||
public boolean hasAtStatesHeightIndex() throws DataException;
|
||||
|
||||
|
||||
/**
|
||||
* Save ATStateData into repository.
|
||||
* <p>
|
||||
|
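To show how the new trim/prune hooks are intended to fit together, here is a hedged sketch of one plausible maintenance pass (hypothetical code, not part of this commit; the repository variable, batch size, and upper height are illustrative assumptions):

// Hypothetical AT-state trimming pass using the interface above
ATRepository atRepository = repository.getATRepository();
atRepository.rebuildLatestAtStates();                   // refresh the latest-states cache first
atRepository.prepareForAtStateTrimming();
int trimStartHeight = atRepository.getAtTrimHeight();
int batchSize = 100;                                    // illustrative value
int upperTrimmableHeight = trimStartHeight + batchSize; // a real caller derives this from chain state
int trimmed = atRepository.trimAtStates(trimStartHeight, upperTrimmableHeight, batchSize);
if (trimmed > 0) {
atRepository.setAtTrimHeight(upperTrimmableHeight);     // performs an implicit saveChanges()
}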
@ -191,6 +191,8 @@ public interface AccountRepository {
|
||||
|
||||
public List<MintingAccountData> getMintingAccounts() throws DataException;
|
||||
|
||||
public MintingAccountData getMintingAccount(byte[] mintingAccountKey) throws DataException;
|
||||
|
||||
public void save(MintingAccountData mintingAccountData) throws DataException;
|
||||
|
||||
/** Delete minting account info, used by BlockMinter, from repository using passed public or private key. */
|
||||
|
284
src/main/java/org/qortal/repository/BlockArchiveReader.java
Normal file
@ -0,0 +1,284 @@
|
||||
package org.qortal.repository;
|
||||
|
||||
import com.google.common.primitives.Ints;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.block.BlockArchiveData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformer;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import static org.qortal.transform.Transformer.INT_LENGTH;
|
||||
|
||||
import java.io.*;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.*;
|
||||
|
||||
public class BlockArchiveReader {
|
||||
|
||||
private static BlockArchiveReader instance;
|
||||
private Map<String, Triple<Integer, Integer, Integer>> fileListCache = Collections.synchronizedMap(new HashMap<>());
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(BlockArchiveReader.class);
|
||||
|
||||
public BlockArchiveReader() {
|
||||
|
||||
}
|
||||
|
||||
public static synchronized BlockArchiveReader getInstance() {
|
||||
if (instance == null) {
|
||||
instance = new BlockArchiveReader();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
private void fetchFileList() {
|
||||
Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
|
||||
File archiveDirFile = archivePath.toFile();
|
||||
String[] files = archiveDirFile.list();
|
||||
Map<String, Triple<Integer, Integer, Integer>> map = new HashMap<>();
|
||||
|
||||
if (files != null) {
|
||||
for (String file : files) {
|
||||
Path filePath = Paths.get(file);
|
||||
String filename = filePath.getFileName().toString();
|
||||
|
||||
// Parse the filename
|
||||
if (filename == null || !filename.contains("-") || !filename.contains(".")) {
|
||||
// Not a usable file
|
||||
continue;
|
||||
}
|
||||
// Remove the extension and split into two parts
String[] parts = filename.substring(0, filename.lastIndexOf('.')).split("-");
if (parts.length < 2) {
// Not a usable archive filename
continue;
}
try {
Integer startHeight = Integer.parseInt(parts[0]);
Integer endHeight = Integer.parseInt(parts[1]);
Integer range = endHeight - startHeight;
map.put(filename, new Triple<>(startHeight, endHeight, range));
} catch (NumberFormatException e) {
// Skip filenames whose parts aren't numeric heights
}
|
||||
}
|
||||
}
|
||||
this.fileListCache = map;
|
||||
}
|
||||
|
||||
public Triple<BlockData, List<TransactionData>, List<ATStateData>> fetchBlockAtHeight(int height) {
|
||||
if (this.fileListCache.isEmpty()) {
|
||||
this.fetchFileList();
|
||||
}
|
||||
|
||||
byte[] serializedBytes = this.fetchSerializedBlockBytesForHeight(height);
|
||||
if (serializedBytes == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
ByteBuffer byteBuffer = ByteBuffer.wrap(serializedBytes);
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = null;
|
||||
try {
|
||||
blockInfo = BlockTransformer.fromByteBuffer(byteBuffer);
|
||||
if (blockInfo != null && blockInfo.getA() != null) {
|
||||
// Block height is stored outside of the main serialized bytes, so it
|
||||
// won't be set automatically.
|
||||
blockInfo.getA().setHeight(height);
|
||||
}
|
||||
} catch (TransformationException e) {
|
||||
return null;
|
||||
}
|
||||
return blockInfo;
|
||||
}
|
||||
|
||||
public Triple<BlockData, List<TransactionData>, List<ATStateData>> fetchBlockWithSignature(
|
||||
byte[] signature, Repository repository) {
|
||||
|
||||
if (this.fileListCache.isEmpty()) {
|
||||
this.fetchFileList();
|
||||
}
|
||||
|
||||
Integer height = this.fetchHeightForSignature(signature, repository);
|
||||
if (height != null) {
|
||||
return this.fetchBlockAtHeight(height);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public List<Triple<BlockData, List<TransactionData>, List<ATStateData>>> fetchBlocksFromRange(
|
||||
int startHeight, int endHeight) {
|
||||
|
||||
List<Triple<BlockData, List<TransactionData>, List<ATStateData>>> blockInfoList = new ArrayList<>();
|
||||
|
||||
for (int height = startHeight; height <= endHeight; height++) {
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = this.fetchBlockAtHeight(height);
|
||||
if (blockInfo == null) {
|
||||
return blockInfoList;
|
||||
}
|
||||
blockInfoList.add(blockInfo);
|
||||
}
|
||||
return blockInfoList;
|
||||
}
|
||||
|
||||
public Integer fetchHeightForSignature(byte[] signature, Repository repository) {
|
||||
// Lookup the height for the requested signature
|
||||
try {
|
||||
BlockArchiveData archivedBlock = repository.getBlockArchiveRepository().getBlockArchiveDataForSignature(signature);
|
||||
if (archivedBlock == null) {
|
||||
return null;
|
||||
}
|
||||
return archivedBlock.getHeight();
|
||||
|
||||
} catch (DataException e) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public int fetchHeightForTimestamp(long timestamp, Repository repository) {
|
||||
// Lookup the height for the requested timestamp
|
||||
try {
|
||||
return repository.getBlockArchiveRepository().getHeightFromTimestamp(timestamp);
|
||||
|
||||
} catch (DataException e) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
private String getFilenameForHeight(int height) {
for (Map.Entry<String, Triple<Integer, Integer, Integer>> entry : this.fileListCache.entrySet()) {
if (entry.getKey() == null || entry.getValue() == null) {
continue;
}
Triple<Integer, Integer, Integer> heightInfo = entry.getValue();
Integer startHeight = heightInfo.getA();
Integer endHeight = heightInfo.getB();

if (height >= startHeight && height <= endHeight) {
// Found the file covering this height
return entry.getKey();
}
}

return null;
}
|
||||
|
||||
public byte[] fetchSerializedBlockBytesForSignature(byte[] signature, boolean includeHeightPrefix, Repository repository) {
|
||||
|
||||
if (this.fileListCache.isEmpty()) {
|
||||
this.fetchFileList();
|
||||
}
|
||||
|
||||
Integer height = this.fetchHeightForSignature(signature, repository);
|
||||
if (height != null) {
|
||||
byte[] blockBytes = this.fetchSerializedBlockBytesForHeight(height);
|
||||
if (blockBytes == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// When responding to a peer with a BLOCK message, we must prefix the byte array with the block height
|
||||
// This mimics the toData() method in BlockMessage and CachedBlockMessage
|
||||
if (includeHeightPrefix) {
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream(blockBytes.length + INT_LENGTH);
|
||||
try {
|
||||
bytes.write(Ints.toByteArray(height));
|
||||
bytes.write(blockBytes);
|
||||
return bytes.toByteArray();
|
||||
|
||||
} catch (IOException e) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return blockBytes;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public byte[] fetchSerializedBlockBytesForHeight(int height) {
|
||||
String filename = this.getFilenameForHeight(height);
|
||||
if (filename == null) {
|
||||
// We don't have this block in the archive
|
||||
// Invalidate the file list cache in case it is out of date
|
||||
this.invalidateFileListCache();
|
||||
return null;
|
||||
}
|
||||
|
||||
Path filePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive", filename).toAbsolutePath();
|
||||
RandomAccessFile file = null;
|
||||
try {
|
||||
file = new RandomAccessFile(filePath.toString(), "r");
|
||||
// Get info about this file (the "fixed length header")
|
||||
final int version = file.readInt(); // Do not remove or comment out, as it is moving the file pointer
|
||||
final int startHeight = file.readInt(); // Do not remove or comment out, as it is moving the file pointer
|
||||
final int endHeight = file.readInt(); // Do not remove or comment out, as it is moving the file pointer
|
||||
file.readInt(); // Block count (unused) // Do not remove or comment out, as it is moving the file pointer
|
||||
final int variableHeaderLength = file.readInt(); // Do not remove or comment out, as it is moving the file pointer
|
||||
final int fixedHeaderLength = (int)file.getFilePointer();
|
||||
// End of fixed length header
|
||||
|
||||
// Make sure the version is one we recognize
|
||||
if (version != 1) {
|
||||
LOGGER.info("Error: unknown version in file {}: {}", filename, version);
|
||||
return null;
|
||||
}
|
||||
|
||||
// Verify that the block is within the reported range
|
||||
if (height < startHeight || height > endHeight) {
|
||||
LOGGER.info("Error: requested height {} but the range of file {} is {}-{}",
|
||||
height, filename, startHeight, endHeight);
|
||||
return null;
|
||||
}
|
||||
|
||||
// Seek to the location of the block index in the variable length header
|
||||
final int locationOfBlockIndexInVariableHeaderSegment = (height - startHeight) * INT_LENGTH;
|
||||
file.seek(fixedHeaderLength + locationOfBlockIndexInVariableHeaderSegment);
|
||||
|
||||
// Read the value to obtain the index of this block in the data segment
|
||||
int locationOfBlockInDataSegment = file.readInt();
|
||||
|
||||
// Now seek to the block data itself
|
||||
int dataSegmentStartIndex = fixedHeaderLength + variableHeaderLength + INT_LENGTH; // Confirmed correct
|
||||
file.seek(dataSegmentStartIndex + locationOfBlockInDataSegment);
|
||||
|
||||
// Read the block metadata
|
||||
int blockHeight = file.readInt(); // Do not remove or comment out, as it is moving the file pointer
|
||||
int blockLength = file.readInt(); // Do not remove or comment out, as it is moving the file pointer
|
||||
|
||||
// Ensure the block height matches the one requested
|
||||
if (blockHeight != height) {
|
||||
LOGGER.info("Error: height {} does not match requested: {}", blockHeight, height);
|
||||
return null;
|
||||
}
|
||||
|
||||
// Now retrieve the block's serialized bytes
|
||||
byte[] blockBytes = new byte[blockLength];
|
||||
file.readFully(blockBytes); // readFully() guards against a short read
|
||||
|
||||
return blockBytes;
|
||||
|
||||
} catch (FileNotFoundException e) {
|
||||
LOGGER.info("File {} not found: {}", filename, e.getMessage());
|
||||
return null;
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to read block {} from archive: {}", height, e.getMessage());
|
||||
return null;
|
||||
}
|
||||
finally {
|
||||
// Close the file
|
||||
if (file != null) {
|
||||
try {
|
||||
file.close();
|
||||
} catch (IOException e) {
|
||||
// Failed to close, but no need to handle this
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void invalidateFileListCache() {
|
||||
this.fileListCache.clear();
|
||||
}
|
||||
|
||||
}
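For orientation, the archive file layout parsed above is: a fixed header (version, start height, end height, block count, variable-header length), a variable header of per-block int offsets, a data-segment length, then the data segment of (height, length, serialized block) entries. A minimal read sketch follows (hypothetical caller code; the height value is illustrative):

// Hypothetical read of an archived block
BlockArchiveReader reader = BlockArchiveReader.getInstance();
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = reader.fetchBlockAtHeight(10000);
if (blockInfo != null) {
BlockData blockData = blockInfo.getA();            // height is set explicitly by the reader
List<TransactionData> transactions = blockInfo.getB();
}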
|
130
src/main/java/org/qortal/repository/BlockArchiveRepository.java
Normal file
@ -0,0 +1,130 @@
|
||||
package org.qortal.repository;
|
||||
|
||||
import org.qortal.api.model.BlockSignerSummary;
|
||||
import org.qortal.data.block.BlockArchiveData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.block.BlockSummaryData;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public interface BlockArchiveRepository {
|
||||
|
||||
/**
|
||||
* Returns BlockData from archive using block signature.
|
||||
*
|
||||
* @param signature
|
||||
* @return block data, or null if not found in archive.
|
||||
* @throws DataException
|
||||
*/
|
||||
public BlockData fromSignature(byte[] signature) throws DataException;
|
||||
|
||||
/**
|
||||
* Return height of block in archive using block's signature.
|
||||
*
|
||||
* @param signature
|
||||
* @return height, or 0 if not found in blockchain.
|
||||
* @throws DataException
|
||||
*/
|
||||
public int getHeightFromSignature(byte[] signature) throws DataException;
|
||||
|
||||
/**
|
||||
* Returns BlockData from archive using block height.
|
||||
*
|
||||
* @param height
|
||||
* @return block data, or null if not found in blockchain.
|
||||
* @throws DataException
|
||||
*/
|
||||
public BlockData fromHeight(int height) throws DataException;
|
||||
|
||||
/**
|
||||
* Returns a list of BlockData objects from archive using
|
||||
* block height range.
|
||||
*
|
||||
* @param startHeight
|
||||
* @return a list of BlockData objects, or an empty list if
|
||||
* not found in blockchain. It is not guaranteed that all
|
||||
* requested blocks will be returned.
|
||||
* @throws DataException
|
||||
*/
|
||||
public List<BlockData> fromRange(int startHeight, int endHeight) throws DataException;
|
||||
|
||||
/**
|
||||
* Returns BlockData from archive using block reference.
|
||||
* Currently relies on a child block being the one block
|
||||
* higher than its parent. This limitation can be removed
|
||||
* by storing the reference in the BlockArchive table, but
|
||||
* this has been avoided to reduce space.
|
||||
*
|
||||
* @param reference
|
||||
* @return block data, or null if either parent or child
|
||||
* not found in the archive.
|
||||
* @throws DataException
|
||||
*/
|
||||
public BlockData fromReference(byte[] reference) throws DataException;
|
||||
|
||||
/**
|
||||
* Return height of block with timestamp just before passed timestamp.
|
||||
*
|
||||
* @param timestamp
|
||||
* @return height, or 0 if not found in blockchain.
|
||||
* @throws DataException
|
||||
*/
|
||||
public int getHeightFromTimestamp(long timestamp) throws DataException;
|
||||
|
||||
/**
|
||||
* Returns block summaries for blocks signed by passed public key, or reward-share with minter with passed public key.
|
||||
*/
|
||||
public List<BlockSummaryData> getBlockSummariesBySigner(byte[] signerPublicKey, Integer limit, Integer offset, Boolean reverse) throws DataException;
|
||||
|
||||
/**
|
||||
* Returns summaries of block signers, optionally limited to passed addresses.
|
||||
* This combines both the BlockArchive and the Blocks data into a single result set.
|
||||
*/
|
||||
public List<BlockSignerSummary> getBlockSigners(List<String> addresses, Integer limit, Integer offset, Boolean reverse) throws DataException;
|
||||
|
||||
|
||||
/** Returns height of first unarchived block. */
|
||||
public int getBlockArchiveHeight() throws DataException;
|
||||
|
||||
/** Sets new height for block archiving.
|
||||
* <p>
|
||||
* NOTE: performs implicit <tt>repository.saveChanges()</tt>.
|
||||
*/
|
||||
public void setBlockArchiveHeight(int archiveHeight) throws DataException;
|
||||
|
||||
|
||||
/**
|
||||
* Returns the block archive data for a given signature, from the block archive.
|
||||
* <p>
|
||||
* This method will return null if no block archive has been built for the
|
||||
* requested signature. In those cases, the height (and other data) can be
|
||||
* looked up using the Blocks table. This allows a block to be located in
|
||||
* the archive when we only know its signature.
|
||||
* <p>
|
||||
*
|
||||
* @param signature
|
||||
* @throws DataException
|
||||
*/
|
||||
public BlockArchiveData getBlockArchiveDataForSignature(byte[] signature) throws DataException;
|
||||
|
||||
/**
|
||||
* Saves a block archive entry into the repository.
|
||||
* <p>
|
||||
* This can be used to find the height of a block by its signature, without
|
||||
* having access to the block data itself.
|
||||
* <p>
|
||||
*
|
||||
* @param blockArchiveData
|
||||
* @throws DataException
|
||||
*/
|
||||
public void save(BlockArchiveData blockArchiveData) throws DataException;
|
||||
|
||||
/**
|
||||
* Deletes a block archive entry from the repository.
|
||||
*
|
||||
* @param blockArchiveData
|
||||
* @throws DataException
|
||||
*/
|
||||
public void delete(BlockArchiveData blockArchiveData) throws DataException;
|
||||
|
||||
}
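A hedged lookup sketch for the interface above (hypothetical code; the repository and signature variables are assumed, and it assumes the existing BlockRepository exposes a matching fromSignature() lookup for the unarchived case):

// Hypothetical flow: prefer the archive, fall back to the main Blocks table
BlockArchiveRepository archiveRepository = repository.getBlockArchiveRepository();
BlockData blockData = archiveRepository.fromSignature(signature);
if (blockData == null) {
// Not archived (or not yet indexed), so try the unpruned Blocks table
blockData = repository.getBlockRepository().fromSignature(signature);
}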
|
201
src/main/java/org/qortal/repository/BlockArchiveWriter.java
Normal file
@ -0,0 +1,201 @@
|
||||
package org.qortal.repository;
|
||||
|
||||
import com.google.common.primitives.Ints;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.block.Block;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.block.BlockArchiveData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformer;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
|
||||
public class BlockArchiveWriter {
|
||||
|
||||
public enum BlockArchiveWriteResult {
|
||||
OK,
|
||||
STOPPING,
|
||||
NOT_ENOUGH_BLOCKS,
|
||||
BLOCK_NOT_FOUND
|
||||
}
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(BlockArchiveWriter.class);
|
||||
|
||||
public static final long DEFAULT_FILE_SIZE_TARGET = 100 * 1024 * 1024; // 100MiB
|
||||
|
||||
private int startHeight;
|
||||
private final int endHeight;
|
||||
private final Repository repository;
|
||||
|
||||
private long fileSizeTarget = DEFAULT_FILE_SIZE_TARGET;
|
||||
private boolean shouldEnforceFileSizeTarget = true;
|
||||
|
||||
private int writtenCount;
|
||||
private int lastWrittenHeight;
|
||||
private Path outputPath;
|
||||
|
||||
public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) {
|
||||
this.startHeight = startHeight;
|
||||
this.endHeight = endHeight;
|
||||
this.repository = repository;
|
||||
}
|
||||
|
||||
public static int getMaxArchiveHeight(Repository repository) throws DataException {
|
||||
// We must only archive trimmed blocks, or the archive will grow far too large
|
||||
final int accountSignaturesTrimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
|
||||
final int atTrimStartHeight = repository.getATRepository().getAtTrimHeight();
|
||||
final int trimStartHeight = Math.min(accountSignaturesTrimStartHeight, atTrimStartHeight);
|
||||
return trimStartHeight - 1; // subtract 1 because these values represent the first _untrimmed_ block
|
||||
}
|
||||
|
||||
public static boolean isArchiverUpToDate(Repository repository) throws DataException {
|
||||
final int maxArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
|
||||
final int actualArchiveHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
|
||||
final float progress = (float)actualArchiveHeight / (float) maxArchiveHeight;
|
||||
LOGGER.debug(String.format("maxArchiveHeight: %d, actualArchiveHeight: %d, progress: %f",
|
||||
maxArchiveHeight, actualArchiveHeight, progress));
|
||||
|
||||
// If archiver is within 95% of the maximum, treat it as up to date
|
||||
// We need several percent as an allowance because the archiver will only
|
||||
// save files when they reach the target size
|
||||
return (progress >= 0.95);
|
||||
}
|
||||
|
||||
public BlockArchiveWriteResult write() throws DataException, IOException, TransformationException, InterruptedException {
|
||||
// Create the archive folder if it doesn't exist
|
||||
// This is a subfolder of the db directory, to make bootstrapping easier
|
||||
Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
|
||||
try {
|
||||
Files.createDirectories(archivePath);
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to create archive folder");
|
||||
throw new DataException("Unable to create archive folder");
|
||||
}
|
||||
|
||||
// Determine start height of blocks to fetch
|
||||
if (startHeight <= 2) {
|
||||
// Skip genesis block, as it's not designed to be transmitted, and we can build that from blockchain.json
|
||||
// TODO: include genesis block if we can
|
||||
startHeight = 2;
|
||||
}
|
||||
|
||||
// Header bytes will store the block indexes
|
||||
ByteArrayOutputStream headerBytes = new ByteArrayOutputStream();
|
||||
// Bytes will store the actual block data
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
|
||||
|
||||
LOGGER.info(String.format("Fetching blocks from height %d...", startHeight));
|
||||
int i = 0;
|
||||
while (headerBytes.size() + bytes.size() < this.fileSizeTarget
|| !this.shouldEnforceFileSizeTarget) {

if (Controller.isStopping()) {
return BlockArchiveWriteResult.STOPPING;
}
if (Controller.getInstance().isSynchronizing()) {
// Pause briefly rather than busy-looping while the node synchronizes
Thread.sleep(1000L);
continue;
}
|
||||
|
||||
int currentHeight = startHeight + i;
|
||||
if (currentHeight > endHeight) {
|
||||
break;
|
||||
}
|
||||
|
||||
//LOGGER.info("Fetching block {}...", currentHeight);
|
||||
|
||||
BlockData blockData = repository.getBlockRepository().fromHeight(currentHeight);
|
||||
if (blockData == null) {
|
||||
return BlockArchiveWriteResult.BLOCK_NOT_FOUND;
|
||||
}
|
||||
|
||||
// Write the signature and height into the BlockArchive table
|
||||
BlockArchiveData blockArchiveData = new BlockArchiveData(blockData);
|
||||
repository.getBlockArchiveRepository().save(blockArchiveData);
|
||||
repository.saveChanges();
|
||||
|
||||
// Write the block data to some byte buffers
|
||||
Block block = new Block(repository, blockData);
|
||||
int blockIndex = bytes.size();
|
||||
// Write block index to header
|
||||
headerBytes.write(Ints.toByteArray(blockIndex));
|
||||
// Write block height
|
||||
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
|
||||
byte[] blockBytes = BlockTransformer.toBytes(block);
|
||||
// Write block length
|
||||
bytes.write(Ints.toByteArray(blockBytes.length));
|
||||
// Write block bytes
|
||||
bytes.write(blockBytes);
|
||||
i++;
|
||||
|
||||
}
|
||||
int totalLength = headerBytes.size() + bytes.size();
|
||||
LOGGER.info(String.format("Total length of %d blocks is %d bytes", i, totalLength));
|
||||
|
||||
// Validate file size, in case something went wrong
|
||||
if (totalLength < fileSizeTarget && this.shouldEnforceFileSizeTarget) {
|
||||
return BlockArchiveWriteResult.NOT_ENOUGH_BLOCKS;
|
||||
}
|
||||
|
||||
// We have enough blocks to create a new file
|
||||
int endHeight = startHeight + i - 1;
|
||||
int version = 1;
|
||||
String filePath = String.format("%s/%d-%d.dat", archivePath.toString(), startHeight, endHeight);
|
||||
// try-with-resources ensures the file is closed even if a write fails
try (FileOutputStream fileOutputStream = new FileOutputStream(filePath)) {
// Write version number
fileOutputStream.write(Ints.toByteArray(version));
// Write start height
fileOutputStream.write(Ints.toByteArray(startHeight));
// Write end height
fileOutputStream.write(Ints.toByteArray(endHeight));
// Write total count
fileOutputStream.write(Ints.toByteArray(i));
// Write dynamic header (block indexes) segment length
fileOutputStream.write(Ints.toByteArray(headerBytes.size()));
// Write dynamic header (block indexes) data
headerBytes.writeTo(fileOutputStream);
// Write data segment (block data) length
fileOutputStream.write(Ints.toByteArray(bytes.size()));
// Write data
bytes.writeTo(fileOutputStream);
}
|
||||
|
||||
// Invalidate cache so that the rest of the app picks up the new file
|
||||
BlockArchiveReader.getInstance().invalidateFileListCache();
|
||||
|
||||
this.writtenCount = i;
|
||||
this.lastWrittenHeight = endHeight;
|
||||
this.outputPath = Paths.get(filePath);
|
||||
return BlockArchiveWriteResult.OK;
|
||||
}
|
||||
|
||||
public int getWrittenCount() {
|
||||
return this.writtenCount;
|
||||
}
|
||||
|
||||
public int getLastWrittenHeight() {
|
||||
return this.lastWrittenHeight;
|
||||
}
|
||||
|
||||
public Path getOutputPath() {
|
||||
return this.outputPath;
|
||||
}
|
||||
|
||||
public void setFileSizeTarget(long fileSizeTarget) {
|
||||
this.fileSizeTarget = fileSizeTarget;
|
||||
}
|
||||
|
||||
// For testing, to avoid having to pre-calculate file sizes
|
||||
public void setShouldEnforceFileSizeTarget(boolean shouldEnforceFileSizeTarget) {
|
||||
this.shouldEnforceFileSizeTarget = shouldEnforceFileSizeTarget;
|
||||
}
|
||||
|
||||
}
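A hedged sketch of one plausible archiving pass using the writer above (hypothetical code; the repository variable is assumed and exception handling is omitted):

// Hypothetical archiving pass: archive from the first unarchived block up to the trim point
int startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
int maxHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maxHeight, repository);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
if (result == BlockArchiveWriter.BlockArchiveWriteResult.OK) {
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getLastWrittenHeight() + 1);
}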
|
@ -137,11 +137,6 @@ public interface BlockRepository {
|
||||
*/
|
||||
public List<BlockSummaryData> getBlockSummaries(int firstBlockHeight, int lastBlockHeight) throws DataException;
|
||||
|
||||
/**
|
||||
* Returns block summaries for the passed height range, for API use.
|
||||
*/
|
||||
public List<BlockSummaryData> getBlockSummaries(Integer startHeight, Integer endHeight, Integer count) throws DataException;
|
||||
|
||||
/** Returns height of first trimmable online accounts signatures. */
|
||||
public int getOnlineAccountsSignaturesTrimHeight() throws DataException;
|
||||
|
||||
@ -166,6 +161,20 @@ public interface BlockRepository {
|
||||
*/
|
||||
public BlockData getDetachedBlockSignature(int startHeight) throws DataException;
|
||||
|
||||
|
||||
/** Returns height of first prunable block. */
|
||||
public int getBlockPruneHeight() throws DataException;
|
||||
|
||||
/** Sets new base height for block pruning.
|
||||
* <p>
|
||||
* NOTE: performs implicit <tt>repository.saveChanges()</tt>.
|
||||
*/
|
||||
public void setBlockPruneHeight(int pruneHeight) throws DataException;
|
||||
|
||||
/** Prunes full block data between passed heights. Returns number of pruned rows. */
|
||||
public int pruneBlocks(int minHeight, int maxHeight) throws DataException;
|
||||
|
||||
|
||||
/**
|
||||
* Saves block into repository.
|
||||
*
|
||||
|
509
src/main/java/org/qortal/repository/Bootstrap.java
Normal file
@ -0,0 +1,509 @@
|
||||
package org.qortal.repository;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.account.MintingAccountData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.crosschain.TradeBotData;
|
||||
import org.qortal.gui.SplashFrame;
|
||||
import org.qortal.repository.hsqldb.HSQLDBImportExport;
|
||||
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.SevenZ;
|
||||
|
||||
import java.io.BufferedInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.net.HttpURLConnection;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.nio.file.*;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
|
||||
|
||||
|
||||
public class Bootstrap {
|
||||
|
||||
private Repository repository;
|
||||
|
||||
private int retryMinutes = 1;
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(Bootstrap.class);
|
||||
|
||||
/** The maximum number of untrimmed blocks allowed to be included in a bootstrap, beyond the trim threshold */
|
||||
private static final int MAXIMUM_UNTRIMMED_BLOCKS = 100;
|
||||
|
||||
/** The maximum number of unpruned blocks allowed to be included in a bootstrap, beyond the prune threshold */
|
||||
private static final int MAXIMUM_UNPRUNED_BLOCKS = 100;
|
||||
|
||||
|
||||
public Bootstrap() {
|
||||
}
|
||||
|
||||
public Bootstrap(Repository repository) {
|
||||
this.repository = repository;
|
||||
}
|
||||
|
||||
/**
|
||||
* checkRepositoryState()
|
||||
* Performs basic initial checks to ensure everything is in order
|
||||
* @return true if the repository is ready for bootstrap creation; a DataException is thrown if not
|
||||
* All failure reasons are logged and included in the exception
|
||||
* @throws DataException
|
||||
*/
|
||||
public boolean checkRepositoryState() throws DataException {
|
||||
LOGGER.info("Checking repository state...");
|
||||
|
||||
final boolean isTopOnly = Settings.getInstance().isTopOnly();
|
||||
final boolean archiveEnabled = Settings.getInstance().isArchiveEnabled();
|
||||
|
||||
// Make sure we have a repository instance
|
||||
if (repository == null) {
|
||||
throw new DataException("Repository instance required to check if we can create a bootstrap.");
|
||||
}
|
||||
|
||||
// Require that a block archive has been built
|
||||
if (!isTopOnly && !archiveEnabled) {
|
||||
throw new DataException("Unable to create bootstrap because the block archive isn't enabled. " +
|
||||
"Set {\"archivedEnabled\": true} in settings.json to fix.");
|
||||
}
|
||||
|
||||
// Make sure that the block archiver is up to date
|
||||
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
|
||||
if (!upToDate) {
|
||||
throw new DataException("Unable to create bootstrap because the block archive isn't fully built yet.");
|
||||
}
|
||||
|
||||
// Ensure that this database contains the ATStatesHeightIndex which was missing in some cases
|
||||
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
|
||||
if (!hasAtStatesHeightIndex) {
|
||||
throw new DataException("Unable to create bootstrap due to missing ATStatesHeightIndex. A re-sync from genesis is needed.");
|
||||
}
|
||||
|
||||
// Ensure we have synced NTP time
|
||||
if (NTP.getTime() == null) {
|
||||
throw new DataException("Unable to create bootstrap because the node hasn't synced its time yet.");
|
||||
}
|
||||
|
||||
// Ensure the chain is synced
|
||||
final BlockData chainTip = Controller.getInstance().getChainTip();
|
||||
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
|
||||
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
|
||||
throw new DataException("Unable to create bootstrap because the blockchain isn't fully synced.");
|
||||
}
|
||||
|
||||
// FUTURE: ensure trim and prune settings are using default values
|
||||
|
||||
if (!isTopOnly) {
|
||||
// We don't trim in top-only mode because we prune the blocks instead
|
||||
// If we're not in top-only mode we should make sure that trimming is up to date
|
||||
|
||||
// Ensure that the online account signatures have been fully trimmed
|
||||
final int accountsTrimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
|
||||
final long accountsUpperTrimmableTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
|
||||
final int accountsUpperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(accountsUpperTrimmableTimestamp);
|
||||
final int accountsBlocksRemaining = accountsUpperTrimmableHeight - accountsTrimStartHeight;
|
||||
if (accountsBlocksRemaining > MAXIMUM_UNTRIMMED_BLOCKS) {
|
||||
throw new DataException(String.format("Blockchain is not fully trimmed. Please allow the node to run for longer, " +
|
||||
"then try again. Blocks remaining (online accounts signatures): %d", accountsBlocksRemaining));
|
||||
}
|
||||
|
||||
// Ensure that the AT states data has been fully trimmed
|
||||
final int atTrimStartHeight = repository.getATRepository().getAtTrimHeight();
|
||||
final long atUpperTrimmableTimestamp = chainTip.getTimestamp() - Settings.getInstance().getAtStatesMaxLifetime();
|
||||
final int atUpperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(atUpperTrimmableTimestamp);
|
||||
final int atBlocksRemaining = atUpperTrimmableHeight - atTrimStartHeight;
|
||||
if (atBlocksRemaining > MAXIMUM_UNTRIMMED_BLOCKS) {
|
||||
throw new DataException(String.format("Blockchain is not fully trimmed. Please allow the node to run " +
|
||||
"for longer, then try again. Blocks remaining (AT states): %d", atBlocksRemaining));
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that blocks have been fully pruned
|
||||
final int blockPruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
|
||||
int blockUpperPrunableHeight = chainTip.getHeight() - Settings.getInstance().getPruneBlockLimit();
|
||||
if (archiveEnabled) {
|
||||
blockUpperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
|
||||
}
|
||||
final int blocksPruneRemaining = blockUpperPrunableHeight - blockPruneStartHeight;
|
||||
if (blocksPruneRemaining > MAXIMUM_UNPRUNED_BLOCKS) {
|
||||
throw new DataException(String.format("Blockchain is not fully pruned. Please allow the node to run " +
|
||||
"for longer, then try again. Blocks remaining: %d", blocksPruneRemaining));
|
||||
}
|
||||
|
||||
// Ensure that AT states have been fully pruned
|
||||
final int atPruneStartHeight = repository.getATRepository().getAtPruneHeight();
|
||||
int atUpperPrunableHeight = chainTip.getHeight() - Settings.getInstance().getPruneBlockLimit();
|
||||
if (archiveEnabled) {
|
||||
atUpperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
|
||||
}
|
||||
final int atPruneRemaining = atUpperPrunableHeight - atPruneStartHeight;
|
||||
if (atPruneRemaining > MAXIMUM_UNPRUNED_BLOCKS) {
|
||||
throw new DataException(String.format("Blockchain is not fully pruned. Please allow the node to run " +
|
||||
"for longer, then try again. Blocks remaining (AT states): %d", atPruneRemaining));
|
||||
}
|
||||
|
||||
LOGGER.info("Repository state checks passed");
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* validateBlockchain
|
||||
* Performs quick validation of recent blocks in blockchain, prior to creating a bootstrap
|
||||
* @return true if valid; a DataException is thrown if not
|
||||
* @throws DataException
|
||||
*/
|
||||
public boolean validateBlockchain() throws DataException {
|
||||
LOGGER.info("Validating blockchain...");
|
||||
|
||||
try {
|
||||
BlockChain.validate();
|
||||
|
||||
LOGGER.info("Blockchain is valid");
|
||||
|
||||
return true;
|
||||
} catch (DataException e) {
|
||||
throw new DataException(String.format("Blockchain validation failed: %s", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* validateCompleteBlockchain
|
||||
* Performs intensive validation of all blocks in blockchain
|
||||
* @return true if valid, false if not
|
||||
*/
|
||||
public boolean validateCompleteBlockchain() {
|
||||
LOGGER.info("Validating blockchain...");
|
||||
|
||||
try {
|
||||
// Perform basic startup validation
|
||||
BlockChain.validate();
|
||||
|
||||
// Perform more intensive full-chain validation
|
||||
BlockChain.validateAllBlocks();
|
||||
|
||||
LOGGER.info("Blockchain is valid");
|
||||
|
||||
return true;
|
||||
} catch (DataException e) {
|
||||
LOGGER.info("Blockchain validation failed: {}", e.getMessage());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public String create() throws DataException, InterruptedException, IOException {
|
||||
|
||||
// Make sure we have a repository instance
|
||||
if (repository == null) {
|
||||
throw new DataException("Repository instance required in order to create a boostrap");
|
||||
}
|
||||
|
||||
LOGGER.info("Deleting temp directory if it exists...");
|
||||
this.deleteAllTempDirectories();
|
||||
|
||||
LOGGER.info("Acquiring blockchain lock...");
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
blockchainLock.lockInterruptibly();
|
||||
|
||||
Path inputPath = null;
|
||||
Path outputPath = null;
|
||||
|
||||
try {
|
||||
|
||||
LOGGER.info("Exporting local data...");
|
||||
repository.exportNodeLocalData();
|
||||
|
||||
LOGGER.info("Deleting trade bot states...");
|
||||
List<TradeBotData> allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData();
|
||||
for (TradeBotData tradeBotData : allTradeBotData) {
|
||||
repository.getCrossChainRepository().delete(tradeBotData.getTradePrivateKey());
|
||||
}
|
||||
|
||||
LOGGER.info("Deleting minting accounts...");
|
||||
List<MintingAccountData> mintingAccounts = repository.getAccountRepository().getMintingAccounts();
|
||||
for (MintingAccountData mintingAccount : mintingAccounts) {
|
||||
repository.getAccountRepository().delete(mintingAccount.getPrivateKey());
|
||||
}
|
||||
|
||||
repository.saveChanges();
|
||||
|
||||
LOGGER.info("Deleting peers list...");
|
||||
repository.getNetworkRepository().deleteAllPeers();
|
||||
repository.saveChanges();
|
||||
|
||||
LOGGER.info("Creating bootstrap...");
|
||||
// Timeout if the database isn't ready for backing up after 10 seconds
|
||||
long timeout = 10 * 1000L;
|
||||
repository.backup(false, "bootstrap", timeout);
|
||||
|
||||
LOGGER.info("Moving files to output directory...");
|
||||
inputPath = Paths.get(Settings.getInstance().getRepositoryPath(), "bootstrap");
|
||||
outputPath = Paths.get(this.createTempDirectory().toString(), "bootstrap");
|
||||
|
||||
|
||||
// Move the db backup to a "bootstrap" folder in the root directory
|
||||
Files.move(inputPath, outputPath, REPLACE_EXISTING);
|
||||
|
||||
// If in archive mode, copy the archive folder to inside the bootstrap folder
|
||||
if (!Settings.getInstance().isTopOnly() && Settings.getInstance().isArchiveEnabled()) {
|
||||
FileUtils.copyDirectory(
|
||||
Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toFile(),
|
||||
Paths.get(outputPath.toString(), "archive").toFile()
|
||||
);
|
||||
}
|
||||
|
||||
LOGGER.info("Preparing output path...");
|
||||
Path compressedOutputPath = this.getBootstrapOutputPath();
|
||||
try {
|
||||
Files.delete(compressedOutputPath);
|
||||
} catch (NoSuchFileException e) {
|
||||
// Doesn't exist, so no need to delete
|
||||
}
|
||||
|
||||
LOGGER.info("Compressing...");
|
||||
SevenZ.compress(compressedOutputPath.toString(), outputPath.toFile());
|
||||
|
||||
LOGGER.info("Generating checksum file...");
|
||||
String checksum = Crypto.digestHexString(compressedOutputPath.toFile(), 1024*1024);
|
||||
Path checksumPath = Paths.get(String.format("%s.sha256", compressedOutputPath.toString()));
|
||||
Files.writeString(checksumPath, checksum, StandardOpenOption.CREATE);
|
||||
|
||||
// Return the path to the compressed bootstrap file
|
||||
LOGGER.info("Bootstrap creation complete. Output file: {}", compressedOutputPath.toAbsolutePath().toString());
|
||||
return compressedOutputPath.toAbsolutePath().toString();
|
||||
|
||||
}
|
||||
catch (TimeoutException e) {
|
||||
throw new DataException(String.format("Unable to create bootstrap due to timeout: %s", e.getMessage()));
|
||||
}
|
||||
finally {
|
||||
try {
|
||||
LOGGER.info("Re-importing local data...");
|
||||
Path exportPath = HSQLDBImportExport.getExportDirectory(false);
|
||||
repository.importDataFromFile(Paths.get(exportPath.toString(), "TradeBotStates.json").toString());
|
||||
repository.importDataFromFile(Paths.get(exportPath.toString(), "MintingAccounts.json").toString());
|
||||
repository.saveChanges();
|
||||
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to re-import local data, but created bootstrap is still valid. {}", e);
|
||||
}
|
||||
|
||||
LOGGER.info("Unlocking blockchain...");
|
||||
blockchainLock.unlock();
|
||||
|
||||
// Cleanup
|
||||
LOGGER.info("Cleaning up...");
|
||||
Thread.sleep(5000L);
|
||||
this.deleteAllTempDirectories();
|
||||
}
|
||||
}
|
||||
|
||||
public void startImport() throws InterruptedException {
|
||||
while (!Controller.isStopping()) {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
this.repository = repository;
|
||||
|
||||
this.updateStatus("Starting import of bootstrap...");
|
||||
|
||||
this.doImport();
|
||||
break;
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.info("Bootstrap import failed", e);
|
||||
this.updateStatus(String.format("Bootstrapping failed. Retrying in %d minutes...", retryMinutes));
|
||||
Thread.sleep(retryMinutes * 60 * 1000L);
|
||||
retryMinutes *= 2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void doImport() throws DataException {
|
||||
Path path = null;
|
||||
try {
|
||||
Path tempDir = this.createTempDirectory();
|
||||
String filename = String.format("%s%s", Settings.getInstance().getBootstrapFilenamePrefix(), this.getFilename());
|
||||
path = Paths.get(tempDir.toString(), filename);
|
||||
|
||||
this.downloadToPath(path);
|
||||
this.importFromPath(path);
|
||||
|
||||
} catch (InterruptedException | DataException | IOException e) {
|
||||
throw new DataException("Unable to import bootstrap", e);
|
||||
}
|
||||
finally {
|
||||
if (path != null) {
|
||||
try {
|
||||
Files.delete(path);
|
||||
|
||||
} catch (IOException e) {
|
||||
// Temp folder will be cleaned up below, so ignore this failure
|
||||
}
|
||||
}
|
||||
this.deleteAllTempDirectories();
|
||||
}
|
||||
}
|
||||
|
||||
private String getFilename() {
|
||||
boolean isTopOnly = Settings.getInstance().isTopOnly();
|
||||
boolean archiveEnabled = Settings.getInstance().isArchiveEnabled();
|
||||
boolean isTestnet = Settings.getInstance().isTestNet();
|
||||
String prefix = isTestnet ? "testnet-" : "";
|
||||
|
||||
if (isTopOnly) {
|
||||
return prefix.concat("bootstrap-toponly.7z");
|
||||
}
|
||||
else if (archiveEnabled) {
|
||||
return prefix.concat("bootstrap-archive.7z");
|
||||
}
|
||||
else {
|
||||
return prefix.concat("bootstrap-full.7z");
|
||||
}
|
||||
}
|
||||
|
||||
private void downloadToPath(Path path) throws DataException {
|
||||
String bootstrapHost = this.getRandomHost();
|
||||
String bootstrapFilename = this.getFilename();
|
||||
String bootstrapUrl = String.format("%s/%s", bootstrapHost, bootstrapFilename);
|
||||
String type = Settings.getInstance().isTopOnly() ? "top-only" : "full node";
|
||||
|
||||
SplashFrame.getInstance().updateStatus(String.format("Downloading %s bootstrap...", type));
|
||||
LOGGER.info(String.format("Downloading %s bootstrap from %s ...", type, bootstrapUrl));
|
||||
|
||||
// Delete an existing file if it exists
|
||||
try {
|
||||
Files.delete(path);
|
||||
} catch (IOException e) {
|
||||
// No need to do anything
|
||||
}
|
||||
|
||||
// Get the total file size
|
||||
URL url;
|
||||
long fileSize;
|
||||
try {
|
||||
url = new URL(bootstrapUrl);
|
||||
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
|
||||
connection.setRequestMethod("HEAD");
|
||||
connection.connect();
|
||||
fileSize = connection.getContentLengthLong();
|
||||
connection.disconnect();
|
||||
|
||||
} catch (MalformedURLException e) {
|
||||
throw new DataException(String.format("Malformed URL when downloading bootstrap: %s", e.getMessage()));
|
||||
} catch (IOException e) {
|
||||
throw new DataException(String.format("Unable to get bootstrap file size from %s. " +
|
||||
"Please check your internet connection.", e.getMessage()));
|
||||
}
|
||||
|
||||
// Download the file and update the status with progress
|
||||
try (BufferedInputStream in = new BufferedInputStream(url.openStream());
|
||||
FileOutputStream fileOutputStream = new FileOutputStream(path.toFile())) {
|
||||
byte[] buffer = new byte[1024 * 1024];
|
||||
long downloaded = 0;
|
||||
int bytesRead;
|
||||
while ((bytesRead = in.read(buffer)) != -1) { // read up to the full 1 MiB buffer each pass
|
||||
fileOutputStream.write(buffer, 0, bytesRead);
|
||||
downloaded += bytesRead;
|
||||
|
||||
if (fileSize > 0) {
|
||||
int progress = (int)((double)downloaded / (double)fileSize * 100);
|
||||
SplashFrame.getInstance().updateStatus(String.format("Downloading %s bootstrap... (%d%%)", type, progress));
|
||||
}
|
||||
}
|
||||
|
||||
} catch (IOException e) {
|
||||
throw new DataException(String.format("Unable to download bootstrap: %s", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
public String getRandomHost() {
|
||||
// Select a random host from bootstrapHosts
|
||||
String[] hosts = Settings.getInstance().getBootstrapHosts();
|
||||
int index = new SecureRandom().nextInt(hosts.length);
|
||||
String bootstrapHost = hosts[index];
|
||||
return bootstrapHost;
|
||||
}
|
||||
|
||||
public void importFromPath(Path path) throws InterruptedException, DataException, IOException {
|
||||
|
||||
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
|
||||
blockchainLock.lockInterruptibly();
|
||||
|
||||
try {
|
||||
this.updateStatus("Stopping repository...");
|
||||
// Close the repository while we are still able to
|
||||
// Otherwise, the caller will run into difficulties when it tries to close it
|
||||
repository.discardChanges();
|
||||
repository.close();
|
||||
// Now close the repository factory so that we can swap out the database files
|
||||
RepositoryManager.closeRepositoryFactory();
|
||||
|
||||
this.updateStatus("Deleting existing repository...");
|
||||
Path input = path.toAbsolutePath();
|
||||
Path output = path.toAbsolutePath().getParent().toAbsolutePath();
|
||||
Path inputPath = Paths.get(output.toString(), "bootstrap");
|
||||
Path outputPath = Paths.get(Settings.getInstance().getRepositoryPath());
|
||||
FileUtils.deleteDirectory(outputPath.toFile());
|
||||
|
||||
this.updateStatus("Extracting bootstrap...");
|
||||
SevenZ.decompress(input.toString(), output.toFile());
|
||||
|
||||
if (!inputPath.toFile().exists()) {
|
||||
throw new DataException("Extracted bootstrap doesn't exist");
|
||||
}
|
||||
|
||||
// Move the "bootstrap" folder in place of the "db" folder
|
||||
this.updateStatus("Moving files to output directory...");
|
||||
Files.move(inputPath, outputPath);
|
||||
|
||||
this.updateStatus("Starting repository from bootstrap...");
|
||||
}
|
||||
finally {
|
||||
RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(Controller.getRepositoryUrl());
|
||||
RepositoryManager.setRepositoryFactory(repositoryFactory);
|
||||
|
||||
blockchainLock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
private Path createTempDirectory() throws IOException {
|
||||
Path initialPath = Paths.get(Settings.getInstance().getRepositoryPath()).toAbsolutePath().getParent();
|
||||
String baseDir = Paths.get(initialPath.toString(), "tmp").toFile().getCanonicalPath();
|
||||
String identifier = UUID.randomUUID().toString();
|
||||
Path tempDir = Paths.get(baseDir, identifier);
|
||||
Files.createDirectories(tempDir);
|
||||
return tempDir;
|
||||
}
|
||||
|
||||
private void deleteAllTempDirectories() {
|
||||
Path initialPath = Paths.get(Settings.getInstance().getRepositoryPath()).toAbsolutePath().getParent();
|
||||
Path path = Paths.get(initialPath.toString(), "tmp");
|
||||
try {
|
||||
FileUtils.deleteDirectory(path.toFile());
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to delete temp directory path: {}", path.toString(), e);
|
||||
}
|
||||
}
|
||||
|
||||
public Path getBootstrapOutputPath() {
|
||||
Path initialPath = Paths.get(Settings.getInstance().getRepositoryPath()).toAbsolutePath().getParent();
|
||||
String compressedFilename = String.format("%s%s", Settings.getInstance().getBootstrapFilenamePrefix(), this.getFilename());
|
||||
Path compressedOutputPath = Paths.get(initialPath.toString(), compressedFilename);
|
||||
return compressedOutputPath;
|
||||
}
|
||||
|
||||
private void updateStatus(String text) {
|
||||
LOGGER.info(text);
|
||||
SplashFrame.getInstance().updateStatus(text);
|
||||
}
|
||||
|
||||
}
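Finally, a hedged sketch of how bootstrap creation might be driven from calling code (hypothetical snippet, not part of this commit; exception handling for DataException, IOException and InterruptedException is omitted):

// Hypothetical bootstrap creation, mirroring the create() flow above
try (final Repository repository = RepositoryManager.getRepository()) {
Bootstrap bootstrap = new Bootstrap(repository);
bootstrap.checkRepositoryState();        // throws DataException with the reason if not ready
bootstrap.validateBlockchain();
String outputFile = bootstrap.create();  // path to the compressed .7z bootstrap
}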
|
@ -1,5 +1,8 @@
|
||||
package org.qortal.repository;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
public interface Repository extends AutoCloseable {
|
||||
|
||||
public ATRepository getATRepository();
|
||||
@ -12,6 +15,8 @@ public interface Repository extends AutoCloseable {
|
||||
|
||||
public BlockRepository getBlockRepository();
|
||||
|
||||
public BlockArchiveRepository getBlockArchiveRepository();
|
||||
|
||||
public ChatRepository getChatRepository();
|
||||
|
||||
public CrossChainRepository getCrossChainRepository();
|
||||
@ -45,14 +50,16 @@ public interface Repository extends AutoCloseable {
|
||||
|
||||
public void setDebug(boolean debugState);
|
||||
|
||||
public void backup(boolean quick) throws DataException;
|
||||
public void backup(boolean quick, String name, Long timeout) throws DataException, TimeoutException;
|
||||
|
||||
public void performPeriodicMaintenance() throws DataException;
|
||||
public void performPeriodicMaintenance(Long timeout) throws DataException, TimeoutException;
|
||||
|
||||
public void exportNodeLocalData() throws DataException;
|
||||
|
||||
public void importDataFromFile(String filename) throws DataException;
|
||||
public void importDataFromFile(String filename) throws DataException, IOException;
|
||||
|
||||
public void checkConsistency() throws DataException;
|
||||
|
||||
public static void attemptRecovery(String connectionUrl, String name) throws DataException {}
|
||||
|
||||
}
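A hedged sketch of a call site for the widened interface above: backup() and performPeriodicMaintenance() now accept an optional timeout and may throw TimeoutException, and importDataFromFile() may throw IOException. The timeout values (assumed to be milliseconds) and the backup name are illustrative:

    // Illustrative only; obtain the repository however the surrounding code normally does
    try (final Repository repository = RepositoryManager.getRepository()) {
        repository.backup(true, "backup", 60 * 60 * 1000L);        // quick backup, assumed 1-hour timeout
        repository.performPeriodicMaintenance(10 * 60 * 1000L);    // assumed 10-minute timeout
    } catch (TimeoutException e) {
        // The requested timeout elapsed before the operation completed
    } catch (DataException e) {
        // Underlying repository failure
    }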
|
||||
|
@ -1,8 +1,18 @@
|
||||
package org.qortal.repository;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.gui.SplashFrame;
|
||||
import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving;
|
||||
import org.qortal.repository.hsqldb.HSQLDBDatabasePruning;
|
||||
import org.qortal.repository.hsqldb.HSQLDBRepository;
|
||||
import org.qortal.settings.Settings;
|
||||
|
||||
import java.sql.SQLException;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
public abstract class RepositoryManager {
|
||||
private static final Logger LOGGER = LogManager.getLogger(RepositoryManager.class);
|
||||
|
||||
private static RepositoryFactory repositoryFactory = null;
|
||||
|
||||
@ -43,14 +53,60 @@ public abstract class RepositoryManager {
|
||||
repositoryFactory = null;
|
||||
}
|
||||
|
||||
public static void backup(boolean quick) {
|
||||
public static void backup(boolean quick, String name, Long timeout) throws TimeoutException {
|
||||
try (final Repository repository = getRepository()) {
|
||||
repository.backup(quick);
|
||||
repository.backup(quick, name, timeout);
|
||||
} catch (DataException e) {
|
||||
// Backup is best-effort so don't complain
|
||||
}
|
||||
}
|
||||
|
||||
public static boolean archive(Repository repository) {
|
||||
// Bulk archive the database the first time we use archive mode
|
||||
if (Settings.getInstance().isArchiveEnabled()) {
|
||||
if (RepositoryManager.canArchiveOrPrune()) {
|
||||
try {
|
||||
return HSQLDBDatabaseArchiving.buildBlockArchive(repository, BlockArchiveWriter.DEFAULT_FILE_SIZE_TARGET);
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.info("Unable to build block archive. The database may have been left in an inconsistent state.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
LOGGER.info("Unable to build block archive due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
|
||||
LOGGER.info("To bootstrap, stop the core and delete the db folder, then start the core again.");
|
||||
SplashFrame.getInstance().updateStatus("Missing index. Bootstrapping is recommended.");
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public static boolean prune(Repository repository) {
|
||||
// Bulk prune the database the first time we use top-only or block archive mode
|
||||
if (Settings.getInstance().isTopOnly() ||
|
||||
Settings.getInstance().isArchiveEnabled()) {
|
||||
if (RepositoryManager.canArchiveOrPrune()) {
|
||||
try {
|
||||
boolean prunedATStates = HSQLDBDatabasePruning.pruneATStates((HSQLDBRepository) repository);
|
||||
boolean prunedBlocks = HSQLDBDatabasePruning.pruneBlocks((HSQLDBRepository) repository);
|
||||
|
||||
// Perform repository maintenance to shrink the db size down
|
||||
if (prunedATStates && prunedBlocks) {
|
||||
HSQLDBDatabasePruning.performMaintenance(repository);
|
||||
return true;
|
||||
}
|
||||
|
||||
} catch (SQLException | DataException e) {
|
||||
LOGGER.info("Unable to bulk prune AT states. The database may have been left in an inconsistent state.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
LOGGER.info("Unable to prune blocks due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public static void setRequestedCheckpoint(Boolean quick) {
|
||||
quickCheckpointRequested = quick;
|
||||
}
|
||||
@ -77,4 +133,12 @@ public abstract class RepositoryManager {
|
||||
return SQLException.class.isInstance(cause) && repositoryFactory.isDeadlockException((SQLException) cause);
|
||||
}
|
||||
|
||||
public static boolean canArchiveOrPrune() {
|
||||
try (final Repository repository = getRepository()) {
|
||||
return repository.getATRepository().hasAtStatesHeightIndex();
|
||||
} catch (DataException e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
}
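For context, a hedged sketch of how the bulk helpers above are intended to be driven once at startup, before normal syncing begins; the error handling is condensed and the exact call order is an assumption based on the comments above:

    // Condensed, illustrative startup sequence
    try (final Repository repository = RepositoryManager.getRepository()) {
        RepositoryManager.archive(repository);   // bulk-build the block archive if archive mode is enabled
        RepositoryManager.prune(repository);     // bulk-prune AT states and blocks in top-only/archive mode
    } catch (DataException e) {
        // Unable to obtain repository; bulk archiving/pruning is skipped
    }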
|
||||
|
@ -8,6 +8,7 @@ import java.util.Set;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.at.ATData;
|
||||
import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.repository.ATRepository;
|
||||
@ -32,7 +33,7 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
public ATData fromATAddress(String atAddress) throws DataException {
|
||||
String sql = "SELECT creator, created_when, version, asset_id, code_bytes, code_hash, "
|
||||
+ "is_sleeping, sleep_until_height, is_finished, had_fatal_error, "
|
||||
+ "is_frozen, frozen_balance "
|
||||
+ "is_frozen, frozen_balance, sleep_until_message_timestamp "
|
||||
+ "FROM ATs "
|
||||
+ "WHERE AT_address = ? LIMIT 1";
|
||||
|
||||
@ -60,8 +61,13 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
if (frozenBalance == 0 && resultSet.wasNull())
|
||||
frozenBalance = null;
|
||||
|
||||
Long sleepUntilMessageTimestamp = resultSet.getLong(13);
|
||||
if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull())
|
||||
sleepUntilMessageTimestamp = null;
|
||||
|
||||
return new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash,
|
||||
isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance);
|
||||
isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance,
|
||||
sleepUntilMessageTimestamp);
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch AT from repository", e);
|
||||
}
|
||||
@ -94,7 +100,7 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
public List<ATData> getAllExecutableATs() throws DataException {
|
||||
String sql = "SELECT AT_address, creator, created_when, version, asset_id, code_bytes, code_hash, "
|
||||
+ "is_sleeping, sleep_until_height, had_fatal_error, "
|
||||
+ "is_frozen, frozen_balance "
|
||||
+ "is_frozen, frozen_balance, sleep_until_message_timestamp "
|
||||
+ "FROM ATs "
|
||||
+ "WHERE is_finished = false "
|
||||
+ "ORDER BY created_when ASC";
|
||||
@ -128,8 +134,13 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
if (frozenBalance == 0 && resultSet.wasNull())
|
||||
frozenBalance = null;
|
||||
|
||||
Long sleepUntilMessageTimestamp = resultSet.getLong(13);
|
||||
if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull())
|
||||
sleepUntilMessageTimestamp = null;
|
||||
|
||||
ATData atData = new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash,
|
||||
isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance);
|
||||
isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance,
|
||||
sleepUntilMessageTimestamp);
|
||||
|
||||
executableATs.add(atData);
|
||||
} while (resultSet.next());
|
||||
@ -147,7 +158,7 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
|
||||
sql.append("SELECT AT_address, creator, created_when, version, asset_id, code_bytes, ")
|
||||
.append("is_sleeping, sleep_until_height, is_finished, had_fatal_error, ")
|
||||
.append("is_frozen, frozen_balance ")
|
||||
.append("is_frozen, frozen_balance, sleep_until_message_timestamp ")
|
||||
.append("FROM ATs ")
|
||||
.append("WHERE code_hash = ? ");
|
||||
bindParams.add(codeHash);
|
||||
@ -191,8 +202,13 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
if (frozenBalance == 0 && resultSet.wasNull())
|
||||
frozenBalance = null;
|
||||
|
||||
Long sleepUntilMessageTimestamp = resultSet.getLong(13);
|
||||
if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull())
|
||||
sleepUntilMessageTimestamp = null;
|
||||
|
||||
ATData atData = new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash,
|
||||
isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance);
|
||||
isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance,
|
||||
sleepUntilMessageTimestamp);
|
||||
|
||||
matchingATs.add(atData);
|
||||
} while (resultSet.next());
|
||||
@ -210,7 +226,7 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
|
||||
sql.append("SELECT AT_address, creator, created_when, version, asset_id, code_bytes, ")
|
||||
.append("is_sleeping, sleep_until_height, is_finished, had_fatal_error, ")
|
||||
.append("is_frozen, frozen_balance, code_hash ")
|
||||
.append("is_frozen, frozen_balance, code_hash, sleep_until_message_timestamp ")
|
||||
.append("FROM ");
|
||||
|
||||
// (VALUES (?), (?), ...) AS ATCodeHashes (code_hash)
|
||||
@ -264,9 +280,10 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
frozenBalance = null;
|
||||
|
||||
byte[] codeHash = resultSet.getBytes(13);
|
||||
Long sleepUntilMessageTimestamp = resultSet.getLong(14);
|
||||
|
||||
ATData atData = new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash,
|
||||
isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance);
|
||||
isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance, sleepUntilMessageTimestamp);
|
||||
|
||||
matchingATs.add(atData);
|
||||
} while (resultSet.next());
|
||||
@ -305,7 +322,7 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
.bind("code_bytes", atData.getCodeBytes()).bind("code_hash", atData.getCodeHash())
|
||||
.bind("is_sleeping", atData.getIsSleeping()).bind("sleep_until_height", atData.getSleepUntilHeight())
|
||||
.bind("is_finished", atData.getIsFinished()).bind("had_fatal_error", atData.getHadFatalError()).bind("is_frozen", atData.getIsFrozen())
|
||||
.bind("frozen_balance", atData.getFrozenBalance());
|
||||
.bind("frozen_balance", atData.getFrozenBalance()).bind("sleep_until_message_timestamp", atData.getSleepUntilMessageTimestamp());
|
||||
|
||||
try {
|
||||
saveHelper.execute(this.repository);
|
||||
@ -328,7 +345,7 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
|
||||
@Override
|
||||
public ATStateData getATStateAtHeight(String atAddress, int height) throws DataException {
|
||||
String sql = "SELECT state_data, state_hash, fees, is_initial "
|
||||
String sql = "SELECT state_data, state_hash, fees, is_initial, sleep_until_message_timestamp "
|
||||
+ "FROM ATStates "
|
||||
+ "LEFT OUTER JOIN ATStatesData USING (AT_address, height) "
|
||||
+ "WHERE ATStates.AT_address = ? AND ATStates.height = ? "
|
||||
@ -343,7 +360,11 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
long fees = resultSet.getLong(3);
|
||||
boolean isInitial = resultSet.getBoolean(4);
|
||||
|
||||
return new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial);
|
||||
Long sleepUntilMessageTimestamp = resultSet.getLong(5);
|
||||
if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull())
|
||||
sleepUntilMessageTimestamp = null;
|
||||
|
||||
return new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial, sleepUntilMessageTimestamp);
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch AT state from repository", e);
|
||||
}
|
||||
@ -351,7 +372,7 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
|
||||
@Override
|
||||
public ATStateData getLatestATState(String atAddress) throws DataException {
|
||||
String sql = "SELECT height, state_data, state_hash, fees, is_initial "
|
||||
String sql = "SELECT height, state_data, state_hash, fees, is_initial, sleep_until_message_timestamp "
|
||||
+ "FROM ATStates "
|
||||
+ "JOIN ATStatesData USING (AT_address, height) "
|
||||
+ "WHERE ATStates.AT_address = ? "
|
||||
@ -370,7 +391,11 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
long fees = resultSet.getLong(4);
|
||||
boolean isInitial = resultSet.getBoolean(5);
|
||||
|
||||
return new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial);
|
||||
Long sleepUntilMessageTimestamp = resultSet.getLong(6);
|
||||
if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull())
|
||||
sleepUntilMessageTimestamp = null;
|
||||
|
||||
return new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial, sleepUntilMessageTimestamp);
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch latest AT state from repository", e);
|
||||
}
|
||||
@ -383,10 +408,10 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
StringBuilder sql = new StringBuilder(1024);
|
||||
List<Object> bindParams = new ArrayList<>();
|
||||
|
||||
sql.append("SELECT AT_address, height, state_data, state_hash, fees, is_initial "
|
||||
sql.append("SELECT AT_address, height, state_data, state_hash, fees, is_initial, FinalATStates.sleep_until_message_timestamp "
|
||||
+ "FROM ATs "
|
||||
+ "CROSS JOIN LATERAL("
|
||||
+ "SELECT height, state_data, state_hash, fees, is_initial "
|
||||
+ "SELECT height, state_data, state_hash, fees, is_initial, sleep_until_message_timestamp "
|
||||
+ "FROM ATStates "
|
||||
+ "JOIN ATStatesData USING (AT_address, height) "
|
||||
+ "WHERE ATStates.AT_address = ATs.AT_address ");
|
||||
@ -440,7 +465,11 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
long fees = resultSet.getLong(5);
|
||||
boolean isInitial = resultSet.getBoolean(6);
|
||||
|
||||
ATStateData atStateData = new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial);
|
||||
Long sleepUntilMessageTimestamp = resultSet.getLong(7);
|
||||
if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull())
|
||||
sleepUntilMessageTimestamp = null;
|
||||
|
||||
ATStateData atStateData = new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial, sleepUntilMessageTimestamp);
|
||||
|
||||
atStates.add(atStateData);
|
||||
} while (resultSet.next());
|
||||
@ -471,7 +500,7 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
StringBuilder sql = new StringBuilder(1024);
|
||||
List<Object> bindParams = new ArrayList<>();
|
||||
|
||||
sql.append("SELECT AT_address, height, state_data, state_hash, fees, is_initial "
|
||||
sql.append("SELECT AT_address, height, state_data, state_hash, fees, is_initial, sleep_until_message_timestamp "
|
||||
+ "FROM ATs "
|
||||
+ "CROSS JOIN LATERAL("
|
||||
+ "SELECT height, state_data, state_hash, fees, is_initial "
|
||||
@ -526,8 +555,10 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
byte[] stateHash = resultSet.getBytes(4);
|
||||
long fees = resultSet.getLong(5);
|
||||
boolean isInitial = resultSet.getBoolean(6);
|
||||
Long sleepUntilMessageTimestamp = resultSet.getLong(7);
|
||||
|
||||
ATStateData atStateData = new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial);
|
||||
ATStateData atStateData = new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial,
|
||||
sleepUntilMessageTimestamp);
|
||||
|
||||
atStates.add(atStateData);
|
||||
} while (resultSet.next());
|
||||
@ -570,6 +601,44 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
return atStates;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void rebuildLatestAtStates() throws DataException {
|
||||
// latestATStatesLock is to prevent concurrent updates on LatestATStates
|
||||
// that could result in one process using a partial or empty dataset
|
||||
// because it was in the process of being rebuilt by another thread
|
||||
synchronized (this.repository.latestATStatesLock) {
|
||||
LOGGER.trace("Rebuilding latest AT states...");
|
||||
|
||||
// Rebuild cache of latest AT states that we can't trim
|
||||
String deleteSql = "DELETE FROM LatestATStates";
|
||||
try {
|
||||
this.repository.executeCheckedUpdate(deleteSql);
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to delete temporary latest AT states cache from repository", e);
|
||||
}
|
||||
|
||||
String insertSql = "INSERT INTO LatestATStates ("
|
||||
+ "SELECT AT_address, height FROM ATs "
|
||||
+ "CROSS JOIN LATERAL("
|
||||
+ "SELECT height FROM ATStates "
|
||||
+ "WHERE ATStates.AT_address = ATs.AT_address "
|
||||
+ "ORDER BY AT_address DESC, height DESC LIMIT 1"
|
||||
+ ") "
|
||||
+ ")";
|
||||
try {
|
||||
this.repository.executeCheckedUpdate(insertSql);
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to populate temporary latest AT states cache in repository", e);
|
||||
}
|
||||
this.repository.saveChanges();
|
||||
LOGGER.trace("Rebuilt latest AT states");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int getAtTrimHeight() throws DataException {
|
||||
String sql = "SELECT AT_trim_height FROM DatabaseInfo";
|
||||
@ -595,63 +664,153 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
this.repository.executeCheckedUpdate(updateSql, trimHeight);
|
||||
this.repository.saveChanges();
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
this.repository.examineException(e);
|
||||
throw new DataException("Unable to set AT state trim height in repository", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareForAtStateTrimming() throws DataException {
|
||||
// Rebuild cache of latest AT states that we can't trim
|
||||
String deleteSql = "DELETE FROM LatestATStates";
|
||||
try {
|
||||
this.repository.executeCheckedUpdate(deleteSql);
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to delete temporary latest AT states cache from repository", e);
|
||||
}
|
||||
|
||||
String insertSql = "INSERT INTO LatestATStates ("
|
||||
+ "SELECT AT_address, height FROM ATs "
|
||||
+ "CROSS JOIN LATERAL("
|
||||
+ "SELECT height FROM ATStates "
|
||||
+ "WHERE ATStates.AT_address = ATs.AT_address "
|
||||
+ "ORDER BY AT_address DESC, height DESC LIMIT 1"
|
||||
+ ") "
|
||||
+ ")";
|
||||
try {
|
||||
this.repository.executeCheckedUpdate(insertSql);
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to populate temporary latest AT states cache in repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int trimAtStates(int minHeight, int maxHeight, int limit) throws DataException {
|
||||
if (minHeight >= maxHeight)
|
||||
return 0;
|
||||
|
||||
// We're often called so no need to trim all states in one go.
|
||||
// Limit updates to reduce CPU and memory load.
|
||||
String sql = "DELETE FROM ATStatesData "
|
||||
+ "WHERE height BETWEEN ? AND ? "
|
||||
+ "AND NOT EXISTS("
|
||||
// latestATStatesLock is to prevent concurrent updates on LatestATStates
|
||||
// that could result in one process using a partial or empty dataset
|
||||
// because it was in the process of being rebuilt by another thread
|
||||
synchronized (this.repository.latestATStatesLock) {
|
||||
|
||||
// We're often called so no need to trim all states in one go.
|
||||
// Limit updates to reduce CPU and memory load.
|
||||
String sql = "DELETE FROM ATStatesData "
|
||||
+ "WHERE height BETWEEN ? AND ? "
|
||||
+ "AND NOT EXISTS("
|
||||
+ "SELECT TRUE FROM LatestATStates "
|
||||
+ "WHERE LatestATStates.AT_address = ATStatesData.AT_address "
|
||||
+ "AND LatestATStates.height = ATStatesData.height"
|
||||
+ ") "
|
||||
+ "LIMIT ?";
|
||||
+ ") "
|
||||
+ "LIMIT ?";
|
||||
|
||||
try {
|
||||
return this.repository.executeCheckedUpdate(sql, minHeight, maxHeight, limit);
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to trim AT states in repository", e);
|
||||
try {
|
||||
int modifiedRows = this.repository.executeCheckedUpdate(sql, minHeight, maxHeight, limit);
|
||||
this.repository.saveChanges();
|
||||
return modifiedRows;
|
||||
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to trim AT states in repository", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int getAtPruneHeight() throws DataException {
|
||||
String sql = "SELECT AT_prune_height FROM DatabaseInfo";
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql)) {
|
||||
if (resultSet == null)
|
||||
return 0;
|
||||
|
||||
return resultSet.getInt(1);
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch AT state prune height from repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setAtPruneHeight(int pruneHeight) throws DataException {
|
||||
// trimHeightsLock is to prevent concurrent update on DatabaseInfo
|
||||
// that could result in "transaction rollback: serialization failure"
|
||||
synchronized (this.repository.trimHeightsLock) {
|
||||
String updateSql = "UPDATE DatabaseInfo SET AT_prune_height = ?";
|
||||
|
||||
try {
|
||||
this.repository.executeCheckedUpdate(updateSql, pruneHeight);
|
||||
this.repository.saveChanges();
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to set AT state prune height in repository", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int pruneAtStates(int minHeight, int maxHeight) throws DataException {
|
||||
// latestATStatesLock is to prevent concurrent updates on LatestATStates
|
||||
// that could result in one process using a partial or empty dataset
|
||||
// because it was in the process of being rebuilt by another thread
|
||||
synchronized (this.repository.latestATStatesLock) {
|
||||
|
||||
int deletedCount = 0;
|
||||
|
||||
for (int height = minHeight; height <= maxHeight; height++) {
|
||||
|
||||
// Give up if we're stopping
|
||||
if (Controller.isStopping()) {
|
||||
return deletedCount;
|
||||
}
|
||||
|
||||
// Get latest AT states for this height
|
||||
List<String> atAddresses = new ArrayList<>();
|
||||
String updateSql = "SELECT AT_address FROM LatestATStates WHERE height = ?";
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(updateSql, height)) {
|
||||
if (resultSet != null) {
|
||||
do {
|
||||
String atAddress = resultSet.getString(1);
|
||||
atAddresses.add(atAddress);
|
||||
|
||||
} while (resultSet.next());
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch latest AT states from repository", e);
|
||||
}
|
||||
|
||||
List<ATStateData> atStates = this.getBlockATStatesAtHeight(height);
|
||||
for (ATStateData atState : atStates) {
|
||||
//LOGGER.info("Found atState {} at height {}", atState.getATAddress(), atState.getHeight());
|
||||
|
||||
// Give up if we're stopping
|
||||
if (Controller.isStopping()) {
|
||||
return deletedCount;
|
||||
}
|
||||
|
||||
if (atAddresses.contains(atState.getATAddress())) {
|
||||
// We don't want to delete this AT state because it is still active
|
||||
LOGGER.trace("Skipping atState {} at height {}", atState.getATAddress(), atState.getHeight());
|
||||
continue;
|
||||
}
|
||||
|
||||
// Safe to delete everything else for this height
|
||||
try {
|
||||
this.repository.delete("ATStates", "AT_address = ? AND height = ?",
|
||||
atState.getATAddress(), atState.getHeight());
|
||||
deletedCount++;
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to delete AT state data from repository", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
this.repository.saveChanges();
|
||||
|
||||
return deletedCount;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean hasAtStatesHeightIndex() throws DataException {
|
||||
String sql = "SELECT INDEX_NAME FROM INFORMATION_SCHEMA.SYSTEM_INDEXINFO where INDEX_NAME='ATSTATESHEIGHTINDEX'";
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql)) {
|
||||
return resultSet != null;
|
||||
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to check for ATStatesHeightIndex in repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void save(ATStateData atStateData) throws DataException {
|
||||
// We shouldn't ever save partial ATStateData
|
||||
@ -662,7 +821,8 @@ public class HSQLDBATRepository implements ATRepository {
|
||||
|
||||
atStatesSaver.bind("AT_address", atStateData.getATAddress()).bind("height", atStateData.getHeight())
|
||||
.bind("state_hash", atStateData.getStateHash())
|
||||
.bind("fees", atStateData.getFees()).bind("is_initial", atStateData.isInitial());
|
||||
.bind("fees", atStateData.getFees()).bind("is_initial", atStateData.isInitial())
|
||||
.bind("sleep_until_message_timestamp", atStateData.getSleepUntilMessageTimestamp());
|
||||
|
||||
try {
|
||||
atStatesSaver.execute(this.repository);
|
||||
|
@ -904,6 +904,25 @@ public class HSQLDBAccountRepository implements AccountRepository {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public MintingAccountData getMintingAccount(byte[] mintingAccountKey) throws DataException {
|
||||
try (ResultSet resultSet = this.repository.checkedExecute("SELECT minter_private_key, minter_public_key " +
|
||||
"FROM MintingAccounts WHERE minter_private_key = ? OR minter_public_key = ?",
|
||||
mintingAccountKey, mintingAccountKey)) {
|
||||
|
||||
if (resultSet == null)
|
||||
return null;
|
||||
|
||||
byte[] minterPrivateKey = resultSet.getBytes(1);
|
||||
byte[] minterPublicKey = resultSet.getBytes(2);
|
||||
|
||||
return new MintingAccountData(minterPrivateKey, minterPublicKey);
|
||||
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch minting accounts from repository", e);
|
||||
}
|
||||
}
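// Hedged usage sketch (illustrative only): the key passed in may be either the minter's private key
// or public key, since the WHERE clause above matches both columns.
//
//     byte[] mintingAccountKey = Base58.decode(base58Key);   // hypothetical Base58-encoded key from a caller
//     MintingAccountData mintingAccountData = repository.getAccountRepository().getMintingAccount(mintingAccountKey);
//     if (mintingAccountData == null) {
//         // No matching MintingAccounts row
//     }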
|
||||
|
||||
@Override
|
||||
public void save(MintingAccountData mintingAccountData) throws DataException {
|
||||
HSQLDBSaver saveHelper = new HSQLDBSaver("MintingAccounts");
|
||||
|
@ -0,0 +1,296 @@
|
||||
package org.qortal.repository.hsqldb;
|
||||
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.model.BlockSignerSummary;
|
||||
import org.qortal.block.Block;
|
||||
import org.qortal.data.block.BlockArchiveData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.block.BlockSummaryData;
|
||||
import org.qortal.repository.BlockArchiveReader;
|
||||
import org.qortal.repository.BlockArchiveRepository;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository {
|
||||
|
||||
protected HSQLDBRepository repository;
|
||||
|
||||
public HSQLDBBlockArchiveRepository(HSQLDBRepository repository) {
|
||||
this.repository = repository;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public BlockData fromSignature(byte[] signature) throws DataException {
|
||||
Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockWithSignature(signature, this.repository);
|
||||
if (blockInfo != null) {
|
||||
return (BlockData) blockInfo.getA();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getHeightFromSignature(byte[] signature) throws DataException {
|
||||
Integer height = BlockArchiveReader.getInstance().fetchHeightForSignature(signature, this.repository);
|
||||
if (height == null || height == 0) {
|
||||
return 0;
|
||||
}
|
||||
return height;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockData fromHeight(int height) throws DataException {
|
||||
Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height);
|
||||
if (blockInfo != null) {
|
||||
return (BlockData) blockInfo.getA();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<BlockData> fromRange(int startHeight, int endHeight) throws DataException {
|
||||
List<BlockData> blocks = new ArrayList<>();
|
||||
|
||||
for (int height = startHeight; height < endHeight; height++) {
|
||||
BlockData blockData = this.fromHeight(height);
|
||||
if (blockData == null) {
|
||||
return blocks;
|
||||
}
|
||||
blocks.add(blockData);
|
||||
}
|
||||
return blocks;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockData fromReference(byte[] reference) throws DataException {
|
||||
BlockData referenceBlock = this.repository.getBlockArchiveRepository().fromSignature(reference);
|
||||
if (referenceBlock == null) {
|
||||
// Try the main block repository. Needed for genesis block.
|
||||
referenceBlock = this.repository.getBlockRepository().fromSignature(reference);
|
||||
}
|
||||
if (referenceBlock != null) {
|
||||
int height = referenceBlock.getHeight();
|
||||
if (height > 0) {
|
||||
// Request the block at height + 1
|
||||
Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height + 1);
|
||||
if (blockInfo != null) {
|
||||
return (BlockData) blockInfo.getA();
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getHeightFromTimestamp(long timestamp) throws DataException {
|
||||
String sql = "SELECT height FROM BlockArchive WHERE minted_when <= ? ORDER BY minted_when DESC, height DESC LIMIT 1";
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql, timestamp)) {
|
||||
if (resultSet == null) {
|
||||
return 0;
|
||||
}
|
||||
return resultSet.getInt(1);
|
||||
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Error fetching height from BlockArchive repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<BlockSummaryData> getBlockSummariesBySigner(byte[] signerPublicKey, Integer limit, Integer offset, Boolean reverse) throws DataException {
|
||||
StringBuilder sql = new StringBuilder(512);
|
||||
sql.append("SELECT signature, height, BlockArchive.minter FROM ");
|
||||
|
||||
// List of minter account's public key and reward-share public keys with minter's public key
|
||||
sql.append("(SELECT * FROM (VALUES (CAST(? AS QortalPublicKey))) UNION (SELECT reward_share_public_key FROM RewardShares WHERE minter_public_key = ?)) AS PublicKeys (public_key) ");
|
||||
|
||||
// Match BlockArchive blocks signed with public key from above list
|
||||
sql.append("JOIN BlockArchive ON BlockArchive.minter = public_key ");
|
||||
|
||||
sql.append("ORDER BY BlockArchive.height ");
|
||||
if (reverse != null && reverse)
|
||||
sql.append("DESC ");
|
||||
|
||||
HSQLDBRepository.limitOffsetSql(sql, limit, offset);
|
||||
|
||||
List<BlockSummaryData> blockSummaries = new ArrayList<>();
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), signerPublicKey, signerPublicKey)) {
|
||||
if (resultSet == null)
|
||||
return blockSummaries;
|
||||
|
||||
do {
|
||||
byte[] signature = resultSet.getBytes(1);
|
||||
int height = resultSet.getInt(2);
|
||||
byte[] blockMinterPublicKey = resultSet.getBytes(3);
|
||||
|
||||
// Fetch additional info from the archive itself
|
||||
int onlineAccountsCount = 0;
|
||||
BlockData blockData = this.fromSignature(signature);
|
||||
if (blockData != null) {
|
||||
onlineAccountsCount = blockData.getOnlineAccountsCount();
|
||||
}
|
||||
|
||||
BlockSummaryData blockSummary = new BlockSummaryData(height, signature, blockMinterPublicKey, onlineAccountsCount);
|
||||
blockSummaries.add(blockSummary);
|
||||
} while (resultSet.next());
|
||||
|
||||
return blockSummaries;
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch minter's block summaries from repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<BlockSignerSummary> getBlockSigners(List<String> addresses, Integer limit, Integer offset, Boolean reverse) throws DataException {
|
||||
String subquerySql = "SELECT minter, COUNT(signature) FROM (" +
|
||||
"(SELECT minter, signature FROM Blocks) UNION ALL (SELECT minter, signature FROM BlockArchive)" +
|
||||
") GROUP BY minter";
|
||||
|
||||
StringBuilder sql = new StringBuilder(1024);
|
||||
sql.append("SELECT DISTINCT block_minter, n_blocks, minter_public_key, minter, recipient FROM (");
|
||||
sql.append(subquerySql);
|
||||
sql.append(") AS Minters (block_minter, n_blocks) LEFT OUTER JOIN RewardShares ON reward_share_public_key = block_minter ");
|
||||
|
||||
if (addresses != null && !addresses.isEmpty()) {
|
||||
sql.append(" LEFT OUTER JOIN Accounts AS BlockMinterAccounts ON BlockMinterAccounts.public_key = block_minter ");
|
||||
sql.append(" LEFT OUTER JOIN Accounts AS RewardShareMinterAccounts ON RewardShareMinterAccounts.public_key = minter_public_key ");
|
||||
sql.append(" JOIN (VALUES ");
|
||||
|
||||
final int addressesSize = addresses.size();
|
||||
for (int ai = 0; ai < addressesSize; ++ai) {
|
||||
if (ai != 0)
|
||||
sql.append(", ");
|
||||
|
||||
sql.append("(?)");
|
||||
}
|
||||
|
||||
sql.append(") AS FilterAccounts (account) ");
|
||||
sql.append(" ON FilterAccounts.account IN (recipient, BlockMinterAccounts.account, RewardShareMinterAccounts.account) ");
|
||||
} else {
|
||||
addresses = Collections.emptyList();
|
||||
}
|
||||
|
||||
sql.append("ORDER BY n_blocks ");
|
||||
if (reverse != null && reverse)
|
||||
sql.append("DESC ");
|
||||
|
||||
HSQLDBRepository.limitOffsetSql(sql, limit, offset);
|
||||
|
||||
List<BlockSignerSummary> summaries = new ArrayList<>();
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), addresses.toArray())) {
|
||||
if (resultSet == null)
|
||||
return summaries;
|
||||
|
||||
do {
|
||||
byte[] blockMinterPublicKey = resultSet.getBytes(1);
|
||||
int nBlocks = resultSet.getInt(2);
|
||||
|
||||
// May not be present if no reward-share:
|
||||
byte[] mintingAccountPublicKey = resultSet.getBytes(3);
|
||||
String minterAccount = resultSet.getString(4);
|
||||
String recipientAccount = resultSet.getString(5);
|
||||
|
||||
BlockSignerSummary blockSignerSummary;
|
||||
if (recipientAccount == null)
|
||||
blockSignerSummary = new BlockSignerSummary(blockMinterPublicKey, nBlocks);
|
||||
else
|
||||
blockSignerSummary = new BlockSignerSummary(blockMinterPublicKey, nBlocks, mintingAccountPublicKey, minterAccount, recipientAccount);
|
||||
|
||||
summaries.add(blockSignerSummary);
|
||||
} while (resultSet.next());
|
||||
|
||||
return summaries;
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch block minters from repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int getBlockArchiveHeight() throws DataException {
|
||||
String sql = "SELECT block_archive_height FROM DatabaseInfo";
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql)) {
|
||||
if (resultSet == null)
|
||||
return 0;
|
||||
|
||||
return resultSet.getInt(1);
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch block archive height from repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setBlockArchiveHeight(int archiveHeight) throws DataException {
|
||||
// trimHeightsLock is to prevent concurrent update on DatabaseInfo
|
||||
// that could result in "transaction rollback: serialization failure"
|
||||
synchronized (this.repository.trimHeightsLock) {
|
||||
String updateSql = "UPDATE DatabaseInfo SET block_archive_height = ?";
|
||||
|
||||
try {
|
||||
this.repository.executeCheckedUpdate(updateSql, archiveHeight);
|
||||
this.repository.saveChanges();
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to set block archive height in repository", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public BlockArchiveData getBlockArchiveDataForSignature(byte[] signature) throws DataException {
|
||||
String sql = "SELECT height, signature, minted_when, minter FROM BlockArchive WHERE signature = ? LIMIT 1";
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql, signature)) {
|
||||
if (resultSet == null) {
|
||||
return null;
|
||||
}
|
||||
int height = resultSet.getInt(1);
|
||||
byte[] sig = resultSet.getBytes(2);
|
||||
long timestamp = resultSet.getLong(3);
|
||||
byte[] minterPublicKey = resultSet.getBytes(4);
|
||||
return new BlockArchiveData(sig, height, timestamp, minterPublicKey);
|
||||
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Error fetching height from BlockArchive repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void save(BlockArchiveData blockArchiveData) throws DataException {
|
||||
HSQLDBSaver saveHelper = new HSQLDBSaver("BlockArchive");
|
||||
|
||||
saveHelper.bind("signature", blockArchiveData.getSignature())
|
||||
.bind("height", blockArchiveData.getHeight())
|
||||
.bind("minted_when", blockArchiveData.getTimestamp())
|
||||
.bind("minter", blockArchiveData.getMinterPublicKey());
|
||||
|
||||
try {
|
||||
saveHelper.execute(this.repository);
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to save SimpleBlockData into BlockArchive repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void delete(BlockArchiveData blockArchiveData) throws DataException {
|
||||
try {
|
||||
this.repository.delete("BlockArchive",
|
||||
"block_signature = ?", blockArchiveData.getSignature());
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to delete SimpleBlockData from BlockArchive repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -10,6 +10,7 @@ import org.qortal.api.model.BlockSignerSummary;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.block.BlockSummaryData;
|
||||
import org.qortal.data.block.BlockTransactionData;
|
||||
import org.qortal.data.block.BlockArchiveData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.BlockRepository;
|
||||
import org.qortal.repository.DataException;
|
||||
@ -382,86 +383,6 @@ public class HSQLDBBlockRepository implements BlockRepository {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<BlockSummaryData> getBlockSummaries(Integer startHeight, Integer endHeight, Integer count) throws DataException {
|
||||
StringBuilder sql = new StringBuilder(512);
|
||||
List<Object> bindParams = new ArrayList<>();
|
||||
|
||||
sql.append("SELECT signature, height, minter, online_accounts_count, minted_when, transaction_count ");
|
||||
|
||||
/*
 * start  end   count  result
 * 10     40    null   blocks 10 to 39 (excludes end block, ignore count)
 *
 * null   null  null   blocks 1 to 50 (assume count=50, maybe start=1)
 * 30     null  null   blocks 30 to 79 (assume count=50)
 * 30     null  10     blocks 30 to 39
 *
 * null   null  50     last 50 blocks? so if max(blocks.height) is 200, then blocks 151 to 200
 * null   200   null   blocks 150 to 199 (excludes end block, assume count=50)
 * null   200   10     blocks 190 to 199 (excludes end block)
 */
|
||||
|
||||
if (startHeight != null && endHeight != null) {
|
||||
sql.append("FROM Blocks ");
|
||||
sql.append("WHERE height BETWEEN ? AND ?");
|
||||
bindParams.add(startHeight);
|
||||
bindParams.add(Integer.valueOf(endHeight - 1));
|
||||
} else if (endHeight != null || (startHeight == null && count != null)) {
|
||||
// we are going to return blocks from the end of the chain
|
||||
if (count == null)
|
||||
count = 50;
|
||||
|
||||
if (endHeight == null) {
|
||||
sql.append("FROM (SELECT height FROM Blocks ORDER BY height DESC LIMIT 1) AS MaxHeights (max_height) ");
|
||||
sql.append("JOIN Blocks ON height BETWEEN (max_height - ? + 1) AND max_height ");
|
||||
bindParams.add(count);
|
||||
} else {
|
||||
sql.append("FROM Blocks ");
|
||||
sql.append("WHERE height BETWEEN ? AND ?");
|
||||
bindParams.add(Integer.valueOf(endHeight - count));
|
||||
bindParams.add(Integer.valueOf(endHeight - 1));
|
||||
}
|
||||
|
||||
} else {
|
||||
// we are going to return blocks from the start of the chain
|
||||
if (startHeight == null)
|
||||
startHeight = 1;
|
||||
|
||||
if (count == null)
|
||||
count = 50;
|
||||
|
||||
sql.append("FROM Blocks ");
|
||||
sql.append("WHERE height BETWEEN ? AND ?");
|
||||
bindParams.add(startHeight);
|
||||
bindParams.add(Integer.valueOf(startHeight + count - 1));
|
||||
}
|
||||
|
||||
List<BlockSummaryData> blockSummaries = new ArrayList<>();
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), bindParams.toArray())) {
|
||||
if (resultSet == null)
|
||||
return blockSummaries;
|
||||
|
||||
do {
|
||||
byte[] signature = resultSet.getBytes(1);
|
||||
int height = resultSet.getInt(2);
|
||||
byte[] minterPublicKey = resultSet.getBytes(3);
|
||||
int onlineAccountsCount = resultSet.getInt(4);
|
||||
long timestamp = resultSet.getLong(5);
|
||||
int transactionCount = resultSet.getInt(6);
|
||||
|
||||
BlockSummaryData blockSummary = new BlockSummaryData(height, signature, minterPublicKey, onlineAccountsCount,
|
||||
timestamp, transactionCount);
|
||||
blockSummaries.add(blockSummary);
|
||||
} while (resultSet.next());
|
||||
|
||||
return blockSummaries;
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch height-ranged block summaries from repository", e);
|
||||
}
|
||||
}
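// Hedged illustration of the start/end/count combinations documented in the comment table above;
// the repository variable is an assumption:
//
//     BlockRepository blocks = repository.getBlockRepository();
//     blocks.getBlockSummaries(10, 40, null);     // blocks 10 to 39 (end exclusive, count ignored)
//     blocks.getBlockSummaries(30, null, 10);     // blocks 30 to 39
//     blocks.getBlockSummaries(null, null, 50);   // last 50 blocks on the chain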
|
||||
|
||||
@Override
|
||||
public int getOnlineAccountsSignaturesTrimHeight() throws DataException {
|
||||
String sql = "SELECT online_signatures_trim_height FROM DatabaseInfo";
|
||||
@ -509,6 +430,53 @@ public class HSQLDBBlockRepository implements BlockRepository {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int getBlockPruneHeight() throws DataException {
|
||||
String sql = "SELECT block_prune_height FROM DatabaseInfo";
|
||||
|
||||
try (ResultSet resultSet = this.repository.checkedExecute(sql)) {
|
||||
if (resultSet == null)
|
||||
return 0;
|
||||
|
||||
return resultSet.getInt(1);
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to fetch block prune height from repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setBlockPruneHeight(int pruneHeight) throws DataException {
|
||||
// trimHeightsLock is to prevent concurrent update on DatabaseInfo
|
||||
// that could result in "transaction rollback: serialization failure"
|
||||
synchronized (this.repository.trimHeightsLock) {
|
||||
String updateSql = "UPDATE DatabaseInfo SET block_prune_height = ?";
|
||||
|
||||
try {
|
||||
this.repository.executeCheckedUpdate(updateSql, pruneHeight);
|
||||
this.repository.saveChanges();
|
||||
} catch (SQLException e) {
|
||||
repository.examineException(e);
|
||||
throw new DataException("Unable to set block prune height in repository", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int pruneBlocks(int minHeight, int maxHeight) throws DataException {
|
||||
// Don't prune the genesis block
|
||||
if (minHeight <= 1) {
|
||||
minHeight = 2;
|
||||
}
|
||||
|
||||
try {
|
||||
return this.repository.delete("Blocks", "height BETWEEN ? AND ?", minHeight, maxHeight);
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to prune blocks from repository", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public BlockData getDetachedBlockSignature(int startHeight) throws DataException {
|
||||
String sql = "SELECT " + BLOCK_DB_COLUMNS + " FROM Blocks "
|
||||
|
@ -0,0 +1,88 @@
|
||||
package org.qortal.repository.hsqldb;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.gui.SplashFrame;
|
||||
import org.qortal.repository.BlockArchiveWriter;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.transform.TransformationException;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
 *
 * When switching to an archiving node, we need to archive most of the database contents.
 * This involves copying its data into flat files.
 * If we do this entirely as a background process, it is very slow and can interfere with syncing.
 * However, if we take the approach of doing this in bulk, before starting up the rest of the
 * processes, this makes it much faster and less invasive.
 *
 * From that point, the original background archiving process will run, but can be dialled right down
 * so as not to interfere with syncing.
 *
 */
|
||||
|
||||
|
||||
public class HSQLDBDatabaseArchiving {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabaseArchiving.class);
|
||||
|
||||
|
||||
public static boolean buildBlockArchive(Repository repository, long fileSizeTarget) throws DataException {
|
||||
|
||||
// Only build the archive if we haven't already got one that is up to date
|
||||
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
|
||||
if (upToDate) {
|
||||
// Already archived
|
||||
return false;
|
||||
}
|
||||
|
||||
LOGGER.info("Building block archive - this process could take a while... (approx. 15 mins on high spec)");
|
||||
SplashFrame.getInstance().updateStatus("Building block archive (takes 60+ mins)...");
|
||||
|
||||
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
|
||||
int startHeight = 0;
|
||||
|
||||
while (!Controller.isStopping()) {
|
||||
try {
|
||||
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
|
||||
writer.setFileSizeTarget(fileSizeTarget);
|
||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
|
||||
switch (result) {
|
||||
case OK:
|
||||
// Increment block archive height
|
||||
startHeight = writer.getLastWrittenHeight() + 1;
|
||||
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
|
||||
repository.saveChanges();
|
||||
break;
|
||||
|
||||
case STOPPING:
|
||||
return false;
|
||||
|
||||
case NOT_ENOUGH_BLOCKS:
|
||||
// We've reached the limit of the blocks we can archive
|
||||
// Return from the whole method
|
||||
return true;
|
||||
|
||||
case BLOCK_NOT_FOUND:
|
||||
// We tried to archive a block that didn't exist. This is a major failure and likely means
|
||||
// that a bootstrap or re-sync is needed. Return rom the method
|
||||
LOGGER.info("Error: block not found when building archive. If this error persists, " +
|
||||
"a bootstrap or re-sync may be needed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
} catch (IOException | TransformationException | InterruptedException e) {
|
||||
LOGGER.info("Caught exception when creating block cache", e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// If we got this far then something went wrong (most likely the app is stopping)
|
||||
return false;
|
||||
}
|
||||
|
||||
}
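For context, a condensed sketch of the call site for buildBlockArchive() as it appears earlier in this change (RepositoryManager.archive()); the surrounding checks are paraphrased, not exact:

    // Condensed from RepositoryManager.archive(); illustrative only
    try {
        if (Settings.getInstance().isArchiveEnabled() && RepositoryManager.canArchiveOrPrune()) {
            boolean built = HSQLDBDatabaseArchiving.buildBlockArchive(repository, BlockArchiveWriter.DEFAULT_FILE_SIZE_TARGET);
            // true once the archive has caught up (NOT_ENOUGH_BLOCKS); false if stopping or on failure
        }
    } catch (DataException e) {
        // The database may have been left in an inconsistent state (see RepositoryManager.archive())
    }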
|
@ -0,0 +1,332 @@
|
||||
package org.qortal.repository.hsqldb;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.gui.SplashFrame;
|
||||
import org.qortal.repository.BlockArchiveWriter;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.settings.Settings;
|
||||
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
/**
 *
 * When switching from a full node to a pruning node, we need to delete most of the database contents.
 * If we do this entirely as a background process, it is very slow and can interfere with syncing.
 * However, if we take the approach of transferring only the necessary rows to a new table and then
 * deleting the original table, this makes the process much faster. It was taking several days to
 * delete the AT states in the background, but only a couple of minutes to copy them to a new table.
 *
 * The trade-off is that we have to go through a form of "reshape" when starting the app for the first
 * time after enabling pruning mode. But given that this is an opt-in mode, I don't think it will be
 * a problem.
 *
 * Once the pruning is complete, it automatically performs a CHECKPOINT DEFRAG in order to
 * shrink the database file size down to a fraction of what it was before.
 *
 * From this point, the original background process will run, but can be dialled right down
 * so as not to interfere with syncing.
 *
 */
|
||||
|
||||
|
||||
public class HSQLDBDatabasePruning {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabasePruning.class);
|
||||
|
||||
|
||||
public static boolean pruneATStates(HSQLDBRepository repository) throws SQLException, DataException {
|
||||
|
||||
// Only bulk prune AT states if we have never done so before
|
||||
int pruneHeight = repository.getATRepository().getAtPruneHeight();
|
||||
if (pruneHeight > 0) {
|
||||
// Already pruned AT states
|
||||
return false;
|
||||
}
|
||||
|
||||
if (Settings.getInstance().isArchiveEnabled()) {
|
||||
// Only proceed if we can see that the archiver has already finished
|
||||
// This way, if the archiver failed for any reason, we can prune once it has had
|
||||
// some opportunities to try again
|
||||
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
|
||||
if (!upToDate) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
LOGGER.info("Starting bulk prune of AT states - this process could take a while... " +
|
||||
"(approx. 2 mins on high spec, or upwards of 30 mins in some cases)");
|
||||
SplashFrame.getInstance().updateStatus("Pruning database (takes up to 30 mins)...");
|
||||
|
||||
// Create new AT-states table to hold smaller dataset
|
||||
repository.executeCheckedUpdate("DROP TABLE IF EXISTS ATStatesNew");
|
||||
repository.executeCheckedUpdate("CREATE TABLE ATStatesNew ("
|
||||
+ "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, "
|
||||
+ "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, "
|
||||
+ "PRIMARY KEY (AT_address, height), "
|
||||
+ "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)");
|
||||
repository.executeCheckedUpdate("SET TABLE ATStatesNew NEW SPACE");
|
||||
repository.executeCheckedUpdate("CHECKPOINT");
|
||||
|
||||
// Add a height index
|
||||
LOGGER.info("Adding index to AT states table...");
|
||||
repository.executeCheckedUpdate("CREATE INDEX IF NOT EXISTS ATStatesNewHeightIndex ON ATStatesNew (height)");
|
||||
repository.executeCheckedUpdate("CHECKPOINT");
|
||||
|
||||
|
||||
// Find our latest block
|
||||
BlockData latestBlock = repository.getBlockRepository().getLastBlock();
|
||||
if (latestBlock == null) {
|
||||
LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Calculate some constants for later use
|
||||
final int blockchainHeight = latestBlock.getHeight();
|
||||
int maximumBlockToTrim = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
|
||||
if (Settings.getInstance().isArchiveEnabled()) {
|
||||
// Archive mode - don't prune anything that hasn't been archived yet
|
||||
maximumBlockToTrim = Math.min(maximumBlockToTrim, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
|
||||
}
|
||||
final int endHeight = blockchainHeight;
|
||||
final int blockStep = 10000;
|
||||
|
||||
|
||||
// It's essential that we rebuild the latest AT states here, as we are using this data in the next query.
|
||||
        // Failing to do this will result in important AT states being deleted, rendering the database unusable.
        repository.getATRepository().rebuildLatestAtStates();

        // Loop through all the LatestATStates and copy them to the new table
        LOGGER.info("Copying AT states...");
        for (int height = 0; height < endHeight; height += blockStep) {
            final int batchEndHeight = height + blockStep - 1;
            //LOGGER.info(String.format("Copying AT states between %d and %d...", height, batchEndHeight));

            String sql = "SELECT height, AT_address FROM LatestATStates WHERE height BETWEEN ? AND ?";
            try (ResultSet latestAtStatesResultSet = repository.checkedExecute(sql, height, batchEndHeight)) {
                if (latestAtStatesResultSet != null) {
                    do {
                        int latestAtHeight = latestAtStatesResultSet.getInt(1);
                        String latestAtAddress = latestAtStatesResultSet.getString(2);

                        // Copy this latest ATState to the new table
                        //LOGGER.info(String.format("Copying AT %s at height %d...", latestAtAddress, latestAtHeight));
                        try {
                            String updateSql = "INSERT INTO ATStatesNew ("
                                    + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
                                    + "FROM ATStates "
                                    + "WHERE height = ? AND AT_address = ?)";
                            repository.executeCheckedUpdate(updateSql, latestAtHeight, latestAtAddress);
                        } catch (SQLException e) {
                            repository.examineException(e);
                            throw new DataException("Unable to copy ATStates", e);
                        }

                        // If this batch includes blocks after the maximum block to trim, we will need to copy
                        // each of its AT states above maximumBlockToTrim as they are considered "recent". We
                        // need to do this for _all_ AT states in these blocks, regardless of their latest state.
                        if (batchEndHeight >= maximumBlockToTrim) {
                            // Now copy this AT's states for each recent block they are present in
                            for (int i = maximumBlockToTrim; i < endHeight; i++) {
                                if (latestAtHeight < i) {
                                    // This AT finished before this block so there is nothing to copy
                                    continue;
                                }

                                //LOGGER.info(String.format("Copying recent AT %s at height %d...", latestAtAddress, i));
                                try {
                                    // Copy each LatestATState to the new table
                                    String updateSql = "INSERT IGNORE INTO ATStatesNew ("
                                            + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
                                            + "FROM ATStates "
                                            + "WHERE height = ? AND AT_address = ?)";
                                    repository.executeCheckedUpdate(updateSql, i, latestAtAddress);
                                } catch (SQLException e) {
                                    repository.examineException(e);
                                    throw new DataException("Unable to copy ATStates", e);
                                }
                            }
                        }
                        repository.saveChanges();

                    } while (latestAtStatesResultSet.next());
                }
            } catch (SQLException e) {
                throw new DataException("Unable to copy AT states", e);
            }
        }

        // Finally, drop the original table and rename
        LOGGER.info("Deleting old AT states...");
        repository.executeCheckedUpdate("DROP TABLE ATStates");
        repository.executeCheckedUpdate("ALTER TABLE ATStatesNew RENAME TO ATStates");
        repository.executeCheckedUpdate("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex");
        repository.executeCheckedUpdate("CHECKPOINT");

        // Update the prune height
        int nextPruneHeight = maximumBlockToTrim + 1;
        repository.getATRepository().setAtPruneHeight(nextPruneHeight);
        repository.saveChanges();

        repository.executeCheckedUpdate("CHECKPOINT");

        // Now prune/trim the ATStatesData, as this currently goes back over a month
        return HSQLDBDatabasePruning.pruneATStateData(repository);
    }

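To make the copy rule above easier to follow in isolation: for a single AT, a row of ATStates is eligible to be copied into ATStatesNew exactly when the predicate below holds. The helper is an illustrative sketch only, not something added by this commit, and reuses the variable names from the loops above.

    // Illustrative sketch only: is the ATStates row at 'height' copied for an AT whose
    // latest state sits at 'latestAtHeight'? (The INSERT above is simply a no-op when no such row exists.)
    private static boolean isEligibleForCopy(int height, int latestAtHeight, int maximumBlockToTrim) {
        // The AT's latest state is always kept...
        if (height == latestAtHeight)
            return true;

        // ...plus every "recent" state at or above maximumBlockToTrim, provided the AT still existed at that height
        return height >= maximumBlockToTrim && height <= latestAtHeight;
    }
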
    /*
     * Bulk prune ATStatesData to catch up with the now pruned ATStates table
     * This uses the existing AT States trimming code but with a much higher end block
     */
    private static boolean pruneATStateData(Repository repository) throws DataException {

        if (Settings.getInstance().isArchiveEnabled()) {
            // Don't prune ATStatesData in archive mode
            return true;
        }

        BlockData latestBlock = repository.getBlockRepository().getLastBlock();
        if (latestBlock == null) {
            LOGGER.info("Unable to determine blockchain height, necessary for bulk ATStatesData pruning");
            return false;
        }
        final int blockchainHeight = latestBlock.getHeight();
        int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
        // ATStateData is already trimmed - so carry on from where we left off in the past
        int pruneStartHeight = repository.getATRepository().getAtTrimHeight();

        LOGGER.info("Starting bulk prune of AT states data - this process could take a while... (approx. 3 mins on high spec)");

        while (pruneStartHeight < upperPrunableHeight) {
            // Prune all AT state data up until our latest minus pruneBlockLimit (or our archive height)

            if (Controller.isStopping()) {
                return false;
            }

            // Override batch size in the settings because this is a one-off process
            final int batchSize = 1000;
            final int rowLimitPerBatch = 50000;
            int upperBatchHeight = pruneStartHeight + batchSize;
            int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);

            LOGGER.trace(String.format("Pruning AT states data between %d and %d...", pruneStartHeight, upperPruneHeight));

            int numATStatesPruned = repository.getATRepository().trimAtStates(pruneStartHeight, upperPruneHeight, rowLimitPerBatch);
            repository.saveChanges();

            if (numATStatesPruned > 0) {
                LOGGER.trace(String.format("Pruned %d AT states data rows between blocks %d and %d",
                        numATStatesPruned, pruneStartHeight, upperPruneHeight));
            } else {
                repository.getATRepository().setAtTrimHeight(upperBatchHeight);
                // No need to rebuild the latest AT states as we aren't currently synchronizing
                repository.saveChanges();
                LOGGER.debug(String.format("Bumping AT states trim height to %d", upperBatchHeight));

                // Can we move onto next batch?
                if (upperPrunableHeight > upperBatchHeight) {
                    pruneStartHeight = upperBatchHeight;
                }
                else {
                    // We've finished pruning
                    break;
                }
            }
        }

        return true;
    }

    public static boolean pruneBlocks(Repository repository) throws SQLException, DataException {

        // Only bulk prune blocks if we have never done so before
        int pruneHeight = repository.getBlockRepository().getBlockPruneHeight();
        if (pruneHeight > 0) {
            // Already pruned blocks
            return false;
        }

        if (Settings.getInstance().isArchiveEnabled()) {
            // Only proceed if we can see that the archiver has already finished
            // This way, if the archiver failed for any reason, we can prune once it has had
            // some opportunities to try again
            boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
            if (!upToDate) {
                return false;
            }
        }

        BlockData latestBlock = repository.getBlockRepository().getLastBlock();
        if (latestBlock == null) {
            LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
            return false;
        }
        final int blockchainHeight = latestBlock.getHeight();
        int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
        int pruneStartHeight = 0;

        if (Settings.getInstance().isArchiveEnabled()) {
            // Archive mode - don't prune anything that hasn't been archived yet
            upperPrunableHeight = Math.min(upperPrunableHeight, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
        }

        LOGGER.info("Starting bulk prune of blocks - this process could take a while... (approx. 5 mins on high spec)");

        while (pruneStartHeight < upperPrunableHeight) {
            // Prune all blocks up until our latest minus pruneBlockLimit

            int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
            int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);

            LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));

            int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
            repository.saveChanges();

            if (numBlocksPruned > 0) {
                LOGGER.info(String.format("Pruned %d block%s between %d and %d",
                        numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
                        pruneStartHeight, upperPruneHeight));
            } else {
                final int nextPruneHeight = upperPruneHeight + 1;
                repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
                repository.saveChanges();
                LOGGER.debug(String.format("Bumping block base prune height to %d", nextPruneHeight));

                // Can we move onto next batch?
                if (upperPrunableHeight > nextPruneHeight) {
                    pruneStartHeight = nextPruneHeight;
                }
                else {
                    // We've finished pruning
                    break;
                }
            }
        }

        return true;
    }

    public static void performMaintenance(Repository repository) throws SQLException, DataException {
        try {
            SplashFrame.getInstance().updateStatus("Performing maintenance...");

            // Timeout if the database isn't ready for maintenance after 5 minutes
            // Nothing else should be using the db at this point, so a timeout shouldn't happen
            long timeout = 5 * 60 * 1000L;
            repository.performPeriodicMaintenance(timeout);

        } catch (TimeoutException e) {
            LOGGER.info("Attempt to perform maintenance failed due to timeout: {}", e.getMessage());
        }
    }

}

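As a rough usage sketch, a caller would run the bulk prune once at startup and follow it with maintenance to reclaim disk space. The RepositoryManager.getRepository() call and the surrounding startup wiring are assumptions here; only pruneBlocks() and performMaintenance() are defined in this class.

    // Hypothetical caller sketch
    try (final Repository repository = RepositoryManager.getRepository()) {
        if (HSQLDBDatabasePruning.pruneBlocks(repository))
            // A bulk prune actually ran, so compact the database afterwards
            HSQLDBDatabasePruning.performMaintenance(repository);
    } catch (DataException | SQLException e) {
        LOGGER.error("Bulk prune failed", e);
    }
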
@ -9,7 +9,9 @@ import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.tradebot.BitcoinACCTv1TradeBot;
|
||||
import org.qortal.gui.SplashFrame;
|
||||
|
||||
public class HSQLDBDatabaseUpdates {
|
||||
|
||||
@ -27,9 +29,14 @@ public class HSQLDBDatabaseUpdates {
|
||||
public static boolean updateDatabase(Connection connection) throws SQLException {
|
||||
final boolean wasPristine = fetchDatabaseVersion(connection) == 0;
|
||||
|
||||
SplashFrame.getInstance().updateStatus("Upgrading database, please wait...");
|
||||
|
||||
while (databaseUpdating(connection, wasPristine))
|
||||
incrementDatabaseVersion(connection);
|
||||
|
||||
String text = String.format("Starting Qortal Core v%s...", Controller.getInstance().getVersionStringWithoutPrefix());
|
||||
SplashFrame.getInstance().updateStatus(text);
|
||||
|
||||
return wasPristine;
|
||||
}
|
||||
|
||||
@ -698,7 +705,7 @@ public class HSQLDBDatabaseUpdates {
|
||||
stmt.execute("CHECKPOINT");
|
||||
break;
|
||||
|
||||
case 30:
|
||||
case 30: {
|
||||
// Split AT state data off to new table for better performance/management.
|
||||
|
||||
if (!wasPristine && !"mem".equals(HSQLDBRepository.getDbPathname(connection.getMetaData().getURL()))) {
|
||||
@ -773,6 +780,7 @@ public class HSQLDBDatabaseUpdates {
|
||||
stmt.execute("ALTER TABLE ATStatesNew RENAME TO ATStates");
|
||||
stmt.execute("CHECKPOINT");
|
||||
break;
|
||||
}
|
||||
|
||||
case 31:
|
||||
// Fix latest AT state cache which was previously created as TEMPORARY
|
||||
@ -844,6 +852,75 @@ public class HSQLDBDatabaseUpdates {
|
||||
stmt.execute("ALTER TABLE ArbitraryTransactions ADD compression INTEGER NOT NULL DEFAULT 0");
|
||||
break;
|
||||
|
||||
case 34: {
|
||||
// AT sleep-until-message support
|
||||
LOGGER.info("Altering AT table in repository - this might take a while... (approx. 20 seconds on high-spec)");
|
||||
stmt.execute("ALTER TABLE ATs ADD sleep_until_message_timestamp BIGINT");
|
||||
|
||||
// Create new AT-states table with new column
|
||||
stmt.execute("CREATE TABLE ATStatesNew ("
|
||||
+ "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, "
|
||||
+ "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, "
|
||||
+ "PRIMARY KEY (AT_address, height), "
|
||||
+ "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)");
|
||||
stmt.execute("SET TABLE ATStatesNew NEW SPACE");
|
||||
stmt.execute("CHECKPOINT");
|
||||
|
||||
// Add the height index
|
||||
LOGGER.info("Adding index to AT states table...");
|
||||
stmt.execute("CREATE INDEX ATStatesNewHeightIndex ON ATStatesNew (height)");
|
||||
stmt.execute("CHECKPOINT");
|
||||
|
||||
ResultSet resultSet = stmt.executeQuery("SELECT height FROM Blocks ORDER BY height DESC LIMIT 1");
|
||||
final int blockchainHeight = resultSet.next() ? resultSet.getInt(1) : 0;
|
||||
final int heightStep = 100;
|
||||
|
||||
LOGGER.info("Altering AT states table in repository - this might take a while... (approx. 3 mins on high-spec)");
|
||||
for (int minHeight = 1; minHeight < blockchainHeight; minHeight += heightStep) {
|
||||
stmt.execute("INSERT INTO ATStatesNew ("
|
||||
+ "SELECT AT_address, height, state_hash, fees, is_initial, NULL "
|
||||
+ "FROM ATStates "
|
||||
+ "WHERE height BETWEEN " + minHeight + " AND " + (minHeight + heightStep - 1)
|
||||
+ ")");
|
||||
stmt.execute("COMMIT");
|
||||
|
||||
int processed = Math.min(minHeight + heightStep - 1, blockchainHeight);
|
||||
double percentage = (double)processed / (double)blockchainHeight * 100.0f;
|
||||
LOGGER.info(String.format("Processed %d of %d blocks (%.1f%%)", processed, blockchainHeight, percentage));
|
||||
}
|
||||
stmt.execute("CHECKPOINT");
|
||||
|
||||
stmt.execute("DROP TABLE ATStates");
|
||||
stmt.execute("ALTER TABLE ATStatesNew RENAME TO ATStates");
|
||||
stmt.execute("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex");
|
||||
stmt.execute("CHECKPOINT");
|
||||
break;
|
||||
}
|
||||
case 35:
|
||||
// Support for pruning
|
||||
stmt.execute("ALTER TABLE DatabaseInfo ADD AT_prune_height INT NOT NULL DEFAULT 0");
|
||||
stmt.execute("ALTER TABLE DatabaseInfo ADD block_prune_height INT NOT NULL DEFAULT 0");
|
||||
break;
|
||||
|
||||
case 36:
|
||||
// Block archive support
|
||||
stmt.execute("ALTER TABLE DatabaseInfo ADD block_archive_height INT NOT NULL DEFAULT 0");
|
||||
|
||||
// Block archive (lookup table to map signature to height)
|
||||
// Actual data is stored in archive files outside of the database
|
||||
stmt.execute("CREATE TABLE BlockArchive (signature BlockSignature, height INTEGER NOT NULL, "
|
||||
+ "minted_when EpochMillis NOT NULL, minter QortalPublicKey NOT NULL, "
|
||||
+ "PRIMARY KEY (signature))");
|
||||
// For finding blocks by height.
|
||||
stmt.execute("CREATE INDEX BlockArchiveHeightIndex ON BlockArchive (height)");
|
||||
// For finding blocks by the account that minted them.
|
||||
stmt.execute("CREATE INDEX BlockArchiveMinterIndex ON BlockArchive (minter)");
|
||||
// For finding blocks by timestamp or finding height of latest block immediately before timestamp, etc.
|
||||
stmt.execute("CREATE INDEX BlockArchiveTimestampHeightIndex ON BlockArchive (minted_when, height)");
|
||||
// Use a separate table space as this table will be very large.
|
||||
stmt.execute("SET TABLE BlockArchive NEW SPACE");
|
||||
break;
|
||||
|
||||
default:
|
||||
// nothing to do
|
||||
return false;
|
||||
|
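The BlockArchive table created in update 36 is only a signature/height lookup; block bodies live in archive files outside the database. A hedged sketch of a lookup against it, using the column names from the CREATE TABLE above and the checkedExecute() helper seen elsewhere in this repository layer:

    // Illustrative sketch: resolve an archived block's signature from its height
    String sql = "SELECT signature FROM BlockArchive WHERE height = ?";
    try (ResultSet resultSet = repository.checkedExecute(sql, height)) {
        byte[] signature = (resultSet != null) ? resultSet.getBytes(1) : null;
        // The full block data is then read from the external archive files using this signature/height
    } catch (SQLException e) {
        throw new DataException("Unable to fetch archived block signature", e);
    }
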
@ -0,0 +1,298 @@
|
||||
package org.qortal.repository.hsqldb;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONException;
|
||||
import org.json.JSONObject;
|
||||
import org.qortal.data.account.MintingAccountData;
|
||||
import org.qortal.data.crosschain.TradeBotData;
|
||||
import org.qortal.repository.Bootstrap;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
||||
public class HSQLDBImportExport {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(HSQLDBImportExport.class);
|
||||
|
||||
public static void backupTradeBotStates(Repository repository) throws DataException {
|
||||
HSQLDBImportExport.backupCurrentTradeBotStates(repository);
|
||||
HSQLDBImportExport.backupArchivedTradeBotStates(repository);
|
||||
|
||||
LOGGER.info("Exported sensitive/node-local data: trade bot states");
|
||||
}
|
||||
|
||||
public static void backupMintingAccounts(Repository repository) throws DataException {
|
||||
HSQLDBImportExport.backupCurrentMintingAccounts(repository);
|
||||
|
||||
LOGGER.info("Exported sensitive/node-local data: minting accounts");
|
||||
}
|
||||
|
||||
|
||||
/* Trade bot states */
|
||||
|
||||
/**
|
||||
* Backs up the trade bot states currently in the repository, without combining them with past ones
|
||||
* @param repository
|
||||
* @throws DataException
|
||||
*/
|
||||
private static void backupCurrentTradeBotStates(Repository repository) throws DataException {
|
||||
try {
|
||||
Path backupDirectory = HSQLDBImportExport.getExportDirectory(true);
|
||||
|
||||
// Load current trade bot data
|
||||
List<TradeBotData> allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData();
|
||||
JSONArray currentTradeBotDataJson = new JSONArray();
|
||||
for (TradeBotData tradeBotData : allTradeBotData) {
|
||||
JSONObject tradeBotDataJson = tradeBotData.toJson();
|
||||
currentTradeBotDataJson.put(tradeBotDataJson);
|
||||
}
|
||||
|
||||
// Wrap current trade bot data in an object to indicate the type
|
||||
JSONObject currentTradeBotDataJsonWrapper = new JSONObject();
|
||||
currentTradeBotDataJsonWrapper.put("type", "tradeBotStates");
|
||||
currentTradeBotDataJsonWrapper.put("dataset", "current");
|
||||
currentTradeBotDataJsonWrapper.put("data", currentTradeBotDataJson);
|
||||
|
||||
// Write current trade bot data (just the ones currently in the database)
|
||||
String fileName = Paths.get(backupDirectory.toString(), "TradeBotStates.json").toString();
|
||||
FileWriter writer = new FileWriter(fileName);
|
||||
writer.write(currentTradeBotDataJsonWrapper.toString(2));
|
||||
writer.close();
|
||||
|
||||
} catch (DataException | IOException e) {
|
||||
throw new DataException("Unable to export trade bot states from repository");
|
||||
}
|
||||
}
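One small hardening note: the FileWriter above is not closed if write() throws. A try-with-resources form of the same write (behaviour otherwise unchanged) releases the file handle on failure too:

    // Sketch: equivalent write using try-with-resources
    try (FileWriter writer = new FileWriter(fileName)) {
        writer.write(currentTradeBotDataJsonWrapper.toString(2));
    }
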
|
||||
|
||||
/**
|
||||
* Backs up the trade bot states currently in the repository to a separate "archive" file,
|
||||
* making sure to combine them with any unique states already present in the archive.
|
||||
* @param repository
|
||||
* @throws DataException
|
||||
*/
|
||||
private static void backupArchivedTradeBotStates(Repository repository) throws DataException {
|
||||
try {
|
||||
Path backupDirectory = HSQLDBImportExport.getExportDirectory(true);
|
||||
|
||||
// Load current trade bot data
|
||||
List<TradeBotData> allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData();
|
||||
JSONArray allTradeBotDataJson = new JSONArray();
|
||||
for (TradeBotData tradeBotData : allTradeBotData) {
|
||||
JSONObject tradeBotDataJson = tradeBotData.toJson();
|
||||
allTradeBotDataJson.put(tradeBotDataJson);
|
||||
}
|
||||
|
||||
// We need to combine existing archived TradeBotStates data before overwriting
|
||||
String fileName = Paths.get(backupDirectory.toString(), "TradeBotStatesArchive.json").toString();
|
||||
File tradeBotStatesBackupFile = new File(fileName);
|
||||
if (tradeBotStatesBackupFile.exists()) {
|
||||
|
||||
String jsonString = new String(Files.readAllBytes(Paths.get(fileName)));
|
||||
Triple<String, String, JSONArray> parsedJSON = HSQLDBImportExport.parseJSONString(jsonString);
|
||||
if (parsedJSON.getA() == null || parsedJSON.getC() == null) {
|
||||
throw new DataException("Missing data when exporting archived trade bot states");
|
||||
}
|
||||
String type = parsedJSON.getA();
|
||||
String dataset = parsedJSON.getB();
|
||||
JSONArray data = parsedJSON.getC();
|
||||
|
||||
if (!type.equals("tradeBotStates") || !dataset.equals("archive")) {
|
||||
throw new DataException("Format mismatch when exporting archived trade bot states");
|
||||
}
|
||||
|
||||
Iterator<Object> iterator = data.iterator();
|
||||
while(iterator.hasNext()) {
|
||||
JSONObject existingTradeBotDataItem = (JSONObject)iterator.next();
|
||||
String existingTradePrivateKey = (String) existingTradeBotDataItem.get("tradePrivateKey");
|
||||
// Check if we already have an entry for this trade
|
||||
boolean found = allTradeBotData.stream().anyMatch(tradeBotData -> Base58.encode(tradeBotData.getTradePrivateKey()).equals(existingTradePrivateKey));
|
||||
if (found == false)
|
||||
// Add the data from the backup file to our "allTradeBotDataJson" array as it's not currently in the db
|
||||
allTradeBotDataJson.put(existingTradeBotDataItem);
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap all trade bot data in an object to indicate the type
|
||||
JSONObject allTradeBotDataJsonWrapper = new JSONObject();
|
||||
allTradeBotDataJsonWrapper.put("type", "tradeBotStates");
|
||||
allTradeBotDataJsonWrapper.put("dataset", "archive");
|
||||
allTradeBotDataJsonWrapper.put("data", allTradeBotDataJson);
|
||||
|
||||
// Write ALL trade bot data to archive (current plus states that are no longer in the database)
|
||||
FileWriter writer = new FileWriter(fileName);
|
||||
writer.write(allTradeBotDataJsonWrapper.toString(2));
|
||||
writer.close();
|
||||
|
||||
} catch (DataException | IOException e) {
|
||||
throw new DataException("Unable to export trade bot states from repository");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Minting accounts */
|
||||
|
||||
/**
|
||||
* Backs up the minting accounts currently in the repository, without combining them with past ones
|
||||
* @param repository
|
||||
* @throws DataException
|
||||
*/
|
||||
private static void backupCurrentMintingAccounts(Repository repository) throws DataException {
|
||||
try {
|
||||
Path backupDirectory = HSQLDBImportExport.getExportDirectory(true);
|
||||
|
||||
// Load current trade bot data
|
||||
List<MintingAccountData> allMintingAccountData = repository.getAccountRepository().getMintingAccounts();
|
||||
JSONArray currentMintingAccountJson = new JSONArray();
|
||||
for (MintingAccountData mintingAccountData : allMintingAccountData) {
|
||||
JSONObject mintingAccountDataJson = mintingAccountData.toJson();
|
||||
currentMintingAccountJson.put(mintingAccountDataJson);
|
||||
}
|
||||
|
||||
// Wrap current trade bot data in an object to indicate the type
|
||||
JSONObject currentMintingAccountDataJsonWrapper = new JSONObject();
|
||||
currentMintingAccountDataJsonWrapper.put("type", "mintingAccounts");
|
||||
currentMintingAccountDataJsonWrapper.put("dataset", "current");
|
||||
currentMintingAccountDataJsonWrapper.put("data", currentMintingAccountJson);
|
||||
|
||||
// Write current trade bot data (just the ones currently in the database)
|
||||
String fileName = Paths.get(backupDirectory.toString(), "MintingAccounts.json").toString();
|
||||
FileWriter writer = new FileWriter(fileName);
|
||||
writer.write(currentMintingAccountDataJsonWrapper.toString(2));
|
||||
writer.close();
|
||||
|
||||
} catch (DataException | IOException e) {
|
||||
throw new DataException("Unable to export minting accounts from repository");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Utils */
|
||||
|
||||
/**
|
||||
* Imports data from supplied file
|
||||
* Data type is loaded from the file itself, and if missing, TradeBotStates is assumed
|
||||
*
|
||||
* @param filename
|
||||
* @param repository
|
||||
* @throws DataException
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void importDataFromFile(String filename, Repository repository) throws DataException, IOException {
|
||||
Path path = Paths.get(filename);
|
||||
if (!path.toFile().exists()) {
|
||||
throw new FileNotFoundException(String.format("File doesn't exist: %s", filename));
|
||||
}
|
||||
byte[] fileContents = Files.readAllBytes(path);
|
||||
if (fileContents == null) {
|
||||
throw new FileNotFoundException(String.format("Unable to read file contents: %s", filename));
|
||||
}
|
||||
|
||||
LOGGER.info(String.format("Importing %s into repository ...", filename));
|
||||
|
||||
String jsonString = new String(fileContents);
|
||||
Triple<String, String, JSONArray> parsedJSON = HSQLDBImportExport.parseJSONString(jsonString);
|
||||
if (parsedJSON.getA() == null || parsedJSON.getC() == null) {
|
||||
throw new DataException(String.format("Missing data when importing %s into repository", filename));
|
||||
}
|
||||
String type = parsedJSON.getA();
|
||||
JSONArray data = parsedJSON.getC();
|
||||
|
||||
Iterator<Object> iterator = data.iterator();
|
||||
while(iterator.hasNext()) {
|
||||
JSONObject dataJsonObject = (JSONObject)iterator.next();
|
||||
|
||||
if (type.equals("tradeBotStates")) {
|
||||
HSQLDBImportExport.importTradeBotDataJSON(dataJsonObject, repository);
|
||||
}
|
||||
else if (type.equals("mintingAccounts")) {
|
||||
HSQLDBImportExport.importMintingAccountDataJSON(dataJsonObject, repository);
|
||||
}
|
||||
else {
|
||||
throw new DataException(String.format("Unrecognized data type when importing %s into repository", filename));
|
||||
}
|
||||
|
||||
}
|
||||
LOGGER.info(String.format("Imported %s into repository from %s", type, filename));
|
||||
}
|
||||
|
||||
private static void importTradeBotDataJSON(JSONObject tradeBotDataJson, Repository repository) throws DataException {
|
||||
TradeBotData tradeBotData = TradeBotData.fromJson(tradeBotDataJson);
|
||||
repository.getCrossChainRepository().save(tradeBotData);
|
||||
}
|
||||
|
||||
private static void importMintingAccountDataJSON(JSONObject mintingAccountDataJson, Repository repository) throws DataException {
|
||||
MintingAccountData mintingAccountData = MintingAccountData.fromJson(mintingAccountDataJson);
|
||||
repository.getAccountRepository().save(mintingAccountData);
|
||||
}
|
||||
|
||||
public static Path getExportDirectory(boolean createIfNotExists) throws DataException {
|
||||
Path backupPath = Paths.get(Settings.getInstance().getExportPath());
|
||||
|
||||
if (createIfNotExists) {
|
||||
// Create the qortal-backup folder if it doesn't exist
|
||||
try {
|
||||
Files.createDirectories(backupPath);
|
||||
} catch (IOException e) {
|
||||
LOGGER.info(String.format("Unable to create %s folder", backupPath.toString()));
|
||||
throw new DataException(String.format("Unable to create %s folder", backupPath.toString()));
|
||||
}
|
||||
}
|
||||
|
||||
return backupPath;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a JSON string and returns "data", "type", and "dataset" fields.
|
||||
* In the case of legacy JSON files with no type, they are assumed to be TradeBotStates archives,
|
||||
* as we had never implemented this for any other types.
|
||||
*
|
||||
* @param jsonString
|
||||
* @return Triple<String, String, JSONArray> (type, dataset, data)
|
||||
*/
|
||||
private static Triple<String, String, JSONArray> parseJSONString(String jsonString) throws DataException {
|
||||
String type = null;
|
||||
String dataset = null;
|
||||
JSONArray data = null;
|
||||
|
||||
try {
|
||||
// Firstly try importing the new format
|
||||
JSONObject jsonData = new JSONObject(jsonString);
|
||||
if (jsonData != null && jsonData.getString("type") != null) {
|
||||
|
||||
type = jsonData.getString("type");
|
||||
dataset = jsonData.getString("dataset");
|
||||
data = jsonData.getJSONArray("data");
|
||||
}
|
||||
|
||||
} catch (JSONException e) {
|
||||
// Could be a legacy format which didn't contain a type or any other outer keys, so try importing that
|
||||
// Treat these as TradeBotStates archives, given that this was the only type previously implemented
|
||||
try {
|
||||
type = "tradeBotStates";
|
||||
dataset = "archive";
|
||||
data = new JSONArray(jsonString);
|
||||
|
||||
} catch (JSONException e2) {
|
||||
// Still failed, so give up
|
||||
throw new DataException("Couldn't import JSON file");
|
||||
}
|
||||
}
|
||||
|
||||
return new Triple(type, dataset, data);
|
||||
}
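A brief usage sketch of the parser above, with a minimal inline example of the wrapped export format produced by the backup methods (legacy bare-array files are treated as a tradeBotStates archive):

    // Illustrative sketch only
    String example = "{ \"type\": \"tradeBotStates\", \"dataset\": \"archive\", \"data\": [] }";
    Triple<String, String, JSONArray> parsed = parseJSONString(example);
    // parsed.getA() -> "tradeBotStates", parsed.getB() -> "archive", parsed.getC() -> the (empty) data array
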
|
||||
|
||||
}
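Putting the pieces together, a node-local export/import round trip might look like the sketch below. The RepositoryManager.getRepository() call and the exact file locations are assumptions; the method names and the default "qortal-backup" export path come from this commit.

    // Hypothetical round trip: export node-local data, then re-import it after a repository rebuild
    try (final Repository repository = RepositoryManager.getRepository()) {
        HSQLDBImportExport.backupTradeBotStates(repository);
        HSQLDBImportExport.backupMintingAccounts(repository);

        // ...later, e.g. once a freshly bootstrapped repository is in place...
        HSQLDBImportExport.importDataFromFile("qortal-backup/TradeBotStates.json", repository);
        HSQLDBImportExport.importDataFromFile("qortal-backup/MintingAccounts.json", repository);
        repository.saveChanges();
    } catch (DataException | IOException e) {
        LOGGER.error("Unable to export/import node-local data", e);
    }
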
|
@ -2,7 +2,6 @@ package org.qortal.repository.hsqldb;
|
||||
|
||||
import java.awt.TrayIcon.MessageType;
|
||||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.math.BigDecimal;
|
||||
import java.nio.file.Files;
|
||||
@ -17,39 +16,20 @@ import java.sql.SQLException;
|
||||
import java.sql.Savepoint;
|
||||
import java.sql.Statement;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONObject;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.crosschain.TradeBotData;
|
||||
import org.qortal.globalization.Translator;
|
||||
import org.qortal.gui.SysTray;
|
||||
import org.qortal.repository.ATRepository;
|
||||
import org.qortal.repository.AccountRepository;
|
||||
import org.qortal.repository.ArbitraryRepository;
|
||||
import org.qortal.repository.AssetRepository;
|
||||
import org.qortal.repository.BlockRepository;
|
||||
import org.qortal.repository.ChatRepository;
|
||||
import org.qortal.repository.CrossChainRepository;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.GroupRepository;
|
||||
import org.qortal.repository.MessageRepository;
|
||||
import org.qortal.repository.NameRepository;
|
||||
import org.qortal.repository.NetworkRepository;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.repository.TransactionRepository;
|
||||
import org.qortal.repository.VotingRepository;
|
||||
import org.qortal.repository.*;
|
||||
import org.qortal.repository.hsqldb.transaction.HSQLDBTransactionRepository;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
public class HSQLDBRepository implements Repository {
|
||||
|
||||
@ -69,12 +49,14 @@ public class HSQLDBRepository implements Repository {
|
||||
protected final Map<String, PreparedStatement> preparedStatementCache = new HashMap<>();
|
||||
// We want the same object corresponding to the actual DB
|
||||
protected final Object trimHeightsLock = RepositoryManager.getRepositoryFactory();
|
||||
protected final Object latestATStatesLock = RepositoryManager.getRepositoryFactory();
|
||||
|
||||
private final ATRepository atRepository = new HSQLDBATRepository(this);
|
||||
private final AccountRepository accountRepository = new HSQLDBAccountRepository(this);
|
||||
private final ArbitraryRepository arbitraryRepository = new HSQLDBArbitraryRepository(this);
|
||||
private final AssetRepository assetRepository = new HSQLDBAssetRepository(this);
|
||||
private final BlockRepository blockRepository = new HSQLDBBlockRepository(this);
|
||||
private final BlockArchiveRepository blockArchiveRepository = new HSQLDBBlockArchiveRepository(this);
|
||||
private final ChatRepository chatRepository = new HSQLDBChatRepository(this);
|
||||
private final CrossChainRepository crossChainRepository = new HSQLDBCrossChainRepository(this);
|
||||
private final GroupRepository groupRepository = new HSQLDBGroupRepository(this);
|
||||
@ -142,6 +124,11 @@ public class HSQLDBRepository implements Repository {
|
||||
return this.blockRepository;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockArchiveRepository getBlockArchiveRepository() {
|
||||
return this.blockArchiveRepository;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ChatRepository getChatRepository() {
|
||||
return this.chatRepository;
|
||||
@ -281,7 +268,7 @@ public class HSQLDBRepository implements Repository {
|
||||
public void close() throws DataException {
|
||||
// Already closed? No need to do anything but maybe report double-call
|
||||
if (this.connection == null) {
|
||||
LOGGER.warn("HSQLDBRepository.close() called when repository already closed", new Exception("Repository already closed"));
|
||||
LOGGER.warn("HSQLDBRepository.close() called when repository already closed. This is expected when bootstrapping.");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -393,133 +380,104 @@ public class HSQLDBRepository implements Repository {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void backup(boolean quick) throws DataException {
|
||||
if (!quick)
|
||||
// First perform a CHECKPOINT
|
||||
public void backup(boolean quick, String name, Long timeout) throws DataException, TimeoutException {
|
||||
synchronized (CHECKPOINT_LOCK) {
|
||||
|
||||
// We can only perform a CHECKPOINT if no other HSQLDB session is mid-transaction,
|
||||
// otherwise the CHECKPOINT blocks for COMMITs and other threads can't open HSQLDB sessions
|
||||
// due to HSQLDB blocking until CHECKPOINT finishes - i.e. deadlock.
|
||||
// Since we don't want to give up too easily, it's best to wait until the other transaction
|
||||
// count reaches zero, and then continue.
|
||||
this.blockUntilNoOtherTransactions(timeout);
|
||||
|
||||
if (!quick)
|
||||
// First perform a CHECKPOINT
|
||||
try (Statement stmt = this.connection.createStatement()) {
|
||||
LOGGER.info("Performing maintenance - this will take a while...");
|
||||
stmt.execute("CHECKPOINT");
|
||||
stmt.execute("CHECKPOINT DEFRAG");
|
||||
LOGGER.info("Maintenance completed");
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to prepare repository for backup");
|
||||
}
|
||||
|
||||
// Clean out any previous backup
|
||||
try {
|
||||
String connectionUrl = this.connection.getMetaData().getURL();
|
||||
String dbPathname = getDbPathname(connectionUrl);
|
||||
if (dbPathname == null)
|
||||
throw new DataException("Unable to locate repository for backup?");
|
||||
|
||||
// Doesn't really make sense to backup an in-memory database...
|
||||
if (dbPathname.equals("mem")) {
|
||||
LOGGER.debug("Ignoring request to backup in-memory repository!");
|
||||
return;
|
||||
}
|
||||
|
||||
String backupUrl = buildBackupUrl(dbPathname, name);
|
||||
String backupPathname = getDbPathname(backupUrl);
|
||||
if (backupPathname == null)
|
||||
throw new DataException("Unable to determine location for repository backup?");
|
||||
|
||||
Path backupDirPath = Paths.get(backupPathname).getParent();
|
||||
String backupDirPathname = backupDirPath.toString();
|
||||
|
||||
try (Stream<Path> paths = Files.walk(backupDirPath)) {
|
||||
paths.sorted(Comparator.reverseOrder())
|
||||
.map(Path::toFile)
|
||||
.filter(file -> file.getPath().startsWith(backupDirPathname))
|
||||
.forEach(File::delete);
|
||||
}
|
||||
} catch (NoSuchFileException e) {
|
||||
// Nothing to remove
|
||||
} catch (SQLException | IOException e) {
|
||||
throw new DataException("Unable to remove previous repository backup");
|
||||
}
|
||||
|
||||
// Actually create backup
|
||||
try (Statement stmt = this.connection.createStatement()) {
|
||||
stmt.execute("CHECKPOINT DEFRAG");
|
||||
LOGGER.info("Backing up repository...");
|
||||
stmt.execute(String.format("BACKUP DATABASE TO '%s/' BLOCKING AS FILES", name));
|
||||
LOGGER.info("Backup completed");
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to prepare repository for backup");
|
||||
throw new DataException("Unable to backup repository");
|
||||
}
|
||||
|
||||
// Clean out any previous backup
|
||||
try {
|
||||
String connectionUrl = this.connection.getMetaData().getURL();
|
||||
String dbPathname = getDbPathname(connectionUrl);
|
||||
if (dbPathname == null)
|
||||
throw new DataException("Unable to locate repository for backup?");
|
||||
|
||||
// Doesn't really make sense to backup an in-memory database...
|
||||
if (dbPathname.equals("mem")) {
|
||||
LOGGER.debug("Ignoring request to backup in-memory repository!");
|
||||
return;
|
||||
}
|
||||
|
||||
String backupUrl = buildBackupUrl(dbPathname);
|
||||
String backupPathname = getDbPathname(backupUrl);
|
||||
if (backupPathname == null)
|
||||
throw new DataException("Unable to determine location for repository backup?");
|
||||
|
||||
Path backupDirPath = Paths.get(backupPathname).getParent();
|
||||
String backupDirPathname = backupDirPath.toString();
|
||||
|
||||
try (Stream<Path> paths = Files.walk(backupDirPath)) {
|
||||
paths.sorted(Comparator.reverseOrder())
|
||||
.map(Path::toFile)
|
||||
.filter(file -> file.getPath().startsWith(backupDirPathname))
|
||||
.forEach(File::delete);
|
||||
}
|
||||
} catch (NoSuchFileException e) {
|
||||
// Nothing to remove
|
||||
} catch (SQLException | IOException e) {
|
||||
throw new DataException("Unable to remove previous repository backup");
|
||||
}
|
||||
|
||||
// Actually create backup
|
||||
try (Statement stmt = this.connection.createStatement()) {
|
||||
stmt.execute("BACKUP DATABASE TO 'backup/' BLOCKING AS FILES");
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to backup repository");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void performPeriodicMaintenance() throws DataException {
|
||||
// Defrag DB - takes a while!
|
||||
try (Statement stmt = this.connection.createStatement()) {
|
||||
LOGGER.info("performing maintenance - this will take a while");
|
||||
stmt.execute("CHECKPOINT");
|
||||
stmt.execute("CHECKPOINT DEFRAG");
|
||||
LOGGER.info("maintenance completed");
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to defrag repository");
|
||||
public void performPeriodicMaintenance(Long timeout) throws DataException, TimeoutException {
|
||||
synchronized (CHECKPOINT_LOCK) {
|
||||
|
||||
// We can only perform a CHECKPOINT if no other HSQLDB session is mid-transaction,
|
||||
// otherwise the CHECKPOINT blocks for COMMITs and other threads can't open HSQLDB sessions
|
||||
// due to HSQLDB blocking until CHECKPOINT finishes - i.e. deadlock.
|
||||
// Since we don't want to give up too easily, it's best to wait until the other transaction
|
||||
// count reaches zero, and then continue.
|
||||
this.blockUntilNoOtherTransactions(timeout);
|
||||
|
||||
// Defrag DB - takes a while!
|
||||
try (Statement stmt = this.connection.createStatement()) {
|
||||
LOGGER.info("performing maintenance - this will take a while");
|
||||
stmt.execute("CHECKPOINT");
|
||||
stmt.execute("CHECKPOINT DEFRAG");
|
||||
LOGGER.info("maintenance completed");
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to defrag repository");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void exportNodeLocalData() throws DataException {
|
||||
// Create the qortal-backup folder if it doesn't exist
|
||||
Path backupPath = Paths.get("qortal-backup");
|
||||
try {
|
||||
Files.createDirectories(backupPath);
|
||||
} catch (IOException e) {
|
||||
LOGGER.info("Unable to create backup folder");
|
||||
throw new DataException("Unable to create backup folder");
|
||||
}
|
||||
|
||||
try {
|
||||
// Load trade bot data
|
||||
List<TradeBotData> allTradeBotData = this.getCrossChainRepository().getAllTradeBotData();
|
||||
JSONArray allTradeBotDataJson = new JSONArray();
|
||||
for (TradeBotData tradeBotData : allTradeBotData) {
|
||||
JSONObject tradeBotDataJson = tradeBotData.toJson();
|
||||
allTradeBotDataJson.put(tradeBotDataJson);
|
||||
}
|
||||
|
||||
// We need to combine existing TradeBotStates data before overwriting
|
||||
String fileName = "qortal-backup/TradeBotStates.json";
|
||||
File tradeBotStatesBackupFile = new File(fileName);
|
||||
if (tradeBotStatesBackupFile.exists()) {
|
||||
String jsonString = new String(Files.readAllBytes(Paths.get(fileName)));
|
||||
JSONArray allExistingTradeBotData = new JSONArray(jsonString);
|
||||
Iterator<Object> iterator = allExistingTradeBotData.iterator();
|
||||
while(iterator.hasNext()) {
|
||||
JSONObject existingTradeBotData = (JSONObject)iterator.next();
|
||||
String existingTradePrivateKey = (String) existingTradeBotData.get("tradePrivateKey");
|
||||
// Check if we already have an entry for this trade
|
||||
boolean found = allTradeBotData.stream().anyMatch(tradeBotData -> Base58.encode(tradeBotData.getTradePrivateKey()).equals(existingTradePrivateKey));
|
||||
if (found == false)
|
||||
// We need to add this to our list
|
||||
allTradeBotDataJson.put(existingTradeBotData);
|
||||
}
|
||||
}
|
||||
|
||||
FileWriter writer = new FileWriter(fileName);
|
||||
writer.write(allTradeBotDataJson.toString());
|
||||
writer.close();
|
||||
LOGGER.info("Exported sensitive/node-local data: trade bot states");
|
||||
|
||||
} catch (DataException | IOException e) {
|
||||
throw new DataException("Unable to export trade bot states from repository");
|
||||
}
|
||||
HSQLDBImportExport.backupTradeBotStates(this);
|
||||
HSQLDBImportExport.backupMintingAccounts(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void importDataFromFile(String filename) throws DataException {
|
||||
LOGGER.info(() -> String.format("Importing data into repository from %s", filename));
|
||||
try {
|
||||
String jsonString = new String(Files.readAllBytes(Paths.get(filename)));
|
||||
JSONArray tradeBotDataToImport = new JSONArray(jsonString);
|
||||
Iterator<Object> iterator = tradeBotDataToImport.iterator();
|
||||
while(iterator.hasNext()) {
|
||||
JSONObject tradeBotDataJson = (JSONObject)iterator.next();
|
||||
TradeBotData tradeBotData = TradeBotData.fromJson(tradeBotDataJson);
|
||||
this.getCrossChainRepository().save(tradeBotData);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new DataException("Unable to import sensitive/node-local trade bot states to repository: " + e.getMessage());
|
||||
}
|
||||
LOGGER.info(() -> String.format("Imported trade bot states into repository from %s", filename));
|
||||
public void importDataFromFile(String filename) throws DataException, IOException {
|
||||
HSQLDBImportExport.importDataFromFile(filename, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -541,22 +499,22 @@ public class HSQLDBRepository implements Repository {
|
||||
return matcher.group(2);
|
||||
}
|
||||
|
||||
private static String buildBackupUrl(String dbPathname) {
|
||||
private static String buildBackupUrl(String dbPathname, String backupName) {
|
||||
Path oldRepoPath = Paths.get(dbPathname);
|
||||
Path oldRepoDirPath = oldRepoPath.getParent();
|
||||
Path oldRepoFilePath = oldRepoPath.getFileName();
|
||||
|
||||
// Try to open backup. We need to remove "create=true" and insert "backup" dir before final filename.
|
||||
String backupUrlTemplate = "jdbc:hsqldb:file:%s%sbackup%s%s;create=false;hsqldb.full_log_replay=true";
|
||||
return String.format(backupUrlTemplate, oldRepoDirPath.toString(), File.separator, File.separator, oldRepoFilePath.toString());
|
||||
String backupUrlTemplate = "jdbc:hsqldb:file:%s%s%s%s%s;create=false;hsqldb.full_log_replay=true";
|
||||
return String.format(backupUrlTemplate, oldRepoDirPath.toString(), File.separator, backupName, File.separator, oldRepoFilePath.toString());
|
||||
}
|
||||
|
||||
/* package */ static void attemptRecovery(String connectionUrl) throws DataException {
|
||||
/* package */ static void attemptRecovery(String connectionUrl, String name) throws DataException {
|
||||
String dbPathname = getDbPathname(connectionUrl);
|
||||
if (dbPathname == null)
|
||||
throw new DataException("Unable to locate repository for backup?");
|
||||
|
||||
String backupUrl = buildBackupUrl(dbPathname);
|
||||
String backupUrl = buildBackupUrl(dbPathname, name);
|
||||
Path oldRepoDirPath = Paths.get(dbPathname).getParent();
|
||||
|
||||
// Attempt connection to backup to see if it is viable
|
||||
@ -1059,4 +1017,51 @@ public class HSQLDBRepository implements Repository {
|
||||
return DEADLOCK_ERROR_CODE.equals(e.getErrorCode());
|
||||
}
|
||||
|
||||
private int otherTransactionsCount() throws DataException {
|
||||
// We can only perform a CHECKPOINT if no other HSQLDB session is mid-transaction,
|
||||
// otherwise the CHECKPOINT blocks for COMMITs and other threads can't open HSQLDB sessions
|
||||
// due to HSQLDB blocking until CHECKPOINT finishes - i.e. deadlock
|
||||
String sql = "SELECT COUNT(*) "
|
||||
+ "FROM Information_schema.system_sessions "
|
||||
+ "WHERE transaction = TRUE AND session_id != ?";
|
||||
try {
|
||||
PreparedStatement pstmt = this.cachePreparedStatement(sql);
|
||||
pstmt.setLong(1, this.sessionId);
|
||||
|
||||
if (!pstmt.execute())
|
||||
throw new DataException("Unable to check repository session status");
|
||||
|
||||
try (ResultSet resultSet = pstmt.getResultSet()) {
|
||||
if (resultSet == null || !resultSet.next())
|
||||
// Failed to even find HSQLDB session info!
|
||||
throw new DataException("No results when checking repository session status");
|
||||
|
||||
int transactionCount = resultSet.getInt(1);
|
||||
|
||||
return transactionCount;
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
throw new DataException("Unable to check repository session status", e);
|
||||
}
|
||||
}
|
||||
|
||||
private void blockUntilNoOtherTransactions(Long timeout) throws DataException, TimeoutException {
|
||||
try {
|
||||
long startTime = System.currentTimeMillis();
|
||||
while (this.otherTransactionsCount() > 0) {
|
||||
// Wait and try again
|
||||
LOGGER.debug("Waiting for repository...");
|
||||
Thread.sleep(1000L);
|
||||
|
||||
if (timeout != null) {
|
||||
if (System.currentTimeMillis() - startTime >= timeout) {
|
||||
throw new TimeoutException("Timed out waiting for repository to become available");
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
throw new DataException("Interrupted before repository became available");
|
||||
}
|
||||
}
|
||||
|
||||
}
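Any future operation that issues a CHECKPOINT should follow the same pattern as backup() and performPeriodicMaintenance() above. A minimal sketch, where CHECKPOINT_LOCK is the lock those methods already synchronize on:

    // Sketch of the checkpoint-safe pattern used above
    synchronized (CHECKPOINT_LOCK) {
        // Wait until no other HSQLDB session is mid-transaction, or give up after 'timeout' ms
        this.blockUntilNoOtherTransactions(timeout);

        try (Statement stmt = this.connection.createStatement()) {
            stmt.execute("CHECKPOINT");
        } catch (SQLException e) {
            throw new DataException("Unable to checkpoint repository", e);
        }
    }
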
|
||||
|
@ -54,7 +54,7 @@ public class HSQLDBRepositoryFactory implements RepositoryFactory {
|
||||
throw new DataException("Unable to read repository: " + e.getMessage(), e);
|
||||
|
||||
// Attempt recovery?
|
||||
HSQLDBRepository.attemptRecovery(connectionUrl);
|
||||
HSQLDBRepository.attemptRecovery(connectionUrl, "backup");
|
||||
}
|
||||
|
||||
this.connectionPool = new HSQLDBPool(Settings.getInstance().getRepositoryConnectionPoolSize());
|
||||
|
@ -74,6 +74,9 @@ public class Settings {
|
||||
};
|
||||
private Boolean apiRestricted;
|
||||
private String apiKey = null;
|
||||
/** Whether to disable API key or loopback address checking
|
||||
* IMPORTANT: do not disable for shared nodes or low-security local networks */
|
||||
private boolean apiKeyDisabled = false;
|
||||
private boolean apiLoggingEnabled = false;
|
||||
private boolean apiDocumentationEnabled = false;
|
||||
// Both of these need to be set for API to use SSL
|
||||
@ -98,6 +101,12 @@ public class Settings {
|
||||
private long repositoryBackupInterval = 0; // ms
|
||||
/** Whether to show a notification when we backup repository. */
|
||||
private boolean showBackupNotification = false;
|
||||
/** Minimum time between repository maintenance attempts (ms) */
|
||||
private long repositoryMaintenanceMinInterval = 7 * 24 * 60 * 60 * 1000L; // 7 days (ms) default
|
||||
/** Maximum time between repository maintenance attempts (ms) (0 if disabled). */
|
||||
private long repositoryMaintenanceMaxInterval = 30 * 24 * 60 * 60 * 1000L; // 30 days (ms) default
|
||||
/** Whether to show a notification when we run scheduled maintenance. */
|
||||
private boolean showMaintenanceNotification = false;
|
||||
/** How long between repository checkpoints (ms). */
|
||||
private long repositoryCheckpointInterval = 60 * 60 * 1000L; // 1 hour (ms) default
|
||||
/** Whether to show a notification when we perform repository 'checkpoint'. */
|
||||
@ -106,7 +115,7 @@ public class Settings {
|
||||
private int blockCacheSize = 10;
|
||||
|
||||
/** How long to keep old, full, AT state data (ms). */
|
||||
private long atStatesMaxLifetime = 2 * 7 * 24 * 60 * 60 * 1000L; // milliseconds
|
||||
private long atStatesMaxLifetime = 5 * 24 * 60 * 60 * 1000L; // milliseconds
|
||||
/** How often to attempt AT state trimming (ms). */
|
||||
private long atStatesTrimInterval = 5678L; // milliseconds
|
||||
/** Block height range to scan for trimmable AT states.<br>
|
||||
@ -121,6 +130,36 @@ public class Settings {
|
||||
* This has a significant effect on execution time. */
|
||||
private int onlineSignaturesTrimBatchSize = 100; // blocks
|
||||
|
||||
|
||||
/** Whether we should prune old data to reduce database size
|
||||
* This prevents the node from being able to serve older blocks */
|
||||
private boolean topOnly = false;
|
||||
/** The amount of recent blocks we should keep when pruning */
|
||||
private int pruneBlockLimit = 1450;
|
||||
|
||||
/** How often to attempt AT state pruning (ms). */
|
||||
private long atStatesPruneInterval = 3219L; // milliseconds
|
||||
/** Block height range to scan for prunable AT states.<br>
|
||||
* This has a significant effect on execution time. */
|
||||
private int atStatesPruneBatchSize = 25; // blocks
|
||||
|
||||
/** How often to attempt block pruning (ms). */
|
||||
private long blockPruneInterval = 3219L; // milliseconds
|
||||
/** Block height range to scan for prunable blocks.<br>
|
||||
* This has a significant effect on execution time. */
|
||||
private int blockPruneBatchSize = 10000; // blocks
|
||||
|
||||
|
||||
/** Whether we should archive old data to reduce the database size */
|
||||
private boolean archiveEnabled = true;
|
||||
/** How often to attempt archiving (ms). */
|
||||
private long archiveInterval = 7171L; // milliseconds
|
||||
|
||||
|
||||
/** Whether to automatically bootstrap instead of syncing from genesis */
|
||||
private boolean bootstrap = true;
|
||||
|
||||
|
||||
// Peer-to-peer related
|
||||
private boolean isTestNet = false;
|
||||
/** Port number for inbound peer-to-peer connections. */
|
||||
@ -179,12 +218,27 @@ public class Settings {
|
||||
private int repositoryConnectionPoolSize = 100;
|
||||
private List<String> fixedNetwork;
|
||||
|
||||
// Export/import
|
||||
private String exportPath = "qortal-backup";
|
||||
|
||||
// Bootstrap
|
||||
private String bootstrapFilenamePrefix = "";
|
||||
|
||||
// Bootstrap sources
|
||||
private String[] bootstrapHosts = new String[] {
|
||||
"http://bootstrap.qortal.org",
|
||||
"http://cinfu1.crowetic.com"
|
||||
};
|
||||
|
||||
// Auto-update sources
|
||||
private String[] autoUpdateRepos = new String[] {
|
||||
"https://github.com/Qortal/qortal/raw/%s/qortal.update",
|
||||
"https://raw.githubusercontent.com@151.101.16.133/Qortal/qortal/%s/qortal.update"
|
||||
};
|
||||
|
||||
// Lists
|
||||
private String listsPath = "lists";
|
||||
|
||||
/** Array of NTP server hostnames. */
|
||||
private String[] ntpServers = new String[] {
|
||||
"pool.ntp.org",
|
||||
@ -412,6 +466,10 @@ public class Settings {
|
||||
return this.apiKey;
|
||||
}
|
||||
|
||||
public boolean isApiKeyDisabled() {
|
||||
return this.apiKeyDisabled;
|
||||
}
|
||||
|
||||
public boolean isApiLoggingEnabled() {
|
||||
return this.apiLoggingEnabled;
|
||||
}
|
||||
@ -552,6 +610,14 @@ public class Settings {
|
||||
return this.repositoryConnectionPoolSize;
|
||||
}
|
||||
|
||||
public String getExportPath() {
|
||||
return this.exportPath;
|
||||
}
|
||||
|
||||
public String getBootstrapFilenamePrefix() {
|
||||
return this.bootstrapFilenamePrefix;
|
||||
}
|
||||
|
||||
public boolean isFastSyncEnabled() {
|
||||
return this.fastSyncEnabled;
|
||||
}
|
||||
@ -574,6 +640,14 @@ public class Settings {
|
||||
return this.autoUpdateRepos;
|
||||
}
|
||||
|
||||
public String[] getBootstrapHosts() {
|
||||
return this.bootstrapHosts;
|
||||
}
|
||||
|
||||
public String getListsPath() {
|
||||
return this.listsPath;
|
||||
}
|
||||
|
||||
public String[] getNtpServers() {
|
||||
return this.ntpServers;
|
||||
}
|
||||
@ -590,6 +664,18 @@ public class Settings {
|
||||
return this.showBackupNotification;
|
||||
}
|
||||
|
||||
public long getRepositoryMaintenanceMinInterval() {
|
||||
return this.repositoryMaintenanceMinInterval;
|
||||
}
|
||||
|
||||
public long getRepositoryMaintenanceMaxInterval() {
|
||||
return this.repositoryMaintenanceMaxInterval;
|
||||
}
|
||||
|
||||
public boolean getShowMaintenanceNotification() {
|
||||
return this.showMaintenanceNotification;
|
||||
}
|
||||
|
||||
public long getRepositoryCheckpointInterval() {
|
||||
return this.repositoryCheckpointInterval;
|
||||
}
|
||||
@ -598,6 +684,10 @@ public class Settings {
|
||||
return this.showCheckpointNotification;
|
||||
}
|
||||
|
||||
public List<String> getFixedNetwork() {
|
||||
return fixedNetwork;
|
||||
}
|
||||
|
||||
public long getAtStatesMaxLifetime() {
|
||||
return this.atStatesMaxLifetime;
|
||||
}
|
||||
@ -622,10 +712,48 @@ public class Settings {
|
||||
return this.onlineSignaturesTrimBatchSize;
|
||||
}
|
||||
|
||||
public List<String> getFixedNetwork() {
|
||||
return fixedNetwork;
|
||||
public boolean isTopOnly() {
|
||||
return this.topOnly;
|
||||
}
|
||||
|
||||
public int getPruneBlockLimit() {
|
||||
return this.pruneBlockLimit;
|
||||
}
|
||||
|
||||
public long getAtStatesPruneInterval() {
|
||||
return this.atStatesPruneInterval;
|
||||
}
|
||||
|
||||
public int getAtStatesPruneBatchSize() {
|
||||
return this.atStatesPruneBatchSize;
|
||||
}
|
||||
|
||||
public long getBlockPruneInterval() {
|
||||
return this.blockPruneInterval;
|
||||
}
|
||||
|
||||
public int getBlockPruneBatchSize() {
|
||||
return this.blockPruneBatchSize;
|
||||
}
|
||||
|
||||
|
||||
public boolean isArchiveEnabled() {
|
||||
if (this.topOnly) {
|
||||
return false;
|
||||
}
|
||||
return this.archiveEnabled;
|
||||
}
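The interaction between topOnly and archiveEnabled can be summarised from the getters above; a small sketch of how a pruning/archiving component might branch on them (the component itself is hypothetical):

    // Hypothetical consumer of the settings above
    Settings settings = Settings.getInstance();
    if (settings.isTopOnly()) {
        // Keep only the most recent getPruneBlockLimit() blocks; isArchiveEnabled() reports false in this mode
    } else if (settings.isArchiveEnabled()) {
        // Prune the database, but write old blocks to archive files first
    } else {
        // Neither top-only nor archiving: retain the full block database
    }
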
|
||||
|
||||
public long getArchiveInterval() {
|
||||
return this.archiveInterval;
|
||||
}
|
||||
|
||||
|
||||
public boolean getBootstrap() {
|
||||
return this.bootstrap;
|
||||
}
|
||||
|
||||
|
||||
public String getDataPath() {
|
||||
return this.dataPath;
|
||||
}
|
||||
|
@ -48,6 +48,11 @@ public class AccountFlagsTransaction extends Transaction {
|
||||
return ValidationResult.NO_FLAG_PERMISSION;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
Account target = this.getTarget();
|
||||
|
@ -49,6 +49,11 @@ public class AccountLevelTransaction extends Transaction {
|
||||
return ValidationResult.NO_FLAG_PERMISSION;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
Account target = getTarget();
|
||||
|
@ -84,6 +84,11 @@ public class AddGroupAdminTransaction extends Transaction {
|
||||
return ValidationResult.OK;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
// Update Group adminship
|
||||
@ -98,4 +103,4 @@ public class AddGroupAdminTransaction extends Transaction {
|
||||
group.unpromoteToAdmin(this.addGroupAdminTransactionData);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -195,6 +195,11 @@ public class ArbitraryTransaction extends Transaction {
|
||||
arbitraryTransactionData.getFee());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
// Wrap and delegate payment processing to Payment class.
|
||||
|
@ -80,6 +80,11 @@ public class AtTransaction extends Transaction {
|
||||
return Arrays.equals(atAccount.getLastReference(), atTransactionData.getReference());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public ValidationResult isValid() throws DataException {
|
||||
// Check recipient address is valid
|
||||
|
@ -6,6 +6,7 @@ import java.util.List;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.naming.NameData;
|
||||
import org.qortal.data.transaction.BuyNameTransactionData;
|
||||
@ -98,6 +99,17 @@ public class BuyNameTransaction extends Transaction {
|
||||
return ValidationResult.OK;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData;
|
||||
|
||||
// Rebuild this name in the Names table from the transaction history
|
||||
// This is necessary because in some rare cases names can be missing from the Names table after registration
|
||||
// but we have been unable to reproduce the issue and track down the root cause
|
||||
NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck();
|
||||
namesDatabaseIntegrityCheck.rebuildName(buyNameTransactionData.getName(), this.repository);
|
||||
}
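As the name suggests, preProcess() is intended to run ahead of process(), and unlike the no-op implementations added to most transaction types in this diff, BuyNameTransaction uses it to repair the Names table before the purchase is applied. A hedged sketch of the same shape for some other name-related transaction type; the transaction-data field name below is hypothetical:

    @Override
    public void preProcess() throws DataException {
        // Rebuild the affected name from its transaction history before processing, mirroring the method above
        NamesDatabaseIntegrityCheck namesDatabaseIntegrityCheck = new NamesDatabaseIntegrityCheck();
        namesDatabaseIntegrityCheck.rebuildName(someNameTransactionData.getName(), this.repository);
    }
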
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
// Buy Name
|
||||
|
@ -62,6 +62,11 @@ public class CancelAssetOrderTransaction extends Transaction {
|
||||
return ValidationResult.OK;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
// Mark Order as completed so no more trades can happen
|
||||
|
@ -83,6 +83,11 @@ public class CancelGroupBanTransaction extends Transaction {
|
||||
return ValidationResult.OK;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
// Update Group Membership
|
||||
|
@ -83,6 +83,11 @@ public class CancelGroupInviteTransaction extends Transaction {
|
||||
return ValidationResult.OK;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
// Update Group Membership
|
||||
|
@ -79,6 +79,11 @@ public class CancelSellNameTransaction extends Transaction {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() throws DataException {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process() throws DataException {
|
||||
// Update Name
|
||||
|
@ -11,6 +11,7 @@ import org.qortal.crypto.MemoryPoW;
|
||||
import org.qortal.data.transaction.ChatTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.group.Group;
|
||||
import org.qortal.list.ResourceListManager;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.GroupRepository;
|
||||
import org.qortal.repository.Repository;
|
||||
@ -134,10 +135,21 @@ public class ChatTransaction extends Transaction {
		return true;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public ValidationResult isValid() throws DataException {
		// Nonce checking is done via isSignatureValid() as that method is only called once per import

		// Check for blacklisted author by address
		ResourceListManager listManager = ResourceListManager.getInstance();
		if (listManager.isAddressInBlacklist(this.chatTransactionData.getSender())) {
			return ValidationResult.ADDRESS_IN_BLACKLIST;
		}

		// If we exist in the repository then we've been imported as unconfirmed,
		// but we don't want to make it into a block, so return fake non-OK result.
		if (this.repository.getTransactionRepository().exists(this.chatTransactionData.getSignature()))
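
The CHAT validation above applies two gates before a message enters the unconfirmed pool: reject transactions whose sender address is blacklisted, and report an already-imported transaction as non-OK so it can never be included in a block. A condensed, hypothetical sketch of that decision logic follows; ChatGateSketch, its Result values and the boolean parameters are illustrative stand-ins rather than Qortal APIs.

// Hypothetical condensation of the CHAT import gate shown above.
public class ChatGateSketch {

	enum Result { OK, ADDRESS_IN_BLACKLIST, ALREADY_IMPORTED }   // names illustrative only

	static Result validate(boolean senderBlacklisted, boolean alreadyImported) {
		if (senderBlacklisted)
			return Result.ADDRESS_IN_BLACKLIST;   // drop messages from blacklisted addresses

		if (alreadyImported)
			return Result.ALREADY_IMPORTED;       // "fake" non-OK result keeps it out of blocks

		return Result.OK;                         // accept into the unconfirmed pool
	}
}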
@ -135,6 +135,11 @@ public class CreateAssetOrderTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Order Id is transaction's signature
@ -92,6 +92,11 @@ public class CreateGroupTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Create Group
@ -106,6 +106,11 @@ public class CreatePollTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Publish poll to allow voting
@ -203,6 +203,11 @@ public class DeployAtTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		ensureATAddress(this.deployAtTransactionData);
@ -100,6 +100,11 @@ public class GenesisTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		Account recipient = new Account(repository, this.genesisTransactionData.getRecipient());
@ -66,6 +66,11 @@ public class GroupApprovalTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Find previous approval decision (if any) by this admin for pending transaction
@ -87,6 +87,11 @@ public class GroupBanTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Update Group Membership
@ -88,6 +88,11 @@ public class GroupInviteTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Update Group Membership
@ -89,6 +89,11 @@ public class GroupKickTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Update Group Membership
@ -92,6 +92,11 @@ public class IssueAssetTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Issue asset
@ -67,6 +67,11 @@ public class JoinGroupTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Update Group Membership
@ -67,6 +67,11 @@ public class LeaveGroupTransaction extends Transaction {
		return ValidationResult.OK;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Update Group Membership
@ -239,6 +239,11 @@ public class MessageTransaction extends Transaction {
				getPaymentData(), this.messageTransactionData.getFee(), true);
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// If we have no amount then there's nothing to do
@ -67,6 +67,11 @@ public class MultiPaymentTransaction extends Transaction {
		return new Payment(this.repository).isProcessable(this.multiPaymentTransactionData.getSenderPublicKey(), payments, this.multiPaymentTransactionData.getFee());
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Wrap and delegate payment processing to Payment class.
@ -61,6 +61,11 @@ public class PaymentTransaction extends Transaction {
		return new Payment(this.repository).isProcessable(this.paymentTransactionData.getSenderPublicKey(), getPaymentData(), this.paymentTransactionData.getFee());
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public void process() throws DataException {
		// Wrap and delegate payment processing to Payment class.
@ -149,6 +149,11 @@ public class PresenceTransaction extends Transaction {
		return true;
	}

	@Override
	public void preProcess() throws DataException {
		// Nothing to do
	}

	@Override
	public ValidationResult isValid() throws DataException {
		// Nonce checking is done via isSignatureValid() as that method is only called once per import
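
Both CHAT and PRESENCE above skip nonce checking in isValid() and rely on isSignatureValid() instead, since that method is only called once per import; the effect is that the expensive proof-of-work check is not repeated each time validity is re-evaluated. A hypothetical sketch of that cost-shifting pattern is shown below; OneTimeCheckSketch and its helper methods are illustrative and not Qortal APIs.

// Hypothetical sketch: run the expensive nonce/signature check once, on import,
// and cache the verdict so later, cheaper validity checks need not repeat it.
public class OneTimeCheckSketch {

	private Boolean importCheckPassed;   // null until first evaluated

	boolean isSignatureValid(byte[] data, byte[] signature, int nonce) {
		if (importCheckPassed == null)
			importCheckPassed = verifySignature(data, signature) && verifyExpensiveNonce(data, nonce);
		return importCheckPassed;        // subsequent calls reuse the cached verdict
	}

	boolean isValid() {
		// Cheap structural checks only; the nonce was already covered at import time
		return true;
	}

	// Placeholder implementations so the sketch compiles; real checks would go here
	private boolean verifySignature(byte[] data, byte[] signature) { return signature != null && signature.length > 0; }
	private boolean verifyExpensiveNonce(byte[] data, int nonce) { return nonce >= 0; }
}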
Some files were not shown because too many files have changed in this diff