
Merge branch 'master' into q-apps

# Conflicts:
#	src/main/java/org/qortal/api/resource/ArbitraryResource.java
Branch: qdn-on-chain-data
CalDescent, 2 years ago (parent commit b5ce8d5fb3)
43 changed files, with changed line counts in parentheses:
  1. src/main/java/org/qortal/api/resource/ArbitraryResource.java (33)
  2. src/main/java/org/qortal/api/resource/BlocksResource.java (12)
  3. src/main/java/org/qortal/api/resource/ChatResource.java (2)
  4. src/main/java/org/qortal/api/restricted/resource/AdminResource.java (59)
  5. src/main/java/org/qortal/api/websocket/ChatMessagesWebSocket.java (2)
  6. src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java (6)
  7. src/main/java/org/qortal/block/Block.java (4)
  8. src/main/java/org/qortal/block/BlockChain.java (7)
  9. src/main/java/org/qortal/controller/Controller.java (29)
  10. src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java (121)
  11. src/main/java/org/qortal/network/message/CachedBlockV2Message.java (43)
  12. src/main/java/org/qortal/repository/BlockArchiveReader.java (85)
  13. src/main/java/org/qortal/repository/BlockArchiveWriter.java (129)
  14. src/main/java/org/qortal/repository/ChatRepository.java (2)
  15. src/main/java/org/qortal/repository/RepositoryManager.java (61)
  16. src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java (7)
  17. src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java (88)
  18. src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java (332)
  19. src/main/java/org/qortal/settings/Settings.java (7)
  20. src/main/java/org/qortal/transaction/ArbitraryTransaction.java (18)
  21. src/main/java/org/qortal/transform/block/BlockTransformer.java (28)
  22. src/main/java/org/qortal/utils/BlockArchiveUtils.java (25)
  23. src/main/resources/blockchain.json (3)
  24. src/test/java/org/qortal/test/BlockArchiveV1Tests.java (217)
  25. src/test/java/org/qortal/test/BlockArchiveV2Tests.java (504)
  26. src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java (2)
  27. src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java (8)
  28. src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java (344)
  29. src/test/java/org/qortal/test/common/ArbitraryUtils.java (11)
  30. src/test/resources/test-chain-v2-block-timestamps.json (3)
  31. src/test/resources/test-chain-v2-disable-reference.json (3)
  32. src/test/resources/test-chain-v2-founder-rewards.json (3)
  33. src/test/resources/test-chain-v2-leftover-reward.json (3)
  34. src/test/resources/test-chain-v2-minting.json (5)
  35. src/test/resources/test-chain-v2-qora-holder-extremes.json (3)
  36. src/test/resources/test-chain-v2-qora-holder-reduction.json (3)
  37. src/test/resources/test-chain-v2-qora-holder.json (3)
  38. src/test/resources/test-chain-v2-reward-levels.json (3)
  39. src/test/resources/test-chain-v2-reward-scaling.json (3)
  40. src/test/resources/test-chain-v2-reward-shares.json (3)
  41. src/test/resources/test-chain-v2-self-sponsorship-algo.json (3)
  42. src/test/resources/test-chain-v2.json (3)
  43. src/test/resources/test-settings-v2-block-archive.json (3)

src/main/java/org/qortal/api/resource/ArbitraryResource.java (33)

@@ -781,6 +781,7 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String path) {
Security.checkApiCallAllowed(request);
@@ -790,7 +791,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, null, path, null, null, false,
title, description, tags, category, preview);
fee, title, description, tags, category, preview);
}
@POST
@@ -827,6 +828,7 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String path) {
Security.checkApiCallAllowed(request);
@@ -836,7 +838,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, identifier, path, null, null, false,
title, description, tags, category, preview);
fee, title, description, tags, category, preview);
}
@@ -874,6 +876,7 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64) {
Security.checkApiCallAllowed(request);
@@ -883,7 +886,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, null, null, null, base64, false,
title, description, tags, category, preview);
fee, title, description, tags, category, preview);
}
@POST
@@ -918,6 +921,7 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64) {
Security.checkApiCallAllowed(request);
@@ -927,7 +931,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64, false,
title, description, tags, category, preview);
fee, title, description, tags, category, preview);
}
@@ -964,6 +968,7 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64Zip) {
Security.checkApiCallAllowed(request);
@@ -973,7 +978,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, null, null, null, base64Zip, true,
title, description, tags, category, preview);
fee, title, description, tags, category, preview);
}
@POST
@@ -1008,6 +1013,7 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String base64Zip) {
Security.checkApiCallAllowed(request);
@@ -1017,7 +1023,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, identifier, null, null, base64Zip, true,
title, description, tags, category, preview);
fee, title, description, tags, category, preview);
}
@@ -1057,6 +1063,7 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String string) {
Security.checkApiCallAllowed(request);
@@ -1066,7 +1073,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, null, null, string, null, false,
title, description, tags, category, preview);
fee, title, description, tags, category, preview);
}
@POST
@@ -1103,6 +1110,7 @@ public class ArbitraryResource {
@QueryParam("description") String description,
@QueryParam("tags") List<String> tags,
@QueryParam("category") Category category,
@QueryParam("fee") Long fee,
@QueryParam("preview") Boolean preview,
String string) {
Security.checkApiCallAllowed(request);
@@ -1112,7 +1120,7 @@ public class ArbitraryResource {
}
return this.upload(Service.valueOf(serviceString), name, identifier, null, string, null, false,
title, description, tags, category, preview);
fee, title, description, tags, category, preview);
}
@@ -1151,7 +1159,7 @@ public class ArbitraryResource {
}
private String upload(Service service, String name, String identifier,
String path, String string, String base64, boolean zipped,
String path, String string, String base64, boolean zipped, Long fee,
String title, String description, List<String> tags, Category category,
Boolean preview) {
// Fetch public key from registered name
@@ -1221,9 +1229,14 @@ public class ArbitraryResource {
return this.preview(path, service);
}
// Default to zero fee if not specified
if (fee == null) {
fee = 0L;
}
try {
ArbitraryDataTransactionBuilder transactionBuilder = new ArbitraryDataTransactionBuilder(
repository, publicKey58, Paths.get(path), name, null, service, identifier,
repository, publicKey58, fee, Paths.get(path), name, null, service, identifier,
title, description, tags, category
);
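For reference, a minimal sketch of how the updated builder is driven with the new optional fee (wiring such as repository, publicKey58 and the other upload arguments is assumed from the surrounding class; this mirrors the null-check added above, it is not new behaviour):
    // Sketch only: fee is optional at the API layer and defaults to 0L.
    Long fee = null; // as bound from @QueryParam("fee")
    if (fee == null) {
        fee = 0L; // zero fee keeps the legacy PoW-nonce validation path
    }
    ArbitraryDataTransactionBuilder transactionBuilder = new ArbitraryDataTransactionBuilder(
            repository, publicKey58, fee, Paths.get(path), name, null, service, identifier,
            title, description, tags, category);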

src/main/java/org/qortal/api/resource/BlocksResource.java (12)

@@ -48,6 +48,7 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.Triple;
@Path("/blocks")
@Tag(name = "Blocks")
@@ -165,10 +166,13 @@ public class BlocksResource {
}
// Not found, so try the block archive
byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
if (bytes != null) {
if (version != 1) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Archived blocks require version 1");
Triple<byte[], Integer, Integer> serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
if (serializedBlock != null) {
byte[] bytes = serializedBlock.getA();
Integer serializationVersion = serializedBlock.getB();
if (version != serializationVersion) {
// TODO: we could quite easily reserialize the block with the requested version
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Block is not stored using requested serialization version.");
}
return Base58.encode(bytes);
}
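For readability: the Triple returned by fetchSerializedBlockBytesForSignature() packs (blockBytes, serializationVersion, height), hence the getA()/getB() calls above; getC(), the height, is unused at this call site.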

src/main/java/org/qortal/api/resource/ChatResource.java (2)

@@ -72,6 +72,7 @@ public class ChatResource {
@QueryParam("reference") String reference,
@QueryParam("chatreference") String chatReference,
@QueryParam("haschatreference") Boolean hasChatReference,
@QueryParam("sender") String sender,
@Parameter(ref = "limit") @QueryParam("limit") Integer limit,
@Parameter(ref = "offset") @QueryParam("offset") Integer offset,
@Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
@@ -107,6 +108,7 @@ public class ChatResource {
chatReferenceBytes,
hasChatReference,
involvingAddresses,
sender,
limit, offset, reverse);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);

src/main/java/org/qortal/api/restricted/resource/AdminResource.java (59)

@@ -45,6 +45,7 @@ import org.qortal.block.BlockChain;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.controller.Synchronizer.SynchronizationResult;
import org.qortal.controller.repository.BlockArchiveRebuilder;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.network.Network;
@@ -734,6 +735,64 @@ public class AdminResource {
}
}
@POST
@Path("/repository/archive/rebuild")
@Operation(
summary = "Rebuild archive",
description = "Rebuilds archive files, using the specified serialization version",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "number", example = "2"
)
)
),
responses = {
@ApiResponse(
description = "\"true\"",
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String rebuildArchive(@HeaderParam(Security.API_KEY_HEADER) String apiKey, Integer serializationVersion) {
Security.checkApiCallAllowed(request);
// Default serialization version to value specified in settings
if (serializationVersion == null) {
serializationVersion = Settings.getInstance().getDefaultArchiveVersion();
}
try {
// We don't actually need to lock the blockchain here, but we'll do it anyway so that
// the node can focus on rebuilding rather than synchronizing / minting.
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
blockchainLock.lockInterruptibly();
try {
BlockArchiveRebuilder blockArchiveRebuilder = new BlockArchiveRebuilder(serializationVersion);
blockArchiveRebuilder.start();
return "true";
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
} finally {
blockchainLock.unlock();
}
} catch (InterruptedException e) {
// We couldn't lock blockchain to perform rebuild
return "false";
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
@DELETE
@Path("/repository")
@Operation(

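A sketch of calling the new endpoint from a client. The node's API port (12391) and the /admin mount path of AdminResource are assumptions here, as are the API key header name and value:
    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    // POST body "2" requests a rebuild using serialization version 2;
    // an omitted/empty body falls back to defaultArchiveVersion from settings.
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:12391/admin/repository/archive/rebuild"))
            .header("X-API-KEY", "<local api key>") // assumed value of Security.API_KEY_HEADER
            .POST(HttpRequest.BodyPublishers.ofString("2"))
            .build();
    String result = client.send(request, HttpResponse.BodyHandlers.ofString()).body(); // "true" on success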
src/main/java/org/qortal/api/websocket/ChatMessagesWebSocket.java (2)

@@ -49,6 +49,7 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
null,
null,
null,
null,
null, null, null);
sendMessages(session, chatMessages);
@@ -79,6 +80,7 @@ public class ChatMessagesWebSocket extends ApiWebSocket {
null,
null,
involvingAddresses,
null,
null, null, null);
sendMessages(session, chatMessages);

src/main/java/org/qortal/arbitrary/ArbitraryDataTransactionBuilder.java (6)

@@ -46,6 +46,7 @@ public class ArbitraryDataTransactionBuilder {
private static final double MAX_FILE_DIFF = 0.5f;
private final String publicKey58;
private final long fee;
private final Path path;
private final String name;
private Method method;
@@ -64,11 +65,12 @@ public class ArbitraryDataTransactionBuilder {
private ArbitraryTransactionData arbitraryTransactionData;
private ArbitraryDataFile arbitraryDataFile;
public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, Path path, String name,
public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, long fee, Path path, String name,
Method method, Service service, String identifier,
String title, String description, List<String> tags, Category category) {
this.repository = repository;
this.publicKey58 = publicKey58;
this.fee = fee;
this.path = path;
this.name = name;
this.method = method;
@@ -261,7 +263,7 @@ public class ArbitraryDataTransactionBuilder {
}
final BaseTransactionData baseTransactionData = new BaseTransactionData(now, Group.NO_GROUP,
lastReference, creatorPublicKey, 0L, null);
lastReference, creatorPublicKey, fee, null);
final int size = (int) arbitraryDataFile.size();
final int version = 5;
final int nonce = 0;

src/main/java/org/qortal/block/Block.java (4)

@@ -657,6 +657,10 @@ public class Block {
return this.atStates;
}
public byte[] getAtStatesHash() {
return this.atStatesHash;
}
/**
* Return expanded info on block's online accounts.
* <p>

src/main/java/org/qortal/block/BlockChain.java (7)

@@ -78,7 +78,8 @@ public class BlockChain {
onlineAccountMinterLevelValidationHeight,
selfSponsorshipAlgoV1Height,
feeValidationFixTimestamp,
chatReferenceTimestamp;
chatReferenceTimestamp,
arbitraryOptionalFeeTimestamp;
}
// Custom transaction fees
@@ -522,6 +523,10 @@ public class BlockChain {
return this.featureTriggers.get(FeatureTrigger.chatReferenceTimestamp.name()).longValue();
}
public long getArbitraryOptionalFeeTimestamp() {
return this.featureTriggers.get(FeatureTrigger.arbitraryOptionalFeeTimestamp.name()).longValue();
}
// More complex getters for aspects that change by height or timestamp
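Note: like the other FeatureTrigger entries, this getter will fail at runtime unless a matching "arbitraryOptionalFeeTimestamp" value exists under "featureTriggers" in blockchain.json; presumably that is what the small changes to blockchain.json and the various test-chain-v2*.json resources in this commit add (e.g. a value of 0 in the test chains, though those exact values are not shown in this diff).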

src/main/java/org/qortal/controller/Controller.java (29)

@@ -400,12 +400,8 @@ public class Controller extends Thread {
RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
RepositoryManager.setRepositoryFactory(repositoryFactory);
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
try (final Repository repository = RepositoryManager.getRepository()) {
RepositoryManager.archive(repository);
RepositoryManager.prune(repository);
}
} catch (DataException e) {
}
catch (DataException e) {
// If exception has no cause then repository is in use by some other process.
if (e.getCause() == null) {
LOGGER.info("Repository in use by another process?");
@@ -1379,9 +1375,24 @@ public class Controller extends Thread {
// If we have no block data, we should check the archive in case it's there
if (blockData == null) {
if (Settings.getInstance().isArchiveEnabled()) {
byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository);
if (bytes != null) {
CachedBlockMessage blockMessage = new CachedBlockMessage(bytes);
Triple<byte[], Integer, Integer> serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository);
if (serializedBlock != null) {
byte[] bytes = serializedBlock.getA();
Integer serializationVersion = serializedBlock.getB();
Message blockMessage;
switch (serializationVersion) {
case 1:
blockMessage = new CachedBlockMessage(bytes);
break;
case 2:
blockMessage = new CachedBlockV2Message(bytes);
break;
default:
return;
}
blockMessage.setId(message.getId());
// This call also causes the other needed data to be pulled in from repository

src/main/java/org/qortal/controller/repository/BlockArchiveRebuilder.java (121)

@@ -0,0 +1,121 @@
package org.qortal.controller.repository;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.repository.*;
import org.qortal.settings.Settings;
import org.qortal.transform.TransformationException;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
public class BlockArchiveRebuilder {
private static final Logger LOGGER = LogManager.getLogger(BlockArchiveRebuilder.class);
private final int serializationVersion;
public BlockArchiveRebuilder(int serializationVersion) {
this.serializationVersion = serializationVersion;
}
public void start() throws DataException, IOException {
if (!Settings.getInstance().isArchiveEnabled() || Settings.getInstance().isLite()) {
return;
}
// New archive path is in a different location from original archive path, to avoid conflicts.
// It will be moved later, once the process is complete.
final Path newArchivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive-rebuild");
final Path originalArchivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive");
// Delete archive-rebuild if it exists from a previous attempt
FileUtils.deleteDirectory(newArchivePath.toFile());
try (final Repository repository = RepositoryManager.getRepository()) {
int startHeight = 1; // We need to rebuild the entire archive
LOGGER.info("Rebuilding block archive from height {}...", startHeight);
while (!Controller.isStopping()) {
repository.discardChanges();
Thread.sleep(1000L);
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Rebuild archive
try {
final int maximumArchiveHeight = BlockArchiveReader.getInstance().getHeightOfLastArchivedBlock();
if (startHeight >= maximumArchiveHeight) {
// We've finished.
// Delete existing archive and move the newly built one into its place
FileUtils.deleteDirectory(originalArchivePath.toFile());
FileUtils.moveDirectory(newArchivePath.toFile(), originalArchivePath.toFile());
BlockArchiveReader.getInstance().invalidateFileListCache();
LOGGER.info("Block archive successfully rebuilt");
return;
}
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, serializationVersion, newArchivePath, repository);
// Set data source to BLOCK_ARCHIVE as we are rebuilding
writer.setDataSource(BlockArchiveWriter.BlockArchiveDataSource.BLOCK_ARCHIVE);
// We can't enforce the 100MB file size target, as the final file needs to contain all blocks
// that exist in the current archive. Otherwise, the final blocks in the archive will be lost.
writer.setShouldEnforceFileSizeTarget(false);
// We want to log the rebuild progress
writer.setShouldLogProgress(true);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
switch (result) {
case OK:
// Increment block archive height
startHeight += writer.getWrittenCount();
repository.saveChanges();
break;
case STOPPING:
return;
// We've reached the limit of the blocks we can archive
// Sleep for a while to allow more to become available
case NOT_ENOUGH_BLOCKS:
// This shouldn't happen, as we're not enforcing minimum file sizes
repository.discardChanges();
throw new DataException("Unable to rebuild archive due to unexpected NOT_ENOUGH_BLOCKS response.");
case BLOCK_NOT_FOUND:
// We tried to archive a block that didn't exist. This is a major failure and likely means
// that a bootstrap or re-sync is needed. Try again every minute until then.
LOGGER.info("Error: block not found when rebuilding archive. If this error persists, " +
"a bootstrap or re-sync may be needed.");
repository.discardChanges();
throw new DataException("Unable to rebuild archive because a block is missing.");
}
} catch (IOException | TransformationException e) {
LOGGER.info("Caught exception when rebuilding block archive", e);
throw new DataException("Unable to rebuild block archive");
}
}
} catch (InterruptedException e) {
// Do nothing
} finally {
// Delete archive-rebuild if it still exists, as that means something went wrong
FileUtils.deleteDirectory(newArchivePath.toFile());
}
}
}
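Usage is a one-shot, blocking call, as wired up in AdminResource above; a minimal sketch:
    // Rebuild the entire archive using V2 serialization.
    // Runs until complete; throws DataException/IOException on failure.
    BlockArchiveRebuilder rebuilder = new BlockArchiveRebuilder(2);
    rebuilder.start();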

src/main/java/org/qortal/network/message/CachedBlockV2Message.java (43)

@@ -0,0 +1,43 @@
package org.qortal.network.message;
import com.google.common.primitives.Ints;
import org.qortal.block.Block;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
// This is an OUTGOING-only Message which more readily lends itself to being cached
public class CachedBlockV2Message extends Message implements Cloneable {
public CachedBlockV2Message(Block block) throws TransformationException {
super(MessageType.BLOCK_V2);
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
try {
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
bytes.write(BlockTransformer.toBytes(block));
} catch (IOException e) {
throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream");
}
this.dataBytes = bytes.toByteArray();
this.checksumBytes = Message.generateChecksum(this.dataBytes);
}
public CachedBlockV2Message(byte[] cachedBytes) {
super(MessageType.BLOCK_V2);
this.dataBytes = cachedBytes;
this.checksumBytes = Message.generateChecksum(this.dataBytes);
}
public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) {
throw new UnsupportedOperationException("CachedBlockV2Message is for outgoing messages only");
}
}

src/main/java/org/qortal/repository/BlockArchiveReader.java (85)

@@ -3,10 +3,7 @@ package org.qortal.repository;
import com.google.common.primitives.Ints;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.at.ATStateData;
import org.qortal.data.block.BlockArchiveData;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.settings.Settings;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformation;
@@ -67,20 +64,51 @@ public class BlockArchiveReader {
this.fileListCache = Map.copyOf(map);
}
public Integer fetchSerializationVersionForHeight(int height) {
if (this.fileListCache == null) {
this.fetchFileList();
}
Triple<byte[], Integer, Integer> serializedBlock = this.fetchSerializedBlockBytesForHeight(height);
if (serializedBlock == null) {
return null;
}
Integer serializationVersion = serializedBlock.getB();
return serializationVersion;
}
public BlockTransformation fetchBlockAtHeight(int height) {
if (this.fileListCache == null) {
this.fetchFileList();
}
byte[] serializedBytes = this.fetchSerializedBlockBytesForHeight(height);
if (serializedBytes == null) {
Triple<byte[], Integer, Integer> serializedBlock = this.fetchSerializedBlockBytesForHeight(height);
if (serializedBlock == null) {
return null;
}
byte[] serializedBytes = serializedBlock.getA();
Integer serializationVersion = serializedBlock.getB();
if (serializedBytes == null || serializationVersion == null) {
return null;
}
ByteBuffer byteBuffer = ByteBuffer.wrap(serializedBytes);
BlockTransformation blockInfo = null;
try {
blockInfo = BlockTransformer.fromByteBuffer(byteBuffer);
switch (serializationVersion) {
case 1:
blockInfo = BlockTransformer.fromByteBuffer(byteBuffer);
break;
case 2:
blockInfo = BlockTransformer.fromByteBufferV2(byteBuffer);
break;
default:
// Invalid serialization version
return null;
}
if (blockInfo != null && blockInfo.getBlockData() != null) {
// Block height is stored outside of the main serialized bytes, so it
// won't be set automatically.
@@ -168,15 +196,20 @@ public class BlockArchiveReader {
return null;
}
public byte[] fetchSerializedBlockBytesForSignature(byte[] signature, boolean includeHeightPrefix, Repository repository) {
public Triple<byte[], Integer, Integer> fetchSerializedBlockBytesForSignature(byte[] signature, boolean includeHeightPrefix, Repository repository) {
if (this.fileListCache == null) {
this.fetchFileList();
}
Integer height = this.fetchHeightForSignature(signature, repository);
if (height != null) {
byte[] blockBytes = this.fetchSerializedBlockBytesForHeight(height);
if (blockBytes == null) {
Triple<byte[], Integer, Integer> serializedBlock = this.fetchSerializedBlockBytesForHeight(height);
if (serializedBlock == null) {
return null;
}
byte[] blockBytes = serializedBlock.getA();
Integer version = serializedBlock.getB();
if (blockBytes == null || version == null) {
return null;
}
@@ -187,18 +220,18 @@ public class BlockArchiveReader {
try {
bytes.write(Ints.toByteArray(height));
bytes.write(blockBytes);
return bytes.toByteArray();
return new Triple<>(bytes.toByteArray(), version, height);
} catch (IOException e) {
return null;
}
}
return blockBytes;
return new Triple<>(blockBytes, version, height);
}
return null;
}
public byte[] fetchSerializedBlockBytesForHeight(int height) {
public Triple<byte[], Integer, Integer> fetchSerializedBlockBytesForHeight(int height) {
String filename = this.getFilenameForHeight(height);
if (filename == null) {
// We don't have this block in the archive
@@ -221,7 +254,7 @@ public class BlockArchiveReader {
// End of fixed length header
// Make sure the version is one we recognize
if (version != 1) {
if (version != 1 && version != 2) {
LOGGER.info("Error: unknown version in file {}: {}", filename, version);
return null;
}
@@ -258,7 +291,7 @@ public class BlockArchiveReader {
byte[] blockBytes = new byte[blockLength];
file.read(blockBytes);
return blockBytes;
return new Triple<>(blockBytes, version, height);
} catch (FileNotFoundException e) {
LOGGER.info("File {} not found: {}", filename, e.getMessage());
@@ -279,6 +312,30 @@ public class BlockArchiveReader {
}
}
public int getHeightOfLastArchivedBlock() {
if (this.fileListCache == null) {
this.fetchFileList();
}
int maxEndHeight = 0;
Iterator it = this.fileListCache.entrySet().iterator();
while (it.hasNext()) {
Map.Entry pair = (Map.Entry) it.next();
if (pair == null || pair.getKey() == null || pair.getValue() == null) {
continue;
}
Triple<Integer, Integer, Integer> heightInfo = (Triple<Integer, Integer, Integer>) pair.getValue();
Integer endHeight = heightInfo.getB();
if (endHeight != null && endHeight > maxEndHeight) {
maxEndHeight = endHeight;
}
}
return maxEndHeight;
}
public void invalidateFileListCache() {
this.fileListCache = null;
}
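Pieced together from the reader and writer changes in this commit, the on-disk layout of an archive .dat file is roughly the following (a sketch inferred from this diff, not an authoritative spec):
    // archive/<startHeight>-<endHeight>.dat (inferred layout)
    //   int  version          // serialization version for every block in the file; 1 or 2 accepted
    //   int  startHeight
    //   int  endHeight
    //   ...                   // remainder of the fixed-length header, including the per-block index offsets
    // then, for each block:
    //   int  height
    //   int  blockLength
    //   byte[blockLength] blockBytes   // V1 or V2 serialized, per the version field above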

src/main/java/org/qortal/repository/BlockArchiveWriter.java (129)

@@ -6,10 +6,13 @@ import org.apache.logging.log4j.Logger;
import org.qortal.block.Block;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.data.at.ATStateData;
import org.qortal.data.block.BlockArchiveData;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.settings.Settings;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformation;
import org.qortal.transform.block.BlockTransformer;
import java.io.ByteArrayOutputStream;
@@ -18,6 +21,7 @@ import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
public class BlockArchiveWriter {
@@ -28,25 +32,71 @@ public class BlockArchiveWriter {
BLOCK_NOT_FOUND
}
public enum BlockArchiveDataSource {
BLOCK_REPOSITORY, // To build an archive from the Blocks table
BLOCK_ARCHIVE // To build a new archive from an existing archive
}
private static final Logger LOGGER = LogManager.getLogger(BlockArchiveWriter.class);
public static final long DEFAULT_FILE_SIZE_TARGET = 100 * 1024 * 1024; // 100MiB
private int startHeight;
private final int endHeight;
private final Integer serializationVersion;
private final Path archivePath;
private final Repository repository;
private long fileSizeTarget = DEFAULT_FILE_SIZE_TARGET;
private boolean shouldEnforceFileSizeTarget = true;
// Default data source to BLOCK_REPOSITORY; can optionally be overridden
private BlockArchiveDataSource dataSource = BlockArchiveDataSource.BLOCK_REPOSITORY;
private boolean shouldLogProgress = false;
private int writtenCount;
private int lastWrittenHeight;
private Path outputPath;
public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) {
/**
* Instantiate a BlockArchiveWriter using a custom archive path
* @param startHeight
* @param endHeight
* @param repository
*/
public BlockArchiveWriter(int startHeight, int endHeight, Integer serializationVersion, Path archivePath, Repository repository) {
this.startHeight = startHeight;
this.endHeight = endHeight;
this.archivePath = archivePath.toAbsolutePath();
this.repository = repository;
if (serializationVersion == null) {
// When serialization version isn't specified, fetch it from the existing archive
serializationVersion = this.findSerializationVersion();
}
this.serializationVersion = serializationVersion;
}
/**
* Instantiate a BlockArchiveWriter using the default archive path and version
* @param startHeight
* @param endHeight
* @param repository
*/
public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) {
this(startHeight, endHeight, null, Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository);
}
private int findSerializationVersion() {
// Attempt to fetch the serialization version from the existing archive
Integer block2SerializationVersion = BlockArchiveReader.getInstance().fetchSerializationVersionForHeight(2);
if (block2SerializationVersion != null) {
return block2SerializationVersion;
}
// Default to version specified in settings
return Settings.getInstance().getDefaultArchiveVersion();
}
public static int getMaxArchiveHeight(Repository repository) throws DataException {
@@ -72,8 +122,7 @@ public class BlockArchiveWriter {
public BlockArchiveWriteResult write() throws DataException, IOException, TransformationException, InterruptedException {
// Create the archive folder if it doesn't exist
// This is a subfolder of the db directory, to make bootstrapping easier
Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
// This is generally a subfolder of the db directory, to make bootstrapping easier
try {
Files.createDirectories(archivePath);
} catch (IOException e) {
@@ -95,13 +144,13 @@ public class BlockArchiveWriter {
LOGGER.info(String.format("Fetching blocks from height %d...", startHeight));
int i = 0;
while (headerBytes.size() + bytes.size() < this.fileSizeTarget
|| this.shouldEnforceFileSizeTarget == false) {
while (headerBytes.size() + bytes.size() < this.fileSizeTarget) {
if (Controller.isStopping()) {
return BlockArchiveWriteResult.STOPPING;
}
if (Synchronizer.getInstance().isSynchronizing()) {
Thread.sleep(1000L);
continue;
}
@@ -112,7 +161,28 @@ public class BlockArchiveWriter {
//LOGGER.info("Fetching block {}...", currentHeight);
BlockData blockData = repository.getBlockRepository().fromHeight(currentHeight);
BlockData blockData = null;
List<TransactionData> transactions = null;
List<ATStateData> atStates = null;
byte[] atStatesHash = null;
switch (this.dataSource) {
case BLOCK_ARCHIVE:
BlockTransformation archivedBlock = BlockArchiveReader.getInstance().fetchBlockAtHeight(currentHeight);
if (archivedBlock != null) {
blockData = archivedBlock.getBlockData();
transactions = archivedBlock.getTransactions();
atStates = archivedBlock.getAtStates();
atStatesHash = archivedBlock.getAtStatesHash();
}
break;
case BLOCK_REPOSITORY:
default:
blockData = repository.getBlockRepository().fromHeight(currentHeight);
break;
}
if (blockData == null) {
return BlockArchiveWriteResult.BLOCK_NOT_FOUND;
}
@@ -122,18 +192,50 @@ public class BlockArchiveWriter {
repository.getBlockArchiveRepository().save(blockArchiveData);
repository.saveChanges();
// Build the block
Block block;
if (atStatesHash != null) {
block = new Block(repository, blockData, transactions, atStatesHash);
}
else if (atStates != null) {
block = new Block(repository, blockData, transactions, atStates);
}
else {
block = new Block(repository, blockData);
}
// Write the block data to some byte buffers
Block block = new Block(repository, blockData);
int blockIndex = bytes.size();
// Write block index to header
headerBytes.write(Ints.toByteArray(blockIndex));
// Write block height
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
byte[] blockBytes = BlockTransformer.toBytes(block);
// Get serialized block bytes
byte[] blockBytes;
switch (serializationVersion) {
case 1:
blockBytes = BlockTransformer.toBytes(block);
break;
case 2:
blockBytes = BlockTransformer.toBytesV2(block);
break;
default:
throw new DataException("Invalid serialization version");
}
// Write block length
bytes.write(Ints.toByteArray(blockBytes.length));
// Write block bytes
bytes.write(blockBytes);
// Log every 1000 blocks
if (this.shouldLogProgress && i % 1000 == 0) {
LOGGER.info("Archived up to block height {}. Size of current file: {} bytes", currentHeight, (headerBytes.size() + bytes.size()));
}
i++;
}
@@ -147,11 +249,10 @@ public class BlockArchiveWriter {
// We have enough blocks to create a new file
int endHeight = startHeight + i - 1;
int version = 1;
String filePath = String.format("%s/%d-%d.dat", archivePath.toString(), startHeight, endHeight);
FileOutputStream fileOutputStream = new FileOutputStream(filePath);
// Write version number
fileOutputStream.write(Ints.toByteArray(version));
fileOutputStream.write(Ints.toByteArray(serializationVersion));
// Write start height
fileOutputStream.write(Ints.toByteArray(startHeight));
// Write end height
@@ -199,4 +300,12 @@ public class BlockArchiveWriter {
this.shouldEnforceFileSizeTarget = shouldEnforceFileSizeTarget;
}
public void setDataSource(BlockArchiveDataSource dataSource) {
this.dataSource = dataSource;
}
public void setShouldLogProgress(boolean shouldLogProgress) {
this.shouldLogProgress = shouldLogProgress;
}
}
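Putting the new constructor and setters together, this is roughly how BlockArchiveRebuilder (above) drives a writer; a sketch with example heights:
    // Write blocks 1..10000 to a custom path using V2 serialization,
    // sourcing them from the existing archive instead of the Blocks table.
    Path rebuildPath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive-rebuild");
    BlockArchiveWriter writer = new BlockArchiveWriter(1, 10000, 2, rebuildPath, repository);
    writer.setDataSource(BlockArchiveWriter.BlockArchiveDataSource.BLOCK_ARCHIVE);
    writer.setShouldEnforceFileSizeTarget(false); // final file must hold all remaining blocks
    writer.setShouldLogProgress(true);
    BlockArchiveWriter.BlockArchiveWriteResult result = writer.write(); // OK, STOPPING, NOT_ENOUGH_BLOCKS or BLOCK_NOT_FOUND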

src/main/java/org/qortal/repository/ChatRepository.java (2)

@@ -15,7 +15,7 @@ public interface ChatRepository {
*/
public List<ChatMessage> getMessagesMatchingCriteria(Long before, Long after,
Integer txGroupId, byte[] reference, byte[] chatReferenceBytes, Boolean hasChatReference,
List<String> involving, Integer limit, Integer offset, Boolean reverse) throws DataException;
List<String> involving, String senderAddress, Integer limit, Integer offset, Boolean reverse) throws DataException;
public ChatMessage toChatMessage(ChatTransactionData chatTransactionData) throws DataException;

src/main/java/org/qortal/repository/RepositoryManager.java (61)

@@ -2,11 +2,6 @@ package org.qortal.repository;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.gui.SplashFrame;
import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving;
import org.qortal.repository.hsqldb.HSQLDBDatabasePruning;
import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.settings.Settings;
import java.sql.SQLException;
import java.util.concurrent.TimeoutException;
@@ -61,62 +56,6 @@ public abstract class RepositoryManager {
}
}
public static boolean archive(Repository repository) {
if (Settings.getInstance().isLite()) {
// Lite nodes have no blockchain
return false;
}
// Bulk archive the database the first time we use archive mode
if (Settings.getInstance().isArchiveEnabled()) {
if (RepositoryManager.canArchiveOrPrune()) {
try {
return HSQLDBDatabaseArchiving.buildBlockArchive(repository, BlockArchiveWriter.DEFAULT_FILE_SIZE_TARGET);
} catch (DataException e) {
LOGGER.info("Unable to build block archive. The database may have been left in an inconsistent state.");
}
}
else {
LOGGER.info("Unable to build block archive due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
LOGGER.info("To bootstrap, stop the core and delete the db folder, then start the core again.");
SplashFrame.getInstance().updateStatus("Missing index. Bootstrapping is recommended.");
}
}
return false;
}
public static boolean prune(Repository repository) {
if (Settings.getInstance().isLite()) {
// Lite nodes have no blockchain
return false;
}
// Bulk prune the database the first time we use top-only or block archive mode
if (Settings.getInstance().isTopOnly() ||
Settings.getInstance().isArchiveEnabled()) {
if (RepositoryManager.canArchiveOrPrune()) {
try {
boolean prunedATStates = HSQLDBDatabasePruning.pruneATStates((HSQLDBRepository) repository);
boolean prunedBlocks = HSQLDBDatabasePruning.pruneBlocks((HSQLDBRepository) repository);
// Perform repository maintenance to shrink the db size down
if (prunedATStates && prunedBlocks) {
HSQLDBDatabasePruning.performMaintenance(repository);
return true;
}
} catch (SQLException | DataException e) {
LOGGER.info("Unable to bulk prune AT states. The database may have been left in an inconsistent state.");
}
}
else {
LOGGER.info("Unable to prune blocks due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
}
}
return false;
}
public static void setRequestedCheckpoint(Boolean quick) {
quickCheckpointRequested = quick;
}

src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java (7)

@@ -24,7 +24,7 @@ public class HSQLDBChatRepository implements ChatRepository {
@Override
public List<ChatMessage> getMessagesMatchingCriteria(Long before, Long after, Integer txGroupId, byte[] referenceBytes,
byte[] chatReferenceBytes, Boolean hasChatReference, List<String> involving,
byte[] chatReferenceBytes, Boolean hasChatReference, List<String> involving, String senderAddress,
Integer limit, Integer offset, Boolean reverse) throws DataException {
// Check args meet expectations
if ((txGroupId != null && involving != null && !involving.isEmpty())
@@ -74,6 +74,11 @@ public class HSQLDBChatRepository implements ChatRepository {
whereClauses.add("chat_reference IS NULL");
}
if (senderAddress != null) {
whereClauses.add("sender = ?");
bindParams.add(senderAddress);
}
if (txGroupId != null) {
whereClauses.add("tx_group_id = " + txGroupId); // int safe to use literally
whereClauses.add("recipient IS NULL");

src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java (88)

@@ -1,88 +0,0 @@
package org.qortal.repository.hsqldb;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
import org.qortal.gui.SplashFrame;
import org.qortal.repository.BlockArchiveWriter;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.transform.TransformationException;
import java.io.IOException;
/**
*
* When switching to an archiving node, we need to archive most of the database contents.
* This involves copying its data into flat files.
* If we do this entirely as a background process, it is very slow and can interfere with syncing.
* However, if we take the approach of doing this in bulk, before starting up the rest of the
* processes, this makes it much faster and less invasive.
*
* From that point, the original background archiving process will run, but can be dialled right down
* so as not to interfere with syncing.
*
*/
public class HSQLDBDatabaseArchiving {
private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabaseArchiving.class);
public static boolean buildBlockArchive(Repository repository, long fileSizeTarget) throws DataException {
// Only build the archive if we haven't already got one that is up to date
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
if (upToDate) {
// Already archived
return false;
}
LOGGER.info("Building block archive - this process could take a while...");
SplashFrame.getInstance().updateStatus("Building block archive...");
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
int startHeight = 0;
while (!Controller.isStopping()) {
try {
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
writer.setFileSizeTarget(fileSizeTarget);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
switch (result) {
case OK:
// Increment block archive height
startHeight = writer.getLastWrittenHeight() + 1;
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
repository.saveChanges();
break;
case STOPPING:
return false;
case NOT_ENOUGH_BLOCKS:
// We've reached the limit of the blocks we can archive
// Return from the whole method
return true;
case BLOCK_NOT_FOUND:
// We tried to archive a block that didn't exist. This is a major failure and likely means
// that a bootstrap or re-sync is needed. Return from the method
LOGGER.info("Error: block not found when building archive. If this error persists, " +
"a bootstrap or re-sync may be needed.");
return false;
}
} catch (IOException | TransformationException | InterruptedException e) {
LOGGER.info("Caught exception when creating block cache", e);
return false;
}
}
// If we got this far then something went wrong (most likely the app is stopping)
return false;
}
}

src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java (332)

@@ -1,332 +0,0 @@
package org.qortal.repository.hsqldb;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
import org.qortal.data.block.BlockData;
import org.qortal.gui.SplashFrame;
import org.qortal.repository.BlockArchiveWriter;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.settings.Settings;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.concurrent.TimeoutException;
/**
*
* When switching from a full node to a pruning node, we need to delete most of the database contents.
* If we do this entirely as a background process, it is very slow and can interfere with syncing.
* However, if we take the approach of transferring only the necessary rows to a new table and then
* deleting the original table, this makes the process much faster. It was taking several days to
* delete the AT states in the background, but only a couple of minutes to copy them to a new table.
*
* The trade off is that we have to go through a form of "reshape" when starting the app for the first
* time after enabling pruning mode. But given that this is an opt-in mode, I don't think it will be
* a problem.
*
* Once the pruning is complete, it automatically performs a CHECKPOINT DEFRAG in order to
* shrink the database file size down to a fraction of what it was before.
*
* From this point, the original background process will run, but can be dialled right down
* so as not to interfere with syncing.
*
*/
public class HSQLDBDatabasePruning {
private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabasePruning.class);
public static boolean pruneATStates(HSQLDBRepository repository) throws SQLException, DataException {
// Only bulk prune AT states if we have never done so before
int pruneHeight = repository.getATRepository().getAtPruneHeight();
if (pruneHeight > 0) {
// Already pruned AT states
return false;
}
if (Settings.getInstance().isArchiveEnabled()) {
// Only proceed if we can see that the archiver has already finished
// This way, if the archiver failed for any reason, we can prune once it has had
// some opportunities to try again
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
if (!upToDate) {
return false;
}
}
LOGGER.info("Starting bulk prune of AT states - this process could take a while... " +
"(approx. 2 mins on high spec, or upwards of 30 mins in some cases)");
SplashFrame.getInstance().updateStatus("Pruning database (takes up to 30 mins)...");
// Create new AT-states table to hold smaller dataset
repository.executeCheckedUpdate("DROP TABLE IF EXISTS ATStatesNew");
repository.executeCheckedUpdate("CREATE TABLE ATStatesNew ("
+ "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, "
+ "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, "
+ "PRIMARY KEY (AT_address, height), "
+ "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)");
repository.executeCheckedUpdate("SET TABLE ATStatesNew NEW SPACE");
repository.executeCheckedUpdate("CHECKPOINT");
// Add a height index
LOGGER.info("Adding index to AT states table...");
repository.executeCheckedUpdate("CREATE INDEX IF NOT EXISTS ATStatesNewHeightIndex ON ATStatesNew (height)");
repository.executeCheckedUpdate("CHECKPOINT");
// Find our latest block
BlockData latestBlock = repository.getBlockRepository().getLastBlock();
if (latestBlock == null) {
LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
return false;
}
// Calculate some constants for later use
final int blockchainHeight = latestBlock.getHeight();
int maximumBlockToTrim = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
if (Settings.getInstance().isArchiveEnabled()) {
// Archive mode - don't prune anything that hasn't been archived yet
maximumBlockToTrim = Math.min(maximumBlockToTrim, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
}
final int endHeight = blockchainHeight;
final int blockStep = 10000;
// It's essential that we rebuild the latest AT states here, as we are using this data in the next query.
// Failing to do this will result in important AT states being deleted, rendering the database unusable.
repository.getATRepository().rebuildLatestAtStates(endHeight);
// Loop through all the LatestATStates and copy them to the new table
LOGGER.info("Copying AT states...");
for (int height = 0; height < endHeight; height += blockStep) {
final int batchEndHeight = height + blockStep - 1;
//LOGGER.info(String.format("Copying AT states between %d and %d...", height, batchEndHeight));
String sql = "SELECT height, AT_address FROM LatestATStates WHERE height BETWEEN ? AND ?";
try (ResultSet latestAtStatesResultSet = repository.checkedExecute(sql, height, batchEndHeight)) {
if (latestAtStatesResultSet != null) {
do {
int latestAtHeight = latestAtStatesResultSet.getInt(1);
String latestAtAddress = latestAtStatesResultSet.getString(2);
// Copy this latest ATState to the new table
//LOGGER.info(String.format("Copying AT %s at height %d...", latestAtAddress, latestAtHeight));
try {
String updateSql = "INSERT INTO ATStatesNew ("
+ "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
+ "FROM ATStates "
+ "WHERE height = ? AND AT_address = ?)";
repository.executeCheckedUpdate(updateSql, latestAtHeight, latestAtAddress);
} catch (SQLException e) {
repository.examineException(e);
throw new DataException("Unable to copy ATStates", e);
}
// If this batch includes blocks after the maximum block to trim, we will need to copy
// each of its AT states above maximumBlockToTrim as they are considered "recent". We
// need to do this for _all_ AT states in these blocks, regardless of their latest state.
if (batchEndHeight >= maximumBlockToTrim) {
// Now copy this AT's states for each recent block they are present in
for (int i = maximumBlockToTrim; i < endHeight; i++) {
if (latestAtHeight < i) {
// This AT finished before this block so there is nothing to copy
continue;
}
//LOGGER.info(String.format("Copying recent AT %s at height %d...", latestAtAddress, i));
try {
// Copy each LatestATState to the new table
String updateSql = "INSERT IGNORE INTO ATStatesNew ("
+ "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
+ "FROM ATStates "
+ "WHERE height = ? AND AT_address = ?)";
repository.executeCheckedUpdate(updateSql, i, latestAtAddress);
} catch (SQLException e) {
repository.examineException(e);
throw new DataException("Unable to copy ATStates", e);
}
}
}
repository.saveChanges();
} while (latestAtStatesResultSet.next());
}
} catch (SQLException e) {
throw new DataException("Unable to copy AT states", e);
}
}
// Finally, drop the original table and rename
LOGGER.info("Deleting old AT states...");
repository.executeCheckedUpdate("DROP TABLE ATStates");
repository.executeCheckedUpdate("ALTER TABLE ATStatesNew RENAME TO ATStates");
repository.executeCheckedUpdate("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex");
repository.executeCheckedUpdate("CHECKPOINT");
// Update the prune height
int nextPruneHeight = maximumBlockToTrim + 1;
repository.getATRepository().setAtPruneHeight(nextPruneHeight);
repository.saveChanges();
repository.executeCheckedUpdate("CHECKPOINT");
// Now prune/trim the ATStatesData, as this currently goes back over a month
return HSQLDBDatabasePruning.pruneATStateData(repository);
}
/*
* Bulk prune ATStatesData to catch up with the now pruned ATStates table
* This uses the existing AT States trimming code but with a much higher end block
*/
private static boolean pruneATStateData(Repository repository) throws DataException {
if (Settings.getInstance().isArchiveEnabled()) {
// Don't prune ATStatesData in archive mode
return true;
}
BlockData latestBlock = repository.getBlockRepository().getLastBlock();
if (latestBlock == null) {
LOGGER.info("Unable to determine blockchain height, necessary for bulk ATStatesData pruning");
return false;
}
final int blockchainHeight = latestBlock.getHeight();
int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
// ATStateData is already trimmed - so carry on from where we left off in the past
int pruneStartHeight = repository.getATRepository().getAtTrimHeight();
LOGGER.info("Starting bulk prune of AT states data - this process could take a while... (approx. 3 mins on high spec)");
while (pruneStartHeight < upperPrunableHeight) {
// Prune all AT state data up until our latest minus pruneBlockLimit (or our archive height)
if (Controller.isStopping()) {
return false;
}
// Override batch size in the settings because this is a one-off process
final int batchSize = 1000;
final int rowLimitPerBatch = 50000;
int upperBatchHeight = pruneStartHeight + batchSize;
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
LOGGER.trace(String.format("Pruning AT states data between %d and %d...", pruneStartHeight, upperPruneHeight));
int numATStatesPruned = repository.getATRepository().trimAtStates(pruneStartHeight, upperPruneHeight, rowLimitPerBatch);
repository.saveChanges();
if (numATStatesPruned > 0) {
LOGGER.trace(String.format("Pruned %d AT states data rows between blocks %d and %d",
numATStatesPruned, pruneStartHeight, upperPruneHeight));
} else {
repository.getATRepository().setAtTrimHeight(upperBatchHeight);
// No need to rebuild the latest AT states as we aren't currently synchronizing
repository.saveChanges();
LOGGER.debug(String.format("Bumping AT states trim height to %d", upperBatchHeight));
// Can we move onto next batch?
if (upperPrunableHeight > upperBatchHeight) {
pruneStartHeight = upperBatchHeight;
}
else {
// We've finished pruning
break;
}
}
}
return true;
}
public static boolean pruneBlocks(Repository repository) throws SQLException, DataException {
// Only bulk prune AT states if we have never done so before
int pruneHeight = repository.getBlockRepository().getBlockPruneHeight();
if (pruneHeight > 0) {
// Already pruned blocks
return false;
}
if (Settings.getInstance().isArchiveEnabled()) {
// Only proceed if we can see that the archiver has already finished
// This way, if the archiver failed for any reason, we can prune once it has had
// some opportunities to try again
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
if (!upToDate) {
return false;
}
}
BlockData latestBlock = repository.getBlockRepository().getLastBlock();
if (latestBlock == null) {
LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
return false;
}
final int blockchainHeight = latestBlock.getHeight();
int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
int pruneStartHeight = 0;
if (Settings.getInstance().isArchiveEnabled()) {
// Archive mode - don't prune anything that hasn't been archived yet
upperPrunableHeight = Math.min(upperPrunableHeight, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
}
LOGGER.info("Starting bulk prune of blocks - this process could take a while... (approx. 5 mins on high spec)");
while (pruneStartHeight < upperPrunableHeight) {
// Prune all blocks up until our latest minus pruneBlockLimit
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
if (numBlocksPruned > 0) {
LOGGER.info(String.format("Pruned %d block%s between %d and %d",
numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
pruneStartHeight, upperPruneHeight));
} else {
final int nextPruneHeight = upperPruneHeight + 1;
repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
repository.saveChanges();
LOGGER.debug(String.format("Bumping block base prune height to %d", nextPruneHeight));
// Can we move onto next batch?
if (upperPrunableHeight > nextPruneHeight) {
pruneStartHeight = nextPruneHeight;
}
else {
// We've finished pruning
break;
}
}
}
return true;
}
public static void performMaintenance(Repository repository) throws SQLException, DataException {
try {
SplashFrame.getInstance().updateStatus("Performing maintenance...");
// Timeout if the database isn't ready for backing up after 5 minutes
// Nothing else should be using the db at this point, so a timeout shouldn't happen
long timeout = 5 * 60 * 1000L;
repository.performPeriodicMaintenance(timeout);
} catch (TimeoutException e) {
LOGGER.info("Attempt to perform maintenance failed due to timeout: {}", e.getMessage());
}
}
}

src/main/java/org/qortal/settings/Settings.java (7)

@@ -179,6 +179,8 @@ public class Settings {
private boolean archiveEnabled = true;
/** How often to attempt archiving (ms). */
private long archiveInterval = 7171L; // milliseconds
/** Serialization version to use when building an archive */
private int defaultArchiveVersion = 1;
/** Whether to automatically bootstrap instead of syncing from genesis */
@@ -274,6 +276,7 @@ public class Settings {
private String[] bootstrapHosts = new String[] {
"http://bootstrap.qortal.org",
"http://bootstrap2.qortal.org",
"http://bootstrap3.qortal.org",
"http://bootstrap.qortal.online"
};
@@ -931,6 +934,10 @@ public class Settings {
return this.archiveInterval;
}
public int getDefaultArchiveVersion() {
return this.defaultArchiveVersion;
}
public boolean getBootstrap() {
return this.bootstrap;

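Operationally, this means existing nodes keep producing V1 archives unless "defaultArchiveVersion": 2 is set in the node's settings file (typically settings.json); the value is only consulted when no explicit serialization version is supplied, as seen in AdminResource.rebuildArchive() and BlockArchiveWriter.findSerializationVersion() above.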
src/main/java/org/qortal/transaction/ArbitraryTransaction.java (18)

@@ -88,6 +88,12 @@ public class ArbitraryTransaction extends Transaction {
if (this.transactionData.getFee() < 0)
return ValidationResult.NEGATIVE_FEE;
// After the feature trigger, we require the fee to be sufficient if it's not 0.
// If the fee is zero, then the nonce is validated in isSignatureValid() as an alternative to a fee
if (this.arbitraryTransactionData.getTimestamp() >= BlockChain.getInstance().getArbitraryOptionalFeeTimestamp() && this.arbitraryTransactionData.getFee() != 0L) {
return super.isFeeValid();
}
return ValidationResult.OK;
}
@@ -208,10 +214,14 @@ public class ArbitraryTransaction extends Transaction {
// Clear nonce from transactionBytes
ArbitraryTransactionTransformer.clearNonce(transactionBytes);
// We only need to check nonce for recent transactions due to PoW verification overhead
if (NTP.getTime() - this.arbitraryTransactionData.getTimestamp() < HISTORIC_THRESHOLD) {
int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty();
return MemoryPoW.verify2(transactionBytes, POW_BUFFER_SIZE, difficulty, nonce);
// As of feature-trigger timestamp, we only require a nonce when the fee is zero
boolean beforeFeatureTrigger = this.arbitraryTransactionData.getTimestamp() < BlockChain.getInstance().getArbitraryOptionalFeeTimestamp();
if (beforeFeatureTrigger || this.arbitraryTransactionData.getFee() == 0L) {
// We only need to check nonce for recent transactions due to PoW verification overhead
if (NTP.getTime() - this.arbitraryTransactionData.getTimestamp() < HISTORIC_THRESHOLD) {
int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty();
return MemoryPoW.verify2(transactionBytes, POW_BUFFER_SIZE, difficulty, nonce);
}
}
}
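Taken together, the two hunks above implement a single fee-or-nonce rule. A condensed restatement as a hypothetical helper (illustrative only, not part of the commit):
class ArbitraryFeeNonceRule {
    // Before the arbitraryOptionalFeeTimestamp feature trigger, a valid MemoryPoW
    // nonce is always required. From the trigger onwards, a non-zero fee (checked
    // for sufficiency via isFeeValid()) may stand in for the nonce.
    static boolean nonceRequired(long txTimestamp, long fee, long featureTriggerTimestamp) {
        boolean beforeFeatureTrigger = txTimestamp < featureTriggerTimestamp;
        return beforeFeatureTrigger || fee == 0L;
    }
}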

28
src/main/java/org/qortal/transform/block/BlockTransformer.java

@ -312,16 +312,24 @@ public class BlockTransformer extends Transformer {
ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength);
long atFees = 0;
for (ATStateData atStateData : block.getATStates()) {
// Skip initial states generated by DEPLOY_AT transactions in the same block
if (atStateData.isInitial())
continue;
atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
atHashBytes.write(atStateData.getStateHash());
atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
atFees += atStateData.getFees();
if (block.getAtStatesHash() != null) {
// We already have the AT states hash
atFees = blockData.getATFees();
atHashBytes.write(block.getAtStatesHash());
}
else {
// We need to build the AT states hash
for (ATStateData atStateData : block.getATStates()) {
// Skip initial states generated by DEPLOY_AT transactions in the same block
if (atStateData.isInitial())
continue;
atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
atHashBytes.write(atStateData.getStateHash());
atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
atFees += atStateData.getFees();
}
}
bytes.write(Ints.toByteArray(blockData.getATCount()));
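For V2 blocks the per-AT byte sequence is replaced by a single precomputed hash. A sketch of how such a combined hash could be derived from the same byte layout, assuming a SHA-256 digest over the concatenated entries (the digest actually used behind Block.getAtStatesHash() is not shown in this hunk):
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import com.google.common.primitives.Longs;
import org.qortal.data.at.ATStateData;
class AtStatesHashSketch {
    // Builds one combined hash over the same bytes the else-branch above writes
    // per AT state, skipping initial states exactly as the loop does.
    static byte[] buildAtStatesHash(List<ATStateData> atStates) throws IOException, NoSuchAlgorithmException {
        ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream();
        for (ATStateData atStateData : atStates) {
            if (atStateData.isInitial())
                continue;
            atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
            atHashBytes.write(atStateData.getStateHash());
            atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
        }
        return MessageDigest.getInstance("SHA-256").digest(atHashBytes.toByteArray());
    }
}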

25
src/main/java/org/qortal/utils/BlockArchiveUtils.java

@ -21,6 +21,16 @@ public class BlockArchiveUtils {
* into the HSQLDB, in order to make it SQL-compatible
* again.
* <p>
* This is only fully compatible with archives that use
* serialization version 1. For version 2 (or above),
* we are unable to import individual AT states as we
* only have a single combined hash, so the use cases
* for this are greatly limited.
* <p>
* A version 1 archive should ultimately be rebuildable
* via a resync or reindex from genesis, allowing
* access to this feature once again.
* <p>
* Note: calls discardChanges() and saveChanges(), so
* make sure that you commit any existing repository
* changes before calling this method.
@ -61,9 +71,18 @@ public class BlockArchiveUtils {
repository.getBlockRepository().save(blockInfo.getBlockData());
// Save AT state data hashes
for (ATStateData atStateData : blockInfo.getAtStates()) {
atStateData.setHeight(blockInfo.getBlockData().getHeight());
repository.getATRepository().save(atStateData);
if (blockInfo.getAtStates() != null) {
for (ATStateData atStateData : blockInfo.getAtStates()) {
atStateData.setHeight(blockInfo.getBlockData().getHeight());
repository.getATRepository().save(atStateData);
}
}
else {
// We don't have AT state hashes, so we are only importing a partial state.
// This can still be useful to allow orphaning back to very old blocks, when we

// need to access other chainstate info (such as balances) at an earlier block.
// In order to do this, the orphan process must be temporarily adjusted to avoid
// orphaning AT states, as it will otherwise fail due to having no previous state.
}
} catch (DataException e) {
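Usage sketch, lifted from the V2 tests later in this diff: re-importing a range of archived blocks into the repository.
// Re-import archived blocks 401-500 into the HSQLDB repository.
// importFromArchive() calls discardChanges() and saveChanges() internally,
// so commit or discard any pending repository changes before calling it.
BlockArchiveUtils.importFromArchive(401, 500, repository);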

3
src/main/resources/blockchain.json

@ -85,7 +85,8 @@
"onlineAccountMinterLevelValidationHeight": 1092000,
"selfSponsorshipAlgoV1Height": 1092400,
"feeValidationFixTimestamp": 1671918000000,
"chatReferenceTimestamp": 1674316800000
"chatReferenceTimestamp": 1674316800000,
"arbitraryOptionalFeeTimestamp": 9999999999999
},
"checkpoints": [
{ "height": 1136300, "signature": "3BbwawEF2uN8Ni5ofpJXkukoU8ctAPxYoFB7whq9pKfBnjfZcpfEJT4R95NvBDoTP8WDyWvsUvbfHbcr9qSZuYpSKZjUQTvdFf6eqznHGEwhZApWfvXu6zjGCxYCp65F4jsVYYJjkzbjmkCg5WAwN5voudngA23kMK6PpTNygapCzXt" }

217
src/test/java/org/qortal/test/BlockArchiveTests.java → src/test/java/org/qortal/test/BlockArchiveV1Tests.java

@ -1,6 +1,7 @@
package org.qortal.test;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.reflect.FieldUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -10,8 +11,6 @@ import org.qortal.data.at.ATStateData;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.*;
import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving;
import org.qortal.repository.hsqldb.HSQLDBDatabasePruning;
import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.settings.Settings;
import org.qortal.test.common.AtUtils;
@ -26,7 +25,6 @@ import org.qortal.utils.NTP;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.SQLException;
@ -34,13 +32,16 @@ import java.util.List;
import static org.junit.Assert.*;
public class BlockArchiveTests extends Common {
public class BlockArchiveV1Tests extends Common {
@Before
public void beforeTest() throws DataException {
public void beforeTest() throws DataException, IllegalAccessException {
Common.useSettings("test-settings-v2-block-archive.json");
NTP.setFixedOffset(Settings.getInstance().getTestNtpOffset());
this.deleteArchiveDirectory();
// Set default archive version to 1, so that archive builds in these tests use V1
FieldUtils.writeField(Settings.getInstance(), "defaultArchiveVersion", 1, true);
}
@After
@ -333,212 +334,6 @@ public class BlockArchiveTests extends Common {
}
}
@Test
public void testBulkArchiveAndPrune() throws DataException, SQLException {
try (final Repository repository = RepositoryManager.getRepository()) {
HSQLDBRepository hsqldb = (HSQLDBRepository) repository;
// Deploy an AT so that we have AT state data
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
// Mint some blocks so that we are able to archive them later
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
}
// Assume 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
assertEquals(900, maximumArchiveHeight);
// Check the current archive height
assertEquals(0, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Write blocks 2-900 to the archive (using bulk method)
int fileSizeTarget = 428600; // Pre-calculated size of 900 blocks
assertTrue(HSQLDBDatabaseArchiving.buildBlockArchive(repository, fileSizeTarget));
// Ensure the block archive height has increased
assertEquals(901, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Ensure the SQL repository contains blocks 2 and 900...
assertNotNull(repository.getBlockRepository().fromHeight(2));
assertNotNull(repository.getBlockRepository().fromHeight(900));
// Check the current prune heights
assertEquals(0, repository.getBlockRepository().getBlockPruneHeight());
assertEquals(0, repository.getATRepository().getAtPruneHeight());
// Prior to archiving or pruning, ensure blocks 2 to 1002 and their AT states are available in the db
for (int i=2; i<=1002; i++) {
assertNotNull(repository.getBlockRepository().fromHeight(i));
List<ATStateData> atStates = repository.getATRepository().getBlockATStatesAtHeight(i);
assertNotNull(atStates);
assertEquals(1, atStates.size());
}
// Prune all the archived blocks and AT states (using bulk method)
assertTrue(HSQLDBDatabasePruning.pruneBlocks(hsqldb));
assertTrue(HSQLDBDatabasePruning.pruneATStates(hsqldb));
// Ensure the current prune heights have increased
assertEquals(901, repository.getBlockRepository().getBlockPruneHeight());
assertEquals(901, repository.getATRepository().getAtPruneHeight());
// Now ensure the SQL repository is missing blocks 2 and 900...
assertNull(repository.getBlockRepository().fromHeight(2));
assertNull(repository.getBlockRepository().fromHeight(900));
// ... but it's not missing blocks 1 and 901 (we don't prune the genesis block)
assertNotNull(repository.getBlockRepository().fromHeight(1));
assertNotNull(repository.getBlockRepository().fromHeight(901));
// Validate the latest block height in the repository
assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
// Ensure blocks 2-900 are all available in the archive
for (int i=2; i<=900; i++) {
assertNotNull(repository.getBlockArchiveRepository().fromHeight(i));
}
// Ensure blocks 2-900 are NOT available in the db
for (int i=2; i<=900; i++) {
assertNull(repository.getBlockRepository().fromHeight(i));
}
// Ensure blocks 901 to 1002 and their AT states are available in the db
for (int i=901; i<=1002; i++) {
assertNotNull(repository.getBlockRepository().fromHeight(i));
List<ATStateData> atStates = repository.getATRepository().getBlockATStatesAtHeight(i);
assertNotNull(atStates);
assertEquals(1, atStates.size());
}
// Ensure blocks 901 to 1002 are not available in the archive
for (int i=901; i<=1002; i++) {
assertNull(repository.getBlockArchiveRepository().fromHeight(i));
}
}
}
@Test
public void testBulkArchiveAndPruneMultipleFiles() throws DataException, SQLException {
try (final Repository repository = RepositoryManager.getRepository()) {
HSQLDBRepository hsqldb = (HSQLDBRepository) repository;
// Deploy an AT so that we have AT state data
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
// Mint some blocks so that we are able to archive them later
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
}
// Assume 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
assertEquals(900, maximumArchiveHeight);
// Check the current archive height
assertEquals(0, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Write blocks 2-900 to the archive (using bulk method)
int fileSizeTarget = 42360; // Pre-calculated size of approx 90 blocks
assertTrue(HSQLDBDatabaseArchiving.buildBlockArchive(repository, fileSizeTarget));
// Ensure 10 archive files have been created
Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive");
assertEquals(10, new File(archivePath.toString()).list().length);
// Check the files exist
assertTrue(Files.exists(Paths.get(archivePath.toString(), "2-90.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "91-179.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "180-268.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "269-357.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "358-446.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "447-535.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "536-624.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "625-713.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "714-802.dat")));
assertTrue(Files.exists(Paths.get(archivePath.toString(), "803-891.dat")));
// Ensure the block archive height has increased
// It won't be as high as 901, because blocks 892-900 were too small to reach the file size
// target of the 11th file
assertEquals(892, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Ensure the SQL repository contains blocks 2 and 891...
assertNotNull(repository.getBlockRepository().fromHeight(2));
assertNotNull(repository.getBlockRepository().fromHeight(891));
// Check the current prune heights
assertEquals(0, repository.getBlockRepository().getBlockPruneHeight());
assertEquals(0, repository.getATRepository().getAtPruneHeight());
// Prior to archiving or pruning, ensure blocks 2 to 1002 and their AT states are available in the db
for (int i=2; i<=1002; i++) {
assertNotNull(repository.getBlockRepository().fromHeight(i));
List<ATStateData> atStates = repository.getATRepository().getBlockATStatesAtHeight(i);
assertNotNull(atStates);
assertEquals(1, atStates.size());
}
// Prune all the archived blocks and AT states (using bulk method)
assertTrue(HSQLDBDatabasePruning.pruneBlocks(hsqldb));
assertTrue(HSQLDBDatabasePruning.pruneATStates(hsqldb));
// Ensure the current prune heights have increased
assertEquals(892, repository.getBlockRepository().getBlockPruneHeight());
assertEquals(892, repository.getATRepository().getAtPruneHeight());
// Now ensure the SQL repository is missing blocks 2 and 891...
assertNull(repository.getBlockRepository().fromHeight(2));
assertNull(repository.getBlockRepository().fromHeight(891));
// ... but it's not missing blocks 1 and 892 (we don't prune the genesis block)
assertNotNull(repository.getBlockRepository().fromHeight(1));
assertNotNull(repository.getBlockRepository().fromHeight(892));
// Validate the latest block height in the repository
assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
// Ensure blocks 2-891 are all available in the archive
for (int i=2; i<=891; i++) {
assertNotNull(repository.getBlockArchiveRepository().fromHeight(i));
}
// Ensure blocks 2-891 are NOT available in the db
for (int i=2; i<=891; i++) {
assertNull(repository.getBlockRepository().fromHeight(i));
}
// Ensure blocks 892 to 1002 and their AT states are available in the db
for (int i=892; i<=1002; i++) {
assertNotNull(repository.getBlockRepository().fromHeight(i));
List<ATStateData> atStates = repository.getATRepository().getBlockATStatesAtHeight(i);
assertNotNull(atStates);
assertEquals(1, atStates.size());
}
// Ensure blocks 892 to 1002 are not available in the archive
for (int i=892; i<=1002; i++) {
assertNull(repository.getBlockArchiveRepository().fromHeight(i));
}
}
}
@Test
public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {

504
src/test/java/org/qortal/test/BlockArchiveV2Tests.java

@ -0,0 +1,504 @@
package org.qortal.test;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.reflect.FieldUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.controller.BlockMinter;
import org.qortal.data.at.ATStateData;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.*;
import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.settings.Settings;
import org.qortal.test.common.AtUtils;
import org.qortal.test.common.BlockUtils;
import org.qortal.test.common.Common;
import org.qortal.transaction.DeployAtTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformation;
import org.qortal.utils.BlockArchiveUtils;
import org.qortal.utils.NTP;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.SQLException;
import java.util.List;
import static org.junit.Assert.*;
public class BlockArchiveV2Tests extends Common {
@Before
public void beforeTest() throws DataException, IllegalAccessException {
Common.useSettings("test-settings-v2-block-archive.json");
NTP.setFixedOffset(Settings.getInstance().getTestNtpOffset());
this.deleteArchiveDirectory();
// Set default archive version to 2, so that archive builds in these tests use V2
FieldUtils.writeField(Settings.getInstance(), "defaultArchiveVersion", 2, true);
}
@After
public void afterTest() throws DataException {
this.deleteArchiveDirectory();
}
@Test
public void testWriter() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Mint some blocks so that we are able to archive them later
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
}
// 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
repository.saveChanges();
assertEquals(900 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
}
}
@Test
public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Mint some blocks so that we are able to archive them later
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
}
// 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
repository.saveChanges();
assertEquals(900 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
// Read block 2 from the archive
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation block2Info = reader.fetchBlockAtHeight(2);
BlockData block2ArchiveData = block2Info.getBlockData();
// Read block 2 from the repository
BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2);
// Ensure the values match
assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight());
assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature());
// Test some values in the archive
assertEquals(1, block2ArchiveData.getOnlineAccountsCount());
// Read block 900 from the archive
BlockTransformation block900Info = reader.fetchBlockAtHeight(900);
BlockData block900ArchiveData = block900Info.getBlockData();
// Read block 900 from the repository
BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900);
// Ensure the values match
assertEquals(block900ArchiveData.getHeight(), block900RepositoryData.getHeight());
assertArrayEquals(block900ArchiveData.getSignature(), block900RepositoryData.getSignature());
// Test some values in the archive
assertEquals(1, block900ArchiveData.getOnlineAccountsCount());
}
}
@Test
public void testArchivedAtStates() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Deploy an AT so that we have AT state data
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
String atAddress = deployAtTransaction.getATAccount().getAddress();
// Mint some blocks so that we are able to archive them later
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
}
// 9 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(10);
repository.getATRepository().setAtTrimHeight(10);
// Check the max archive height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
assertEquals(9, maximumArchiveHeight);
// Write blocks 2-9 to the archive
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
assertEquals(9 - 1, writer.getWrittenCount());
// Increment block archive height
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
repository.saveChanges();
assertEquals(9 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
// Check blocks 2-9
for (Integer testHeight = 2; testHeight <= 9; testHeight++) {
// Read a block from the archive
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight);
BlockData archivedBlockData = blockInfo.getBlockData();
byte[] archivedAtStateHash = blockInfo.getAtStatesHash();
List<TransactionData> archivedTransactions = blockInfo.getTransactions();
// Read the same block from the repository
BlockData repositoryBlockData = repository.getBlockRepository().fromHeight(testHeight);
ATStateData repositoryAtStateData = repository.getATRepository().getATStateAtHeight(atAddress, testHeight);
// Ensure the repository has full AT state data
assertNotNull(repositoryAtStateData.getStateHash());
assertNotNull(repositoryAtStateData.getStateData());
// Check the archived transactions
if (testHeight == 2) {
assertEquals(1, archivedTransactions.size());
assertEquals(Transaction.TransactionType.DEPLOY_AT, archivedTransactions.get(0).getType());
}
else {
// Blocks 3+ shouldn't have any transactions
assertTrue(archivedTransactions.isEmpty());
}
// Ensure the archive has the AT states hash
assertNotNull(archivedAtStateHash);
// Also check the online accounts count and height
assertEquals(1, archivedBlockData.getOnlineAccountsCount());
assertEquals(testHeight, archivedBlockData.getHeight());
// Ensure the values match
assertEquals(archivedBlockData.getHeight(), repositoryBlockData.getHeight());
assertArrayEquals(archivedBlockData.getSignature(), repositoryBlockData.getSignature());
assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount());
assertArrayEquals(archivedBlockData.getMinterSignature(), repositoryBlockData.getMinterSignature());
assertEquals(archivedBlockData.getATCount(), repositoryBlockData.getATCount());
assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount());
assertArrayEquals(archivedBlockData.getReference(), repositoryBlockData.getReference());
assertEquals(archivedBlockData.getTimestamp(), repositoryBlockData.getTimestamp());
assertEquals(archivedBlockData.getATFees(), repositoryBlockData.getATFees());
assertEquals(archivedBlockData.getTotalFees(), repositoryBlockData.getTotalFees());
assertEquals(archivedBlockData.getTransactionCount(), repositoryBlockData.getTransactionCount());
assertArrayEquals(archivedBlockData.getTransactionsSignature(), repositoryBlockData.getTransactionsSignature());
// TODO: build atStatesHash and compare against value in archive
}
// Check block 10 (unarchived)
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation blockInfo = reader.fetchBlockAtHeight(10);
assertNull(blockInfo);
}
}
@Test
public void testArchiveAndPrune() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Deploy an AT so that we have AT state data
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
// Mint some blocks so that we are able to archive them later
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
}
// Assume 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
repository.getBlockArchiveRepository().setBlockArchiveHeight(901);
repository.saveChanges();
assertEquals(901, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
// Ensure the SQL repository contains blocks 2 and 900...
assertNotNull(repository.getBlockRepository().fromHeight(2));
assertNotNull(repository.getBlockRepository().fromHeight(900));
// Prune all the archived blocks
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900);
assertEquals(900-1, numBlocksPruned);
repository.getBlockRepository().setBlockPruneHeight(901);
// Prune the AT states for the archived blocks
repository.getATRepository().rebuildLatestAtStates(900);
repository.saveChanges();
int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900);
assertEquals(900-2, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
repository.getATRepository().setAtPruneHeight(901);
// Now ensure the SQL repository is missing blocks 2 and 900...
assertNull(repository.getBlockRepository().fromHeight(2));
assertNull(repository.getBlockRepository().fromHeight(900));
// ... but it's not missing blocks 1 and 901 (we don't prune the genesis block)
assertNotNull(repository.getBlockRepository().fromHeight(1));
assertNotNull(repository.getBlockRepository().fromHeight(901));
// Validate the latest block height in the repository
assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
}
}
@Test
public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Deploy an AT so that we have AT state data
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
// Mint some blocks so that we are able to archive them later
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
}
// Make sure that block 500 has full AT state data and a state hash
List<ATStateData> block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
// Trim the first 500 blocks
repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500);
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501);
repository.getATRepository().rebuildLatestAtStates(500);
repository.getATRepository().trimAtStates(0, 500, 1000);
repository.getATRepository().setAtTrimHeight(501);
// Now block 499 should only have the AT state data hash
List<ATStateData> block499AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(499);
atStatesData = repository.getATRepository().getATStateAtHeight(block499AtStatesData.get(0).getATAddress(), 499);
assertNotNull(atStatesData.getStateHash());
assertNull(atStatesData.getStateData());
// ... but block 500 should have the full data (due to being retained as the "latest" AT state in the trimmed range)
block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
// ... and block 501 should also have the full data
List<ATStateData> block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501);
atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
assertEquals(500, maximumArchiveHeight);
BlockData block3DataPreArchive = repository.getBlockRepository().fromHeight(3);
// Write blocks 2-500 to the archive
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
assertEquals(500 - 1, writer.getWrittenCount()); // -1 for the genesis block
// Increment block archive height
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
repository.saveChanges();
assertEquals(500 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
// Ensure the SQL repository contains blocks 2 and 500...
assertNotNull(repository.getBlockRepository().fromHeight(2));
assertNotNull(repository.getBlockRepository().fromHeight(500));
// Prune all the archived blocks
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 500);
assertEquals(500-1, numBlocksPruned);
repository.getBlockRepository().setBlockPruneHeight(501);
// Prune the AT states for the archived blocks
repository.getATRepository().rebuildLatestAtStates(500);
repository.saveChanges();
int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500);
assertEquals(498, numATStatesPruned); // Heights 2-500 inclusive (499 states), minus the retained latest AT state
repository.getATRepository().setAtPruneHeight(501);
// Now ensure the SQL repository is missing blocks 2 and 500...
assertNull(repository.getBlockRepository().fromHeight(2));
assertNull(repository.getBlockRepository().fromHeight(500));
// ... but it's not missing blocks 1 and 501 (we don't prune the genesis block)
assertNotNull(repository.getBlockRepository().fromHeight(1));
assertNotNull(repository.getBlockRepository().fromHeight(501));
// Validate the latest block height in the repository
assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
// Now orphan some unarchived blocks.
BlockUtils.orphanBlocks(repository, 500);
assertEquals(502, (int) repository.getBlockRepository().getLastBlock().getHeight());
// We're close to the lower limit of the SQL database now, so
// we need to import some blocks from the archive
BlockArchiveUtils.importFromArchive(401, 500, repository);
// Ensure the SQL repository now contains block 401 but not 400...
assertNotNull(repository.getBlockRepository().fromHeight(401));
assertNull(repository.getBlockRepository().fromHeight(400));
// Import the remaining 399 blocks
BlockArchiveUtils.importFromArchive(2, 400, repository);
// Verify that block 3 matches the original
BlockData block3DataPostArchive = repository.getBlockRepository().fromHeight(3);
assertArrayEquals(block3DataPreArchive.getSignature(), block3DataPostArchive.getSignature());
assertEquals(block3DataPreArchive.getHeight(), block3DataPostArchive.getHeight());
// Orphan 2 more blocks, which should be the last ones that it is possible to orphan
// TODO: figure out why this is 1 block more than in the equivalent block archive V1 test
BlockUtils.orphanBlocks(repository, 2);
// Orphan another block, which should fail
Exception exception = null;
try {
BlockUtils.orphanBlocks(repository, 1);
} catch (DataException e) {
exception = e;
}
// Ensure that a DataException is thrown because there is no more AT states data available
assertNotNull(exception);
assertEquals(DataException.class, exception.getClass());
// FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception
// and allow orphaning back through blocks with trimmed AT states.
}
}
/**
* Many nodes are missing an ATStatesHeightIndex due to an earlier bug.
* In these cases we disable archiving and pruning, as this index is an
* essential component of both processes.
*/
@Test
public void testMissingAtStatesHeightIndex() throws DataException, SQLException {
try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {
// Firstly check that we're able to prune or archive when the index exists
assertTrue(repository.getATRepository().hasAtStatesHeightIndex());
assertTrue(RepositoryManager.canArchiveOrPrune());
// Delete the index
repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute();
// Ensure that we're unable to prune or archive when the index doesn't exist
assertFalse(repository.getATRepository().hasAtStatesHeightIndex());
assertFalse(RepositoryManager.canArchiveOrPrune());
}
}
private void deleteArchiveDirectory() {
// Delete the archive directory if it exists
Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
try {
FileUtils.deleteDirectory(archivePath.toFile());
} catch (IOException e) {
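// Swallowed deliberately - failing to delete the archive directory is not fatal for these tests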
}
}
}

2
src/test/java/org/qortal/test/arbitrary/ArbitraryDataStoragePolicyTests.java

@ -246,7 +246,7 @@ public class ArbitraryDataStoragePolicyTests extends Common {
Path path = Paths.get("src/test/resources/arbitrary/demo1");
ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder(
repository, publicKey58, path, name, Method.PUT, Service.ARBITRARY_DATA, null,
repository, publicKey58, 0L, path, name, Method.PUT, Service.ARBITRARY_DATA, null,
null, null, null, null);
txnBuilder.build();

8
src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionMetadataTests.java

@ -107,7 +107,7 @@ public class ArbitraryTransactionMetadataTests extends Common {
// Create PUT transaction
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true,
title, description, tags, category);
// Check the chunk count is correct
@ -157,7 +157,7 @@ public class ArbitraryTransactionMetadataTests extends Common {
// Create PUT transaction
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true,
title, description, tags, category);
// Check the chunk count is correct
@ -219,7 +219,7 @@ public class ArbitraryTransactionMetadataTests extends Common {
// Create PUT transaction
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true,
title, description, tags, category);
// Check the chunk count is correct
@ -273,7 +273,7 @@ public class ArbitraryTransactionMetadataTests extends Common {
// Create PUT transaction
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true,
title, description, tags, category);
// Check the metadata is correct

344
src/test/java/org/qortal/test/arbitrary/ArbitraryTransactionTests.java

@ -5,6 +5,7 @@ import org.junit.Before;
import org.junit.Test;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataTransactionBuilder;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
@ -20,9 +21,11 @@ import org.qortal.test.common.TransactionUtils;
import org.qortal.test.common.transaction.TestTransaction;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.RegisterNameTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import java.io.IOException;
import java.nio.file.Path;
@ -36,7 +39,7 @@ public class ArbitraryTransactionTests extends Common {
}
@Test
public void testDifficultyTooLow() throws IllegalAccessException, DataException, IOException, MissingDataException {
public void testDifficultyTooLow() throws IllegalAccessException, DataException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
@ -78,7 +81,346 @@ public class ArbitraryTransactionTests extends Common {
assertTrue(transaction.isSignatureValid());
}
}
@Test
public void testNonceAndFee() throws IllegalAccessException, DataException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 100;
int dataLength = 900; // Actual data length will be longer due to encryption
// Register the name to Alice
RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, registerNameTransactionData, alice);
// Set difficulty to 1
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
// Create PUT transaction, with a fee
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
long fee = 10000000; // sufficient
boolean computeNonce = true;
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null);
// Check that nonce validation succeeds
byte[] signature = arbitraryDataFile.getSignature();
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData);
assertTrue(transaction.isSignatureValid());
// Increase difficulty to 15
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true);
// Make sure that nonce validation still succeeds, as the fee has allowed us to avoid including a nonce
assertTrue(transaction.isSignatureValid());
}
}
@Test
public void testNonceAndLowFee() throws IllegalAccessException, DataException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 100;
int dataLength = 900; // Actual data length will be longer due to encryption
// Register the name to Alice
RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, registerNameTransactionData, alice);
// Set difficulty to 1
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
// Create PUT transaction, with a fee that is too low
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
long fee = 9999999; // insufficient
boolean computeNonce = true;
boolean insufficientFeeDetected = false;
try {
ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null);
}
catch (DataException e) {
if (e.getMessage().contains("INSUFFICIENT_FEE")) {
insufficientFeeDetected = true;
}
}
// Transaction should be invalid due to an insufficient fee
assertTrue(insufficientFeeDetected);
}
}
@Test
public void testFeeNoNonce() throws IllegalAccessException, DataException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 100;
int dataLength = 900; // Actual data length will be longer due to encryption
// Register the name to Alice
RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, registerNameTransactionData, alice);
// Set difficulty to 1
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
// Create PUT transaction, with a fee
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
long fee = 10000000; // sufficient
boolean computeNonce = false;
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null);
// Check that nonce validation succeeds, even though it wasn't computed. This is because we have included a sufficient fee.
byte[] signature = arbitraryDataFile.getSignature();
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData);
assertTrue(transaction.isSignatureValid());
// Increase difficulty to 15
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true);
// Make sure that nonce validation still succeeds, as the fee has allowed us to avoid including a nonce
assertTrue(transaction.isSignatureValid());
}
}
@Test
public void testLowFeeNoNonce() throws IllegalAccessException, DataException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 100;
int dataLength = 900; // Actual data length will be longer due to encryption
// Register the name to Alice
RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, registerNameTransactionData, alice);
// Set difficulty to 1
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
// Create PUT transaction, with a fee that is too low. Also, don't compute a nonce.
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
long fee = 9999999; // insufficient
ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder(
repository, publicKey58, fee, path1, name, ArbitraryTransactionData.Method.PUT, service, identifier, null, null, null, null);
txnBuilder.setChunkSize(chunkSize);
txnBuilder.build();
ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData();
Transaction.ValidationResult result = TransactionUtils.signAndImport(repository, transactionData, alice);
// Transaction should be invalid due to an insufficient fee
assertEquals(Transaction.ValidationResult.INSUFFICIENT_FEE, result);
}
}
@Test
public void testZeroFeeNoNonce() throws IllegalAccessException, DataException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 100;
int dataLength = 900; // Actual data length will be longer due to encryption
// Register the name to Alice
RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, registerNameTransactionData, alice);
// Set difficulty to 1
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
// Create PUT transaction, with a zero fee. Also, don't compute a nonce.
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
long fee = 0L;
ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder(
repository, publicKey58, fee, path1, name, ArbitraryTransactionData.Method.PUT, service, identifier, null, null, null, null);
txnBuilder.setChunkSize(chunkSize);
txnBuilder.build();
ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData();
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
// Transaction should be invalid
assertFalse(arbitraryTransaction.isSignatureValid());
}
}
@Test
public void testNonceAndFeeBeforeFeatureTrigger() throws IllegalAccessException, DataException, IOException {
// Use v2-minting settings, as these are pre-feature-trigger
Common.useSettings("test-settings-v2-minting.json");
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 100;
int dataLength = 900; // Actual data length will be longer due to encryption
// Register the name to Alice
RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, registerNameTransactionData, alice);
// Set difficulty to 1
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
// Create PUT transaction, with a fee
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
long fee = 10000000; // sufficient
boolean computeNonce = true;
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null);
// Check that nonce validation succeeds
byte[] signature = arbitraryDataFile.getSignature();
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData);
assertTrue(transaction.isSignatureValid());
// Increase difficulty to 15
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true);
// Make sure the nonce validation fails, as we aren't allowing a fee to replace a nonce yet.
// Note: there is a very tiny chance this could succeed due to being extremely lucky
// and finding a high difficulty nonce in the first couple of cycles. It will be rare
// enough that we shouldn't need to account for it.
assertFalse(transaction.isSignatureValid());
// Reduce difficulty back to 1, to double check
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
assertTrue(transaction.isSignatureValid());
}
}
@Test
public void testNonceAndInsufficientFeeBeforeFeatureTrigger() throws IllegalAccessException, DataException, IOException {
// Use v2-minting settings, as these are pre-feature-trigger
Common.useSettings("test-settings-v2-minting.json");
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 100;
int dataLength = 900; // Actual data length will be longer due to encryption
// Register the name to Alice
RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, registerNameTransactionData, alice);
// Set difficulty to 1
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
// Create PUT transaction, with an insufficient fee
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
long fee = 9999999; // insufficient
boolean computeNonce = true;
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null);
// Check that nonce validation succeeds
byte[] signature = arbitraryDataFile.getSignature();
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData);
assertTrue(transaction.isSignatureValid());
// The transaction should be valid because we don't care about the fee (before the feature trigger)
assertEquals(Transaction.ValidationResult.OK, transaction.isValidUnconfirmed());
// Increase difficulty to 15
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true);
// Make sure the nonce validation fails, as we aren't allowing a fee to replace a nonce yet (and it was insufficient anyway)
// Note: there is a very tiny chance this could succeed due to being extremely lucky
// and finding a high difficulty nonce in the first couple of cycles. It will be rare
// enough that we shouldn't need to account for it.
assertFalse(transaction.isSignatureValid());
// Reduce difficulty back to 1, to double check
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
assertTrue(transaction.isSignatureValid());
}
}
	@Test
	public void testNonceAndZeroFeeBeforeFeatureTrigger() throws IllegalAccessException, DataException, IOException {
		// Use v2-minting settings, as these are pre-feature-trigger
		Common.useSettings("test-settings-v2-minting.json");

		try (final Repository repository = RepositoryManager.getRepository()) {
			PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
			String publicKey58 = Base58.encode(alice.getPublicKey());
			String name = "TEST"; // Can be anything for this test
			String identifier = null; // Not used for this test
			Service service = Service.ARBITRARY_DATA;
			int chunkSize = 100;
			int dataLength = 900; // Actual data length will be longer due to encryption

			// Register the name to Alice
			RegisterNameTransactionData registerNameTransactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
			registerNameTransactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(registerNameTransactionData.getTimestamp()));
			TransactionUtils.signAndMint(repository, registerNameTransactionData, alice);

			// Set difficulty to 1
			FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);

			// Create PUT transaction, with zero fee
			Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength);
			long fee = 0L;
			boolean computeNonce = true;
			ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, fee, computeNonce, null, null, null, null);

			// Check that nonce validation succeeds
			byte[] signature = arbitraryDataFile.getSignature();
			TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
			ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData);
			assertTrue(transaction.isSignatureValid());

			// The transaction should be valid because we don't care about the fee (before the feature trigger)
			assertEquals(Transaction.ValidationResult.OK, transaction.isValidUnconfirmed());

			// Increase difficulty to 15
			FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 15, true);

			// Make sure the nonce validation fails, as we aren't allowing a fee to replace a nonce yet (and no fee was paid anyway).
			// Note: there is a very tiny chance this could succeed by being extremely lucky
			// and finding a high-difficulty nonce in the first couple of cycles. It will be rare
			// enough that we shouldn't need to account for it.
			assertFalse(transaction.isSignatureValid());

			// Reduce difficulty back to 1, to double-check
			FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);
			assertTrue(transaction.isSignatureValid());
		}
	}
}
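
Both tests hinge on the same property of proof-of-work: a nonce is only valid relative to the difficulty in force at verification time, so raising powDifficulty from 1 to 15 retroactively invalidates a nonce computed at the lower setting. A minimal, self-contained sketch of that idea follows; it uses plain SHA-256 leading-zero-bit counting rather than Qortal's actual memory-hard MemoryPoW, but the difficulty relationship is the same:

import java.nio.ByteBuffer;
import java.security.MessageDigest;

public class PowDifficultySketch {
	// Leading zero bits of SHA-256(data || nonce); the nonce "meets" a
	// difficulty when this count is at least that many bits.
	static int leadingZeroBits(byte[] data, int nonce) throws Exception {
		MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
		sha256.update(data);
		sha256.update(ByteBuffer.allocate(4).putInt(nonce).array());

		int bits = 0;
		for (byte b : sha256.digest()) {
			if (b == 0) { bits += 8; continue; }
			bits += Integer.numberOfLeadingZeros(b & 0xff) - 24;
			break;
		}
		return bits;
	}

	public static void main(String[] args) throws Exception {
		byte[] data = "transaction bytes with nonce zeroed out".getBytes();

		// Search for a nonce at difficulty 1 (found almost immediately)
		int nonce = 0;
		while (leadingZeroBits(data, nonce) < 1)
			nonce++;

		System.out.println(leadingZeroBits(data, nonce) >= 1);  // true: valid at difficulty 1
		System.out.println(leadingZeroBits(data, nonce) >= 15); // near-certainly false at difficulty 15
	}
}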

11
src/test/java/org/qortal/test/common/ArbitraryUtils.java

@@ -29,19 +29,22 @@ public class ArbitraryUtils {
 			int chunkSize) throws DataException {
 		return ArbitraryUtils.createAndMintTxn(repository, publicKey58, path, name, identifier, method, service,
-				account, chunkSize, null, null, null, null);
+				account, chunkSize, 0L, true, null, null, null, null);
 	}
 
 	public static ArbitraryDataFile createAndMintTxn(Repository repository, String publicKey58, Path path, String name, String identifier,
 											ArbitraryTransactionData.Method method, Service service, PrivateKeyAccount account,
-											int chunkSize, String title, String description, List<String> tags, Category category) throws DataException {
+											int chunkSize, long fee, boolean computeNonce,
+											String title, String description, List<String> tags, Category category) throws DataException {
 
 		ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder(
-				repository, publicKey58, path, name, method, service, identifier, title, description, tags, category);
+				repository, publicKey58, fee, path, name, method, service, identifier, title, description, tags, category);
 
 		txnBuilder.setChunkSize(chunkSize);
 		txnBuilder.build();
-		txnBuilder.computeNonce();
+		if (computeNonce) {
+			txnBuilder.computeNonce();
+		}
 
 		ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData();
 		Transaction.ValidationResult result = TransactionUtils.signAndImport(repository, transactionData, account);
 		assertEquals(Transaction.ValidationResult.OK, result);
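
The widened signature lets callers choose both the fee and whether to spend CPU computing a nonce, while old call sites keep their behaviour through the delegating overload (fee 0L, computeNonce true). A fee-only call, as a post-trigger code path would use it, looks roughly like this (the fee value is illustrative, and the surrounding variables are assumed to be set up as in the tests above):

	// Pay a fee instead of computing a nonce (illustrative fee value)
	long fee = 10000000L;
	boolean computeNonce = false;
	ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(
			repository, publicKey58, path1, name, identifier,
			ArbitraryTransactionData.Method.PUT, service, alice, chunkSize,
			fee, computeNonce, null, null, null, null);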

3
src/test/resources/test-chain-v2-block-timestamps.json

@@ -75,7 +75,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-disable-reference.json

@@ -78,7 +78,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-founder-rewards.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-leftover-reward.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

5
src/test/resources/test-chain-v2-minting.json

@@ -74,12 +74,13 @@
 "calcChainWeightTimestamp": 0,
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
-"disableReferenceTimestamp": 9999999999999,
+"disableReferenceTimestamp": 0,
 "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 9999999999999
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-qora-holder-extremes.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-qora-holder-reduction.json

@@ -80,7 +80,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-qora-holder.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-reward-levels.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-reward-scaling.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-reward-shares.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2-self-sponsorship-algo.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 20,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-chain-v2.json

@@ -79,7 +79,8 @@
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "feeValidationFixTimestamp": 0,
-"chatReferenceTimestamp": 0
+"chatReferenceTimestamp": 0,
+"arbitraryOptionalFeeTimestamp": 0
 },
 "genesisInfo": {
 "version": 4,

3
src/test/resources/test-settings-v2-block-archive.json

@@ -9,5 +9,6 @@
 "testNtpOffset": 0,
 "minPeers": 0,
 "pruneBlockLimit": 100,
-"repositoryPath": "dbtest"
+"repositoryPath": "dbtest",
+"defaultArchiveVersion": 1
 }
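
The new defaultArchiveVersion setting pins this test profile to the version-1 archive format, presumably so the V1 test suite keeps exercising legacy serialization while the new V2 tests opt in to the newer format. Reading it would follow the usual Settings pattern (accessor name assumed):

	// Assumed accessor on Settings; this profile forces version 1
	int archiveVersion = Settings.getInstance().getDefaultArchiveVersion();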
