diff --git a/WindowsInstaller/Install Files/AppData/settings.json b/WindowsInstaller/Install Files/AppData/settings.json
index 088afef4..0d66e4e8 100755
--- a/WindowsInstaller/Install Files/AppData/settings.json
+++ b/WindowsInstaller/Install Files/AppData/settings.json
@@ -1,3 +1,4 @@
{
- "apiDocumentationEnabled": true
+ "apiDocumentationEnabled": true,
+ "apiWhitelistEnabled": false
}
diff --git a/WindowsInstaller/Nice-Qortal-Logo-crop.bmp b/WindowsInstaller/Nice-Qortal-Logo-crop.bmp
new file mode 100644
index 00000000..0b9f457b
Binary files /dev/null and b/WindowsInstaller/Nice-Qortal-Logo-crop.bmp differ
diff --git a/WindowsInstaller/Nice-Qortal-Logo-crop.png b/WindowsInstaller/Nice-Qortal-Logo-crop.png
new file mode 100644
index 00000000..0cea3c92
Binary files /dev/null and b/WindowsInstaller/Nice-Qortal-Logo-crop.png differ
diff --git a/WindowsInstaller/Qortal.aip b/WindowsInstaller/Qortal.aip
index 1b0c944b..a3dbd88a 100755
--- a/WindowsInstaller/Qortal.aip
+++ b/WindowsInstaller/Qortal.aip
diff --git a/WindowsInstaller/README.md b/WindowsInstaller/README.md
index 29a7b64a..4431564e 100644
--- a/WindowsInstaller/README.md
+++ b/WindowsInstaller/README.md
@@ -2,7 +2,9 @@
## Prerequisites
-* AdvancedInstaller v19.4 or better, and enterprise licence if translations are required
+* AdvancedInstaller v19.4 or better, and an enterprise licence.
+* Qortal has an open-source licence for Advanced Installer; however, as of December 2024 it only supports versions up to 19. (We may need to reach out to Advanced Installer again to obtain a new licence at some point, if needed.)
+* Reach out to @crowetic for links to the Advanced Installer installation files and the licence.
* Installed AdoptOpenJDK v17 64bit, full JDK *not* JRE
## General build instructions
@@ -10,6 +12,12 @@
If this is your first time opening the `qortal.aip` file then you might need to adjust
configured paths, or create a dummy `D:` drive with the expected layout.
+Opening the .aip file from within a clone of the qortal repo also works, if you have a separate Windows machine set up to do the build.
+
+You may need to change the location of the 'jre64' files inside Advanced Installer if they are set to a path that doesn't exist on your build machine.
+
+The Java memory arguments can be set manually, but as of December 2024 they have been reset back to the system defaults, which should include the G1GC garbage collector.
+
Typical build procedure:
* Place the `qortal.jar` file in `Install-Files\`
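For reference, if the Java memory arguments ever need to be set by hand again in Advanced Installer, they are ordinary JVM flags. The heap values below are purely illustrative and are not the project's defaults; only the G1GC flag is implied by the note above:

    -Xms512m -Xmx4g -XX:+UseG1GC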
diff --git a/pom.xml b/pom.xml
index 44bb10da..c632e29d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
	<modelVersion>4.0.0</modelVersion>
	<groupId>org.qortal</groupId>
	<artifactId>qortal</artifactId>
-	<version>4.6.5</version>
+	<version>4.6.6</version>
	<packaging>jar</packaging>
UTF-8
diff --git a/src/main/java/org/qortal/account/Account.java b/src/main/java/org/qortal/account/Account.java
index 537f0788..99fa5217 100644
--- a/src/main/java/org/qortal/account/Account.java
+++ b/src/main/java/org/qortal/account/Account.java
@@ -349,10 +349,28 @@ public class Account {
}
/**
- * Returns 'effective' minting level, or zero if reward-share does not exist.
+ * Returns reward-share minting address, or unknown if reward-share does not exist.
*
* @param repository
* @param rewardSharePublicKey
+ * @return address or unknown
+ * @throws DataException
+ */
+ public static String getRewardShareMintingAddress(Repository repository, byte[] rewardSharePublicKey) throws DataException {
+ // Find actual minter address
+ RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey);
+
+ if (rewardShareData == null)
+ return "Unknown";
+
+ return rewardShareData.getMinter();
+ }
+
+ /**
+ * Returns 'effective' minting level, or zero if reward-share does not exist.
+ *
+ * @param repository
+ * @param rewardSharePublicKey
* @return 0+
* @throws DataException
*/
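For illustration only (not part of the patch), the sketch below pairs the new `getRewardShareMintingAddress` helper with the existing effective-level lookup. The `rewardSharePublicKey` byte array and `LOGGER` are assumed to be provided by the calling class; `Account`, `Repository`, `RepositoryManager` and `DataException` are the same types imported elsewhere in this diff.

    try (final Repository repository = RepositoryManager.getRepository()) {
        // Resolve the reward-share to its minter address; "Unknown" means no reward-share exists
        String minterAddress = Account.getRewardShareMintingAddress(repository, rewardSharePublicKey);

        // An effective level of 0 likewise indicates a missing reward-share (e.g. a trimmed block)
        int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, rewardSharePublicKey);

        LOGGER.debug(String.format("Minter %s has effective level %d", minterAddress, minterLevel));
    } catch (DataException e) {
        LOGGER.warn("Repository issue while resolving reward-share minter", e);
    }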
diff --git a/src/main/java/org/qortal/api/model/ApiOnlineAccount.java b/src/main/java/org/qortal/api/model/ApiOnlineAccount.java
index 08b697aa..e26eb816 100644
--- a/src/main/java/org/qortal/api/model/ApiOnlineAccount.java
+++ b/src/main/java/org/qortal/api/model/ApiOnlineAccount.java
@@ -1,7 +1,13 @@
package org.qortal.api.model;
+import org.qortal.account.Account;
+import org.qortal.repository.DataException;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.repository.Repository;
+
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
@@ -47,4 +53,31 @@ public class ApiOnlineAccount {
return this.recipientAddress;
}
+ public int getMinterLevelFromPublicKey() {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ return Account.getRewardShareEffectiveMintingLevel(repository, this.rewardSharePublicKey);
+ } catch (DataException e) {
+ return 0;
+ }
+ }
+
+ public boolean getIsMember() {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ return repository.getGroupRepository().memberExists(694, getMinterAddress());
+ } catch (DataException e) {
+ return false;
+ }
+ }
+
+ // JAXB special
+
+ @XmlElement(name = "minterLevel")
+ protected int getMinterLevel() {
+ return getMinterLevelFromPublicKey();
+ }
+
+ @XmlElement(name = "isMinterMember")
+ protected boolean getMinterMember() {
+ return getIsMember();
+ }
}
diff --git a/src/main/java/org/qortal/api/model/BlockMintingInfo.java b/src/main/java/org/qortal/api/model/BlockMintingInfo.java
index f84e179e..02765a89 100644
--- a/src/main/java/org/qortal/api/model/BlockMintingInfo.java
+++ b/src/main/java/org/qortal/api/model/BlockMintingInfo.java
@@ -9,6 +9,7 @@ import java.math.BigInteger;
public class BlockMintingInfo {
public byte[] minterPublicKey;
+ public String minterAddress;
public int minterLevel;
public int onlineAccountsCount;
public BigDecimal maxDistance;
@@ -19,5 +20,4 @@ public class BlockMintingInfo {
public BlockMintingInfo() {
}
-
}
diff --git a/src/main/java/org/qortal/api/resource/BlocksResource.java b/src/main/java/org/qortal/api/resource/BlocksResource.java
index 01d8d2ab..ff0bb979 100644
--- a/src/main/java/org/qortal/api/resource/BlocksResource.java
+++ b/src/main/java/org/qortal/api/resource/BlocksResource.java
@@ -542,6 +542,7 @@ public class BlocksResource {
}
}
+ String minterAddress = Account.getRewardShareMintingAddress(repository, blockData.getMinterPublicKey());
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
if (minterLevel == 0)
// This may be unavailable when requesting a trimmed block
@@ -554,6 +555,7 @@ public class BlocksResource {
BlockMintingInfo blockMintingInfo = new BlockMintingInfo();
blockMintingInfo.minterPublicKey = blockData.getMinterPublicKey();
+ blockMintingInfo.minterAddress = minterAddress;
blockMintingInfo.minterLevel = minterLevel;
blockMintingInfo.onlineAccountsCount = blockData.getOnlineAccountsCount();
blockMintingInfo.maxDistance = new BigDecimal(block.MAX_DISTANCE);
@@ -887,5 +889,4 @@ public class BlocksResource {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
-
}
diff --git a/src/main/java/org/qortal/api/resource/ChatResource.java b/src/main/java/org/qortal/api/resource/ChatResource.java
index 66a2bd46..df2ca399 100644
--- a/src/main/java/org/qortal/api/resource/ChatResource.java
+++ b/src/main/java/org/qortal/api/resource/ChatResource.java
@@ -234,17 +234,21 @@ public class ChatResource {
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
- public ActiveChats getActiveChats(@PathParam("address") String address, @QueryParam("encoding") Encoding encoding) {
+ public ActiveChats getActiveChats(
+ @PathParam("address") String address,
+ @QueryParam("encoding") Encoding encoding,
+ @QueryParam("haschatreference") Boolean hasChatReference
+ ) {
if (address == null || !Crypto.isValidAddress(address))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
-
+
try (final Repository repository = RepositoryManager.getRepository()) {
- return repository.getChatRepository().getActiveChats(address, encoding);
+ return repository.getChatRepository().getActiveChats(address, encoding, hasChatReference);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
-
+
@POST
@Operation(
summary = "Build raw, unsigned, CHAT transaction",
diff --git a/src/main/java/org/qortal/api/websocket/ActiveChatsWebSocket.java b/src/main/java/org/qortal/api/websocket/ActiveChatsWebSocket.java
index b92fb19f..ca3ef2b3 100644
--- a/src/main/java/org/qortal/api/websocket/ActiveChatsWebSocket.java
+++ b/src/main/java/org/qortal/api/websocket/ActiveChatsWebSocket.java
@@ -77,7 +77,9 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
}
try (final Repository repository = RepositoryManager.getRepository()) {
- ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session));
+ Boolean hasChatReference = getHasChatReference(session);
+
+ ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session), hasChatReference);
StringWriter stringWriter = new StringWriter();
@@ -103,4 +105,20 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
return Encoding.valueOf(encoding);
}
+ private Boolean getHasChatReference(Session session) {
+ Map> queryParams = session.getUpgradeRequest().getParameterMap();
+ List hasChatReferenceList = queryParams.get("haschatreference");
+
+ // Return null if not specified
+ if (hasChatReferenceList != null && hasChatReferenceList.size() == 1) {
+ String value = hasChatReferenceList.get(0).toLowerCase();
+ if (value.equals("true")) {
+ return true;
+ } else if (value.equals("false")) {
+ return false;
+ }
+ }
+ return null; // Ignored if not present
+ }
+
}
diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java
index 918a20ae..c9353d70 100644
--- a/src/main/java/org/qortal/block/Block.java
+++ b/src/main/java/org/qortal/block/Block.java
@@ -145,7 +145,7 @@ public class Block {
private final Account recipientAccount;
private final AccountData recipientAccountData;
-
+
final BlockChain blockChain = BlockChain.getInstance();
ExpandedAccount(Repository repository, RewardShareData rewardShareData) throws DataException {
@@ -414,6 +414,21 @@ public class Block {
});
}
+ // After feature trigger, remove any online accounts that are not minter group member
+ if (height >= BlockChain.getInstance().getGroupMemberCheckHeight()) {
+ onlineAccounts.removeIf(a -> {
+ try {
+ int groupId = BlockChain.getInstance().getMintingGroupId();
+ String address = Account.getRewardShareMintingAddress(repository, a.getPublicKey());
+ boolean isMinterGroupMember = repository.getGroupRepository().memberExists(groupId, address);
+ return !isMinterGroupMember;
+ } catch (DataException e) {
+ // Something went wrong, so remove the account
+ return true;
+ }
+ });
+ }
+
if (onlineAccounts.isEmpty()) {
LOGGER.debug("No online accounts - not even our own?");
return null;
@@ -721,19 +736,19 @@ public class Block {
List<ExpandedAccount> expandedAccounts = new ArrayList<>();
for (RewardShareData rewardShare : this.cachedOnlineRewardShares) {
- if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight()) {
+ int groupId = BlockChain.getInstance().getMintingGroupId();
+ String address = rewardShare.getMinter();
+ boolean isMinterGroupMember = repository.getGroupRepository().memberExists(groupId, address);
+
+ if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight())
+ expandedAccounts.add(new ExpandedAccount(repository, rewardShare));
+
+ if (this.getBlockData().getHeight() >= BlockChain.getInstance().getFixBatchRewardHeight() && isMinterGroupMember)
expandedAccounts.add(new ExpandedAccount(repository, rewardShare));
- }
- if (this.getBlockData().getHeight() >= BlockChain.getInstance().getFixBatchRewardHeight()) {
- boolean isMinterGroupMember = repository.getGroupRepository().memberExists(BlockChain.getInstance().getMintingGroupId(), rewardShare.getMinter());
- if (isMinterGroupMember) {
- expandedAccounts.add(new ExpandedAccount(repository, rewardShare));
- }
- }
}
-
this.cachedExpandedAccounts = expandedAccounts;
+ LOGGER.trace(() -> String.format("Online reward-shares after expanded accounts %s", this.cachedOnlineRewardShares));
return this.cachedExpandedAccounts;
}
@@ -1143,8 +1158,17 @@ public class Block {
if (this.getBlockData().getHeight() >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
List<ExpandedAccount> expandedAccounts = this.getExpandedAccounts();
for (ExpandedAccount account : expandedAccounts) {
+ int groupId = BlockChain.getInstance().getMintingGroupId();
+ String address = account.getMintingAccount().getAddress();
+ boolean isMinterGroupMember = repository.getGroupRepository().memberExists(groupId, address);
+
if (account.getMintingAccount().getEffectiveMintingLevel() == 0)
return ValidationResult.ONLINE_ACCOUNTS_INVALID;
+
+ if (this.getBlockData().getHeight() >= BlockChain.getInstance().getFixBatchRewardHeight()) {
+ if (!isMinterGroupMember)
+ return ValidationResult.ONLINE_ACCOUNTS_INVALID;
+ }
}
}
@@ -1273,6 +1297,7 @@ public class Block {
// Online Accounts
ValidationResult onlineAccountsResult = this.areOnlineAccountsValid();
+ LOGGER.trace("Accounts valid = {}", onlineAccountsResult);
if (onlineAccountsResult != ValidationResult.OK)
return onlineAccountsResult;
@@ -1361,7 +1386,7 @@ public class Block {
// Check transaction can even be processed
validationResult = transaction.isProcessable();
if (validationResult != Transaction.ValidationResult.OK) {
- LOGGER.info(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
+ LOGGER.debug(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
return ValidationResult.TRANSACTION_INVALID;
}
@@ -1562,6 +1587,7 @@ public class Block {
this.blockData.setHeight(blockchainHeight + 1);
LOGGER.trace(() -> String.format("Processing block %d", this.blockData.getHeight()));
+ LOGGER.trace(() -> String.format("Online Reward Shares in process %s", this.cachedOnlineRewardShares));
if (this.blockData.getHeight() > 1) {
@@ -2280,7 +2306,6 @@ public class Block {
// Select the correct set of share bins based on block height
List<AccountLevelShareBin> accountLevelShareBinsForBlock = (this.blockData.getHeight() >= BlockChain.getInstance().getSharesByLevelV2Height()) ?
BlockChain.getInstance().getAccountLevelShareBinsV2() : BlockChain.getInstance().getAccountLevelShareBinsV1();
-
// Determine reward candidates based on account level
// This needs a deep copy, so the shares can be modified when tiers aren't activated yet
List<AccountLevelShareBin> accountLevelShareBins = new ArrayList<>();
@@ -2570,9 +2595,11 @@ public class Block {
return;
int minterLevel = Account.getRewardShareEffectiveMintingLevel(this.repository, this.getMinter().getPublicKey());
+ String minterAddress = Account.getRewardShareMintingAddress(this.repository, this.getMinter().getPublicKey());
LOGGER.debug(String.format("======= BLOCK %d (%.8s) =======", this.getBlockData().getHeight(), Base58.encode(this.getSignature())));
LOGGER.debug(String.format("Timestamp: %d", this.getBlockData().getTimestamp()));
+ LOGGER.debug(String.format("Minter address: %s", minterAddress));
LOGGER.debug(String.format("Minter level: %d", minterLevel));
LOGGER.debug(String.format("Online accounts: %d", this.getBlockData().getOnlineAccountsCount()));
LOGGER.debug(String.format("AT count: %d", this.getBlockData().getATCount()));
diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java
index a1fb9769..64024d00 100644
--- a/src/main/java/org/qortal/controller/BlockMinter.java
+++ b/src/main/java/org/qortal/controller/BlockMinter.java
@@ -97,364 +97,375 @@ public class BlockMinter extends Thread {
final boolean isSingleNodeTestnet = Settings.getInstance().isSingleNodeTestnet();
- try (final Repository repository = RepositoryManager.getRepository()) {
- // Going to need this a lot...
- BlockRepository blockRepository = repository.getBlockRepository();
-
- // Flags for tracking change in whether minting is possible,
- // so we can notify Controller, and further update SysTray, etc.
- boolean isMintingPossible = false;
- boolean wasMintingPossible = isMintingPossible;
+ // Flags for tracking change in whether minting is possible,
+ // so we can notify Controller, and further update SysTray, etc.
+ boolean isMintingPossible = false;
+ boolean wasMintingPossible = isMintingPossible;
+ try {
while (running) {
- if (isMintingPossible != wasMintingPossible)
- Controller.getInstance().onMintingPossibleChange(isMintingPossible);
+ // recreate repository for new loop iteration
+ try (final Repository repository = RepositoryManager.getRepository()) {
- wasMintingPossible = isMintingPossible;
+ // Going to need this a lot...
+ BlockRepository blockRepository = repository.getBlockRepository();
- try {
- // Free up any repository locks
- repository.discardChanges();
+ if (isMintingPossible != wasMintingPossible)
+ Controller.getInstance().onMintingPossibleChange(isMintingPossible);
- // Sleep for a while.
- // It's faster on single node testnets, to allow lots of blocks to be minted quickly.
- Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
-
- isMintingPossible = false;
-
- final Long now = NTP.getTime();
- if (now == null)
- continue;
-
- final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
- if (minLatestBlockTimestamp == null)
- continue;
-
- List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
- // No minting accounts?
- if (mintingAccountsData.isEmpty())
- continue;
-
- // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
- // Note that minting accounts are actually reward-shares in Qortal
- Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
- while (madi.hasNext()) {
- MintingAccountData mintingAccountData = madi.next();
-
- RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
- if (rewardShareData == null) {
- // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
- madi.remove();
- continue;
- }
-
- Account mintingAccount = new Account(repository, rewardShareData.getMinter());
- if (!mintingAccount.canMint(true)) {
- // Minting-account component of reward-share can no longer mint - disregard
- madi.remove();
- continue;
- }
-
- // Optional (non-validated) prevention of block submissions below a defined level.
- // This is an unvalidated version of Blockchain.minAccountLevelToMint
- // and exists only to reduce block candidates by default.
- int level = mintingAccount.getEffectiveMintingLevel();
- if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
- madi.remove();
- }
- }
-
- // Needs a mutable copy of the unmodifiableList
- List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
- BlockData lastBlockData = blockRepository.getLastBlock();
-
- // Disregard peers that have "misbehaved" recently
- peers.removeIf(Controller.hasMisbehaved);
-
- // Disregard peers that don't have a recent block, but only if we're not in recovery mode.
- // In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
- if (!Synchronizer.getInstance().getRecoveryMode())
- peers.removeIf(Controller.hasNoRecentBlock);
-
- // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
- if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
- continue;
-
- // If we are stuck on an invalid block, we should allow an alternative to be minted
- boolean recoverInvalidBlock = false;
- if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
- // We've had at least one invalid block
- long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
- long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
- if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
- if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
- // Last valid block was more than 10 mins ago, but we've had an invalid block since then
- // Assume that the chain has stalled because there is no alternative valid candidate
- // Enter recovery mode to allow alternative, valid candidates to be minted
- recoverInvalidBlock = true;
- }
- }
- }
-
- // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
- if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
- if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
- continue;
-
- // There are enough peers with a recent block and our latest block is recent
- // so go ahead and mint a block if possible.
- isMintingPossible = true;
-
- // Check blockchain hasn't changed
- if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
- previousBlockData = lastBlockData;
- newBlocks.clear();
-
- // Reduce log timeout
- logTimeout = 10 * 1000L;
-
- // Last low weight block is no longer valid
- parentSignatureForLastLowWeightBlock = null;
- }
-
- // Discard accounts we have already built blocks with
- mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
-
- // Do we need to build any potential new blocks?
- List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
-
- // We might need to sit the next block out, if one of our minting accounts signed the previous one
- // Skip this check for single node testnets, since they definitely need to mint every block
- byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
- boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
- if (mintedLastBlock && !isSingleNodeTestnet) {
- LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
- continue;
- }
-
- if (parentSignatureForLastLowWeightBlock != null) {
- // The last iteration found a higher weight block in the network, so sleep for a while
- // to allow is to sync the higher weight chain. We are sleeping here rather than when
- // detected as we don't want to hold the blockchain lock open.
- LOGGER.info("Sleeping for 10 seconds...");
- Thread.sleep(10 * 1000L);
- }
-
- for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
- // First block does the AT heavy-lifting
- if (newBlocks.isEmpty()) {
- Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
- if (newBlock == null) {
- // For some reason we can't mint right now
- moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
- continue;
- }
-
- newBlocks.add(newBlock);
- } else {
- // The blocks for other minters require less effort...
- Block newBlock = newBlocks.get(0).remint(mintingAccount);
- if (newBlock == null) {
- // For some reason we can't mint right now
- moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
- continue;
- }
-
- newBlocks.add(newBlock);
- }
- }
-
- // No potential block candidates?
- if (newBlocks.isEmpty())
- continue;
-
- // Make sure we're the only thread modifying the blockchain
- ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
- if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
- LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
- continue;
- }
-
- boolean newBlockMinted = false;
- Block newBlock = null;
+ wasMintingPossible = isMintingPossible;
try {
- // Clear repository session state so we have latest view of data
+ // reset the repository, to the repository recreated for this loop iteration
+ for( Block newBlock : newBlocks ) newBlock.setRepository(repository);
+
+ // Free up any repository locks
repository.discardChanges();
- // Now that we have blockchain lock, do final check that chain hasn't changed
- BlockData latestBlockData = blockRepository.getLastBlock();
- if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
+ // Sleep for a while.
+ // It's faster on single node testnets, to allow lots of blocks to be minted quickly.
+ Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
+
+ isMintingPossible = false;
+
+ final Long now = NTP.getTime();
+ if (now == null)
continue;
- List<Block> goodBlocks = new ArrayList<>();
- boolean wasInvalidBlockDiscarded = false;
- Iterator<Block> newBlocksIterator = newBlocks.iterator();
+ final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
+ if (minLatestBlockTimestamp == null)
+ continue;
- while (newBlocksIterator.hasNext()) {
- Block testBlock = newBlocksIterator.next();
+ List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
+ // No minting accounts?
+ if (mintingAccountsData.isEmpty())
+ continue;
- // Is new block's timestamp valid yet?
- // We do a separate check as some timestamp checks are skipped for testchains
- if (testBlock.isTimestampValid() != ValidationResult.OK)
+ // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
+ // Note that minting accounts are actually reward-shares in Qortal
+ Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
+ while (madi.hasNext()) {
+ MintingAccountData mintingAccountData = madi.next();
+
+ RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
+ if (rewardShareData == null) {
+ // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
+ madi.remove();
+ continue;
+ }
+
+ Account mintingAccount = new Account(repository, rewardShareData.getMinter());
+ if (!mintingAccount.canMint(true)) {
+ // Minting-account component of reward-share can no longer mint - disregard
+ madi.remove();
+ continue;
+ }
+
+ // Optional (non-validated) prevention of block submissions below a defined level.
+ // This is an unvalidated version of Blockchain.minAccountLevelToMint
+ // and exists only to reduce block candidates by default.
+ int level = mintingAccount.getEffectiveMintingLevel();
+ if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
+ madi.remove();
+ }
+ }
+
+ // Needs a mutable copy of the unmodifiableList
+ List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
+ BlockData lastBlockData = blockRepository.getLastBlock();
+
+ // Disregard peers that have "misbehaved" recently
+ peers.removeIf(Controller.hasMisbehaved);
+
+ // Disregard peers that don't have a recent block, but only if we're not in recovery mode.
+ // In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
+ if (!Synchronizer.getInstance().getRecoveryMode())
+ peers.removeIf(Controller.hasNoRecentBlock);
+
+ // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
+ if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
+ continue;
+
+ // If we are stuck on an invalid block, we should allow an alternative to be minted
+ boolean recoverInvalidBlock = false;
+ if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
+ // We've had at least one invalid block
+ long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
+ long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
+ if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
+ if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
+ // Last valid block was more than 10 mins ago, but we've had an invalid block since then
+ // Assume that the chain has stalled because there is no alternative valid candidate
+ // Enter recovery mode to allow alternative, valid candidates to be minted
+ recoverInvalidBlock = true;
+ }
+ }
+ }
+
+ // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
+ if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
+ if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
continue;
- testBlock.preProcess();
+ // There are enough peers with a recent block and our latest block is recent
+ // so go ahead and mint a block if possible.
+ isMintingPossible = true;
- // Is new block valid yet? (Before adding unconfirmed transactions)
- ValidationResult result = testBlock.isValid();
- if (result != ValidationResult.OK) {
- moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
+ // Check blockchain hasn't changed
+ if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
+ previousBlockData = lastBlockData;
+ newBlocks.clear();
- newBlocksIterator.remove();
- wasInvalidBlockDiscarded = true;
- /*
- * Bail out fast so that we loop around from the top again.
- * This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
- * via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
- * Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
- */
- break;
- }
+ // Reduce log timeout
+ logTimeout = 10 * 1000L;
- goodBlocks.add(testBlock);
+ // Last low weight block is no longer valid
+ parentSignatureForLastLowWeightBlock = null;
}
- if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
+ // Discard accounts we have already built blocks with
+ mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
+
+ // Do we need to build any potential new blocks?
+ List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
+
+ // We might need to sit the next block out, if one of our minting accounts signed the previous one
+ // Skip this check for single node testnets, since they definitely need to mint every block
+ byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
+ boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
+ if (mintedLastBlock && !isSingleNodeTestnet) {
+ LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
continue;
-
- // Pick best block
- final int parentHeight = previousBlockData.getHeight();
- final byte[] parentBlockSignature = previousBlockData.getSignature();
-
- BigInteger bestWeight = null;
-
- for (int bi = 0; bi < goodBlocks.size(); ++bi) {
- BlockData blockData = goodBlocks.get(bi).getBlockData();
-
- BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
- int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
- blockSummaryData.setMinterLevel(minterLevel);
-
- BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
-
- if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
- newBlock = goodBlocks.get(bi);
- bestWeight = blockWeight;
- }
}
- try {
- if (this.higherWeightChainExists(repository, bestWeight)) {
+ if (parentSignatureForLastLowWeightBlock != null) {
+ // The last iteration found a higher weight block in the network, so sleep for a while
+ // to allow is to sync the higher weight chain. We are sleeping here rather than when
+ // detected as we don't want to hold the blockchain lock open.
+ LOGGER.info("Sleeping for 10 seconds...");
+ Thread.sleep(10 * 1000L);
+ }
- // Check if the base block has updated since the last time we were here
- if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
- !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
- // We've switched to a different chain, so reset the timer
- timeOfLastLowWeightBlock = NTP.getTime();
- }
- parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
-
- // If less than 30 seconds has passed since first detection the higher weight chain,
- // we should skip our block submission to give us the opportunity to sync to the better chain
- if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
- LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
- LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
+ for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
+ // First block does the AT heavy-lifting
+ if (newBlocks.isEmpty()) {
+ Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
+ if (newBlock == null) {
+ // For some reason we can't mint right now
+ moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
continue;
- } else {
- // More than 30 seconds have passed, so we should submit our block candidate anyway.
- LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
}
+
+ newBlocks.add(newBlock);
} else {
- LOGGER.debug("No higher weight chain found in peers");
+ // The blocks for other minters require less effort...
+ Block newBlock = newBlocks.get(0).remint(mintingAccount);
+ if (newBlock == null) {
+ // For some reason we can't mint right now
+ moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
+ continue;
+ }
+
+ newBlocks.add(newBlock);
}
- } catch (DataException e) {
- LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
}
- // Discard any uncommitted changes as a result of the higher weight chain detection
- repository.discardChanges();
+ // No potential block candidates?
+ if (newBlocks.isEmpty())
+ continue;
- // Clear variables that track low weight blocks
- parentSignatureForLastLowWeightBlock = null;
- timeOfLastLowWeightBlock = null;
-
- Long unconfirmedStartTime = NTP.getTime();
-
- // Add unconfirmed transactions
- addUnconfirmedTransactions(repository, newBlock);
-
- LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime()-unconfirmedStartTime)));
-
- // Sign to create block's signature
- newBlock.sign();
-
- // Is newBlock still valid?
- ValidationResult validationResult = newBlock.isValid();
- if (validationResult != ValidationResult.OK) {
- // No longer valid? Report and discard
- LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
-
- // Rebuild block candidates, just to be sure
- newBlocks.clear();
+ // Make sure we're the only thread modifying the blockchain
+ ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
+ if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
+ LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
continue;
}
- // Add to blockchain - something else will notice and broadcast new block to network
+ boolean newBlockMinted = false;
+ Block newBlock = null;
+
try {
- newBlock.process();
+ // Clear repository session state so we have latest view of data
+ repository.discardChanges();
- repository.saveChanges();
+ // Now that we have blockchain lock, do final check that chain hasn't changed
+ BlockData latestBlockData = blockRepository.getLastBlock();
+ if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
+ continue;
- LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
+ List<Block> goodBlocks = new ArrayList<>();
+ boolean wasInvalidBlockDiscarded = false;
+ Iterator<Block> newBlocksIterator = newBlocks.iterator();
- RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
+ while (newBlocksIterator.hasNext()) {
+ Block testBlock = newBlocksIterator.next();
- if (rewardShareData != null) {
- LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
- newBlock.getBlockData().getHeight(),
- Base58.encode(newBlock.getBlockData().getSignature()),
- Base58.encode(newBlock.getParent().getSignature()),
- rewardShareData.getMinter(),
- rewardShareData.getRecipient()));
- } else {
- LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
- newBlock.getBlockData().getHeight(),
- Base58.encode(newBlock.getBlockData().getSignature()),
- Base58.encode(newBlock.getParent().getSignature()),
- newBlock.getMinter().getAddress()));
+ // Is new block's timestamp valid yet?
+ // We do a separate check as some timestamp checks are skipped for testchains
+ if (testBlock.isTimestampValid() != ValidationResult.OK)
+ continue;
+
+ testBlock.preProcess();
+
+ // Is new block valid yet? (Before adding unconfirmed transactions)
+ ValidationResult result = testBlock.isValid();
+ if (result != ValidationResult.OK) {
+ moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
+
+ newBlocksIterator.remove();
+ wasInvalidBlockDiscarded = true;
+ /*
+ * Bail out fast so that we loop around from the top again.
+ * This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
+ * via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
+ * Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
+ */
+ break;
+ }
+
+ goodBlocks.add(testBlock);
}
- // Notify network after we're released blockchain lock
- newBlockMinted = true;
+ if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
+ continue;
- // Notify Controller
- repository.discardChanges(); // clear transaction status to prevent deadlocks
- Controller.getInstance().onNewBlock(newBlock.getBlockData());
- } catch (DataException e) {
- // Unable to process block - report and discard
- LOGGER.error("Unable to process newly minted block?", e);
- newBlocks.clear();
- } catch (ArithmeticException e) {
- // Unable to process block - report and discard
- LOGGER.error("Unable to process newly minted block?", e);
- newBlocks.clear();
+ // Pick best block
+ final int parentHeight = previousBlockData.getHeight();
+ final byte[] parentBlockSignature = previousBlockData.getSignature();
+
+ BigInteger bestWeight = null;
+
+ for (int bi = 0; bi < goodBlocks.size(); ++bi) {
+ BlockData blockData = goodBlocks.get(bi).getBlockData();
+
+ BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
+ int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
+ blockSummaryData.setMinterLevel(minterLevel);
+
+ BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
+
+ if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
+ newBlock = goodBlocks.get(bi);
+ bestWeight = blockWeight;
+ }
+ }
+
+ try {
+ if (this.higherWeightChainExists(repository, bestWeight)) {
+
+ // Check if the base block has updated since the last time we were here
+ if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
+ !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
+ // We've switched to a different chain, so reset the timer
+ timeOfLastLowWeightBlock = NTP.getTime();
+ }
+ parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
+
+ // If less than 30 seconds has passed since first detection the higher weight chain,
+ // we should skip our block submission to give us the opportunity to sync to the better chain
+ if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
+ LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
+ LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
+ continue;
+ } else {
+ // More than 30 seconds have passed, so we should submit our block candidate anyway.
+ LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
+ }
+ } else {
+ LOGGER.debug("No higher weight chain found in peers");
+ }
+ } catch (DataException e) {
+ LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
+ }
+
+ // Discard any uncommitted changes as a result of the higher weight chain detection
+ repository.discardChanges();
+
+ // Clear variables that track low weight blocks
+ parentSignatureForLastLowWeightBlock = null;
+ timeOfLastLowWeightBlock = null;
+
+ Long unconfirmedStartTime = NTP.getTime();
+
+ // Add unconfirmed transactions
+ addUnconfirmedTransactions(repository, newBlock);
+
+ LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime() - unconfirmedStartTime)));
+
+ // Sign to create block's signature
+ newBlock.sign();
+
+ // Is newBlock still valid?
+ ValidationResult validationResult = newBlock.isValid();
+ if (validationResult != ValidationResult.OK) {
+ // No longer valid? Report and discard
+ LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
+
+ // Rebuild block candidates, just to be sure
+ newBlocks.clear();
+ continue;
+ }
+
+ // Add to blockchain - something else will notice and broadcast new block to network
+ try {
+ newBlock.process();
+
+ repository.saveChanges();
+
+ LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
+
+ RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
+
+ if (rewardShareData != null) {
+ LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
+ newBlock.getBlockData().getHeight(),
+ Base58.encode(newBlock.getBlockData().getSignature()),
+ Base58.encode(newBlock.getParent().getSignature()),
+ rewardShareData.getMinter(),
+ rewardShareData.getRecipient()));
+ } else {
+ LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
+ newBlock.getBlockData().getHeight(),
+ Base58.encode(newBlock.getBlockData().getSignature()),
+ Base58.encode(newBlock.getParent().getSignature()),
+ newBlock.getMinter().getAddress()));
+ }
+
+ // Notify network after we're released blockchain lock
+ newBlockMinted = true;
+
+ // Notify Controller
+ repository.discardChanges(); // clear transaction status to prevent deadlocks
+ Controller.getInstance().onNewBlock(newBlock.getBlockData());
+ } catch (DataException e) {
+ // Unable to process block - report and discard
+ LOGGER.error("Unable to process newly minted block?", e);
+ newBlocks.clear();
+ } catch (ArithmeticException e) {
+ // Unable to process block - report and discard
+ LOGGER.error("Unable to process newly minted block?", e);
+ newBlocks.clear();
+ }
+ } finally {
+ blockchainLock.unlock();
}
- } finally {
- blockchainLock.unlock();
- }
- if (newBlockMinted) {
- // Broadcast our new chain to network
- Network.getInstance().broadcastOurChain();
- }
+ if (newBlockMinted) {
+ // Broadcast our new chain to network
+ Network.getInstance().broadcastOurChain();
+ }
- } catch (InterruptedException e) {
- // We've been interrupted - time to exit
- return;
+ } catch (InterruptedException e) {
+ // We've been interrupted - time to exit
+ return;
+ }
+ } catch (DataException e) {
+ LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
}
}
- } catch (DataException e) {
- LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
}
}
diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java
index 43a2b209..86e3d8ae 100644
--- a/src/main/java/org/qortal/controller/Controller.java
+++ b/src/main/java/org/qortal/controller/Controller.java
@@ -13,6 +13,7 @@ import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.controller.arbitrary.*;
+import org.qortal.controller.hsqldb.HSQLDBBalanceRecorder;
import org.qortal.controller.hsqldb.HSQLDBDataCacheManager;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.controller.repository.PruneManager;
@@ -37,7 +38,6 @@ import org.qortal.network.Peer;
import org.qortal.network.PeerAddress;
import org.qortal.network.message.*;
import org.qortal.repository.*;
-import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
@@ -74,6 +74,8 @@ import java.util.stream.Collectors;
public class Controller extends Thread {
+ public static HSQLDBRepositoryFactory REPOSITORY_FACTORY;
+
static {
// This must go before any calls to LogManager/Logger
System.setProperty("log4j2.formatMsgNoLookups", "true");
@@ -405,23 +407,38 @@ public class Controller extends Thread {
LOGGER.info("Starting repository");
try {
- RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
- RepositoryManager.setRepositoryFactory(repositoryFactory);
+ REPOSITORY_FACTORY = new HSQLDBRepositoryFactory(getRepositoryUrl());
+ RepositoryManager.setRepositoryFactory(REPOSITORY_FACTORY);
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
try (final Repository repository = RepositoryManager.getRepository()) {
// RepositoryManager.rebuildTransactionSequences(repository);
ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, false);
+ }
- if( Settings.getInstance().isDbCacheEnabled() ) {
- LOGGER.info("Db Cache Starting ...");
- HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager((HSQLDBRepository) repositoryFactory.getRepository());
- hsqldbDataCacheManager.start();
+ if( Settings.getInstance().isDbCacheEnabled() ) {
+ LOGGER.info("Db Cache Starting ...");
+ HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager();
+ hsqldbDataCacheManager.start();
+ }
+ else {
+ LOGGER.info("Db Cache Disabled");
+ }
+
+ if( Settings.getInstance().isBalanceRecorderEnabled() ) {
+ Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();
+
+ if( recorder.isPresent() ) {
+ LOGGER.info("Balance Recorder Starting ...");
+ recorder.get().start();
}
else {
- LOGGER.info("Db Cache Disabled");
+ LOGGER.info("Balance Recorder won't start.");
}
}
+ else {
+ LOGGER.info("Balance Recorder Disabled");
+ }
} catch (DataException e) {
// If exception has no cause or message then repository is in use by some other process.
if (e.getCause() == null && e.getMessage() == null) {
@@ -650,10 +667,8 @@ public class Controller extends Thread {
boolean canBootstrap = Settings.getInstance().getBootstrap();
boolean needsArchiveRebuild = false;
int checkHeight = 0;
- Repository repository = null;
- try {
- repository = RepositoryManager.getRepository();
+ try (final Repository repository = RepositoryManager.getRepository()){
needsArchiveRebuild = (repository.getBlockArchiveRepository().fromHeight(2) == null);
checkHeight = repository.getBlockRepository().getBlockchainHeight();
} catch (DataException e) {
@@ -1229,6 +1244,24 @@ public class Controller extends Thread {
network.broadcast(network::buildGetUnconfirmedTransactionsMessage);
}
+ public void doRNSNetworkBroadcast() {
+ if (Settings.getInstance().isLite()) {
+ // Lite nodes have nothing to broadcast
+ return;
+ }
+ RNSNetwork network = RNSNetwork.getInstance();
+
+ //// Send our current height
+ //network.broadcastOurChain();
+
+ //// Request unconfirmed transaction signatures, but only if we're up-to-date.
+ //// If we're not up-to-date then the priority is synchronizing first
+ //if (isUpToDate()) {
+ // network.broadcast(network::buildGetUnconfirmedTransactionsMessage);
+ //}
+
+ }
+
public void onMintingPossibleChange(boolean isMintingPossible) {
this.isMintingPossible = isMintingPossible;
requestSysTrayUpdate = true;
diff --git a/src/main/java/org/qortal/controller/hsqldb/HSQLDBBalanceRecorder.java b/src/main/java/org/qortal/controller/hsqldb/HSQLDBBalanceRecorder.java
new file mode 100644
index 00000000..7a7009ff
--- /dev/null
+++ b/src/main/java/org/qortal/controller/hsqldb/HSQLDBBalanceRecorder.java
@@ -0,0 +1,117 @@
+package org.qortal.controller.hsqldb;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.qortal.data.account.AccountBalanceData;
+import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
+import org.qortal.settings.Settings;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+
+public class HSQLDBBalanceRecorder extends Thread{
+
+ private static final Logger LOGGER = LogManager.getLogger(HSQLDBBalanceRecorder.class);
+
+ private static HSQLDBBalanceRecorder SINGLETON = null;
+
+ private ConcurrentHashMap<Integer, List<AccountBalanceData>> balancesByHeight = new ConcurrentHashMap<>();
+
+ private ConcurrentHashMap<String, List<AccountBalanceData>> balancesByAddress = new ConcurrentHashMap<>();
+
+ private int priorityRequested;
+ private int frequency;
+ private int capacity;
+
+ private HSQLDBBalanceRecorder( int priorityRequested, int frequency, int capacity) {
+
+ super("Balance Recorder");
+
+ this.priorityRequested = priorityRequested;
+ this.frequency = frequency;
+ this.capacity = capacity;
+ }
+
+ public static Optional<HSQLDBBalanceRecorder> getInstance() {
+
+ if( SINGLETON == null ) {
+
+ SINGLETON
+ = new HSQLDBBalanceRecorder(
+ Settings.getInstance().getBalanceRecorderPriority(),
+ Settings.getInstance().getBalanceRecorderFrequency(),
+ Settings.getInstance().getBalanceRecorderCapacity()
+ );
+
+ }
+ else if( SINGLETON == null ) {
+
+ return Optional.empty();
+ }
+
+ return Optional.of(SINGLETON);
+ }
+
+ @Override
+ public void run() {
+
+ Thread.currentThread().setName("Balance Recorder");
+
+ HSQLDBCacheUtils.startRecordingBalances(this.balancesByHeight, this.balancesByAddress, this.priorityRequested, this.frequency, this.capacity);
+ }
+
+ public List<AccountBalanceData> getLatestRecordings(int limit, long offset) {
+ ArrayList<AccountBalanceData> data;
+
+ Optional<Integer> lastHeight = getLastHeight();
+
+ if(lastHeight.isPresent() ) {
+ List<AccountBalanceData> latest = this.balancesByHeight.get(lastHeight.get());
+
+ if( latest != null ) {
+ data = new ArrayList<>(latest.size());
+ data.addAll(
+ latest.stream()
+ .sorted(Comparator.comparingDouble(AccountBalanceData::getBalance).reversed())
+ .skip(offset)
+ .limit(limit)
+ .collect(Collectors.toList())
+ );
+ }
+ else {
+ data = new ArrayList<>(0);
+ }
+ }
+ else {
+ data = new ArrayList<>(0);
+ }
+
+ return data;
+ }
+
+ private Optional<Integer> getLastHeight() {
+ return this.balancesByHeight.keySet().stream().sorted(Comparator.reverseOrder()).findFirst();
+ }
+
+ public List<Integer> getBlocksRecorded() {
+
+ return this.balancesByHeight.keySet().stream().collect(Collectors.toList());
+ }
+
+ public List<AccountBalanceData> getAccountBalanceRecordings(String address) {
+ return this.balancesByAddress.get(address);
+ }
+
+ @Override
+ public String toString() {
+ return "HSQLDBBalanceRecorder{" +
+ "priorityRequested=" + priorityRequested +
+ ", frequency=" + frequency +
+ ", capacity=" + capacity +
+ '}';
+ }
+}
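A minimal usage sketch for the new recorder (illustrative only), mirroring the Controller wiring added above; the limit/offset values are arbitrary, and `Optional`, `List` and `AccountBalanceData` are the same imports the class itself uses.

    Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();

    if (recorder.isPresent()) {
        // start() begins periodic recording via HSQLDBCacheUtils.startRecordingBalances(...)
        recorder.get().start();

        // ... later, once some heights have been captured ...
        List<Integer> recordedHeights = recorder.get().getBlocksRecorded();
        List<AccountBalanceData> latestTop20 = recorder.get().getLatestRecordings(20, 0);
    }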
diff --git a/src/main/java/org/qortal/controller/hsqldb/HSQLDBDataCacheManager.java b/src/main/java/org/qortal/controller/hsqldb/HSQLDBDataCacheManager.java
index 0f678a3c..434a67f1 100644
--- a/src/main/java/org/qortal/controller/hsqldb/HSQLDBDataCacheManager.java
+++ b/src/main/java/org/qortal/controller/hsqldb/HSQLDBDataCacheManager.java
@@ -8,11 +8,7 @@ import org.qortal.settings.Settings;
public class HSQLDBDataCacheManager extends Thread{
- private HSQLDBRepository respository;
-
- public HSQLDBDataCacheManager(HSQLDBRepository respository) {
- this.respository = respository;
- }
+ public HSQLDBDataCacheManager() {}
@Override
public void run() {
@@ -20,8 +16,7 @@ public class HSQLDBDataCacheManager extends Thread{
HSQLDBCacheUtils.startCaching(
Settings.getInstance().getDbCacheThreadPriority(),
- Settings.getInstance().getDbCacheFrequency(),
- this.respository
+ Settings.getInstance().getDbCacheFrequency()
);
}
}
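With the repository field removed, callers construct the cache manager without arguments; a minimal sketch of the updated call site, assuming it is still started from the controller during startup:

    // Illustrative call site (not part of this patch): the thread now obtains its own
    // repository inside HSQLDBCacheUtils.startCaching() rather than holding one for life.
    HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager();
    hsqldbDataCacheManager.start();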
diff --git a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
index 467333c8..3bc3db99 100644
--- a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
+++ b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
@@ -39,15 +39,24 @@ public class AtStatesPruner implements Runnable {
}
}
+ int pruneStartHeight;
+ int maxLatestAtStatesHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
- int pruneStartHeight = repository.getATRepository().getAtPruneHeight();
- int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
+ pruneStartHeight = repository.getATRepository().getAtPruneHeight();
+ maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.discardChanges();
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
+ } catch (Exception e) {
+ LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
+
+ while (!Controller.isStopping()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
- while (!Controller.isStopping()) {
try {
repository.discardChanges();
@@ -102,28 +111,25 @@ public class AtStatesPruner implements Runnable {
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.info(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
- }
- else {
+ } else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
- Thread.sleep(5*60*1000L);
+ Thread.sleep(5 * 60 * 1000L);
}
}
} catch (InterruptedException e) {
- if(Controller.isStopping()) {
+ if (Controller.isStopping()) {
LOGGER.info("AT States Pruning Shutting Down");
- }
- else {
+ } else {
LOGGER.warn("AT States Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("AT States Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (Exception e) {
- LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}
-
}
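The same restructuring recurs in AtStatesTrimmer, BlockArchiver, BlockPruner and OnlineAccountsSignaturesTrimmer below: the one-off setup (rebuild or start-height lookup) now runs in its own repository session, and every loop iteration opens a fresh session instead of keeping one connection open for the thread's lifetime. A compilable sketch of that shape, with a stand-in Session type in place of the real Repository and Controller calls:

    import java.util.function.BooleanSupplier;
    import java.util.function.Supplier;

    // Sketch only: Session stands in for Repository; the suppliers stand in for
    // RepositoryManager.getRepository() and Controller.isStopping().
    public class SessionPerIterationSketch {
        interface Session extends AutoCloseable {
            void work() throws Exception;
            @Override
            void close();
        }

        static void run(Supplier<Session> sessions, BooleanSupplier stopping) {
            try (Session setup = sessions.get()) {
                setup.work();       // one-off setup, e.g. rebuilding latest AT states
            } catch (Exception e) {
                return;             // fatal: log and give up, as the patch does
            }

            while (!stopping.getAsBoolean()) {
                try (Session session = sessions.get()) {
                    session.work(); // prune / trim / archive one batch, then sleep
                } catch (Exception e) {
                    return;         // fatal: log and exit the loop
                }
            }
        }
    }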
diff --git a/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
index 26c2931d..d188f81a 100644
--- a/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
+++ b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
@@ -26,15 +26,23 @@ public class AtStatesTrimmer implements Runnable {
return;
}
+ int trimStartHeight;
+ int maxLatestAtStatesHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
- int trimStartHeight = repository.getATRepository().getAtTrimHeight();
- int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
+ trimStartHeight = repository.getATRepository().getAtTrimHeight();
+ maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.discardChanges();
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
+ } catch (Exception e) {
+ LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
- while (!Controller.isStopping()) {
+ while (!Controller.isStopping()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
try {
repository.discardChanges();
@@ -92,9 +100,9 @@ public class AtStatesTrimmer implements Runnable {
} catch (Exception e) {
LOGGER.warn("AT States Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (Exception e) {
- LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}
diff --git a/src/main/java/org/qortal/controller/repository/BlockArchiver.java b/src/main/java/org/qortal/controller/repository/BlockArchiver.java
index b3c01f35..01cf40ed 100644
--- a/src/main/java/org/qortal/controller/repository/BlockArchiver.java
+++ b/src/main/java/org/qortal/controller/repository/BlockArchiver.java
@@ -30,11 +30,13 @@ public class BlockArchiver implements Runnable {
return;
}
+ int startHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
// Don't even start building until initial rush has ended
Thread.sleep(INITIAL_SLEEP_PERIOD);
- int startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
+ startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
// Don't attempt to archive if we have no ATStatesHeightIndex, as it will be too slow
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
@@ -43,10 +45,16 @@ public class BlockArchiver implements Runnable {
repository.discardChanges();
return;
}
+ } catch (Exception e) {
+ LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
- LOGGER.info("Starting block archiver from height {}...", startHeight);
+ LOGGER.info("Starting block archiver from height {}...", startHeight);
+
+ while (!Controller.isStopping()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
- while (!Controller.isStopping()) {
try {
repository.discardChanges();
@@ -107,20 +115,17 @@ public class BlockArchiver implements Runnable {
LOGGER.info("Caught exception when creating block cache", e);
}
} catch (InterruptedException e) {
- if(Controller.isStopping()) {
+ if (Controller.isStopping()) {
LOGGER.info("Block Archiving Shutting Down");
- }
- else {
+ } else {
LOGGER.warn("Block Archiving interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("Block Archiving stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (Exception e) {
- LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
-
}
-
}
diff --git a/src/main/java/org/qortal/controller/repository/BlockPruner.java b/src/main/java/org/qortal/controller/repository/BlockPruner.java
index 624457eb..7801f284 100644
--- a/src/main/java/org/qortal/controller/repository/BlockPruner.java
+++ b/src/main/java/org/qortal/controller/repository/BlockPruner.java
@@ -39,8 +39,10 @@ public class BlockPruner implements Runnable {
}
}
+ int pruneStartHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
- int pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
+ pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
// Don't attempt to prune if we have no ATStatesHeightIndex, as it will be too slow
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
@@ -48,8 +50,15 @@ public class BlockPruner implements Runnable {
LOGGER.info("Unable to start block pruner due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
return;
}
+ } catch (Exception e) {
+ LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
+
+ while (!Controller.isStopping()) {
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
- while (!Controller.isStopping()) {
try {
repository.discardChanges();
@@ -122,10 +131,9 @@ public class BlockPruner implements Runnable {
} catch (Exception e) {
LOGGER.warn("Block Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (Exception e) {
- LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}
-
}
diff --git a/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java b/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
index f6ee3e05..c2d37e14 100644
--- a/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
+++ b/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
@@ -28,13 +28,21 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
return;
}
+ int trimStartHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
// Don't even start trimming until initial rush has ended
Thread.sleep(INITIAL_SLEEP_PERIOD);
- int trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
+ trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
+ } catch (Exception e) {
+ LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
+
+ while (!Controller.isStopping()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
- while (!Controller.isStopping()) {
try {
repository.discardChanges();
@@ -88,10 +96,9 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
} catch (Exception e) {
LOGGER.warn("Online Accounts Signatures Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (Exception e) {
- LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}
-
}
diff --git a/src/main/java/org/qortal/data/block/BlockData.java b/src/main/java/org/qortal/data/block/BlockData.java
index 34df0f9a..7e2a1872 100644
--- a/src/main/java/org/qortal/data/block/BlockData.java
+++ b/src/main/java/org/qortal/data/block/BlockData.java
@@ -1,8 +1,11 @@
package org.qortal.data.block;
import com.google.common.primitives.Bytes;
+import org.qortal.account.Account;
import org.qortal.block.BlockChain;
-import org.qortal.crypto.Crypto;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
@@ -224,7 +227,7 @@ public class BlockData implements Serializable {
}
return 0;
}
-
+
public boolean isTrimmed() {
long onlineAccountSignaturesTrimmedTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
@@ -232,11 +235,31 @@ public class BlockData implements Serializable {
return blockTimestamp < onlineAccountSignaturesTrimmedTimestamp && blockTimestamp < currentTrimmableTimestamp;
}
+ public String getMinterAddressFromPublicKey() {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ return Account.getRewardShareMintingAddress(repository, this.minterPublicKey);
+ } catch (DataException e) {
+ return "Unknown";
+ }
+ }
+
+ public int getMinterLevelFromPublicKey() {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ return Account.getRewardShareEffectiveMintingLevel(repository, this.minterPublicKey);
+ } catch (DataException e) {
+ return 0;
+ }
+ }
+
// JAXB special
@XmlElement(name = "minterAddress")
protected String getMinterAddress() {
- return Crypto.toAddress(this.minterPublicKey);
+ return getMinterAddressFromPublicKey();
}
+ @XmlElement(name = "minterLevel")
+ protected int getMinterLevel() {
+ return getMinterLevelFromPublicKey();
+ }
}
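Both helpers swallow repository failures and fall back to safe defaults, so JAXB serialisation never throws; a small illustrative sketch of calling them, assuming an open Repository:

    // Illustrative only (not part of this patch): the new helpers always return a value,
    // so the API layer always has something to emit for minterAddress and minterLevel.
    static void printMinterInfo(Repository repository) throws DataException {
        BlockData blockData = repository.getBlockRepository().fromHeight(1);
        String minterAddress = blockData.getMinterAddressFromPublicKey(); // "Unknown" if the lookup fails
        int minterLevel = blockData.getMinterLevelFromPublicKey();        // 0 if the lookup fails
        System.out.println(minterAddress + " (level " + minterLevel + ")");
    }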
diff --git a/src/main/java/org/qortal/repository/ChatRepository.java b/src/main/java/org/qortal/repository/ChatRepository.java
index d046fe6b..bd636fe3 100644
--- a/src/main/java/org/qortal/repository/ChatRepository.java
+++ b/src/main/java/org/qortal/repository/ChatRepository.java
@@ -22,6 +22,6 @@ public interface ChatRepository {
public ChatMessage toChatMessage(ChatTransactionData chatTransactionData, Encoding encoding) throws DataException;
- public ActiveChats getActiveChats(String address, Encoding encoding) throws DataException;
+ public ActiveChats getActiveChats(String address, Encoding encoding, Boolean hasChatReference) throws DataException;
}
diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBCacheUtils.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBCacheUtils.java
index 81fcb3c5..a4aacbf5 100644
--- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBCacheUtils.java
+++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBCacheUtils.java
@@ -5,10 +5,13 @@ import org.apache.logging.log4j.Logger;
import org.qortal.api.SearchMode;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
+import org.qortal.controller.Controller;
+import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
+import org.qortal.repository.DataException;
import java.sql.ResultSet;
import java.sql.SQLException;
@@ -48,6 +51,11 @@ public class HSQLDBCacheUtils {
}
};
private static final String DEFAULT_IDENTIFIER = "default";
+ private static final int ZERO = 0;
+ public static final String DB_CACHE_TIMER = "DB Cache Timer";
+ public static final String DB_CACHE_TIMER_TASK = "DB Cache Timer Task";
+ public static final String BALANCE_RECORDER_TIMER = "Balance Recorder Timer";
+ public static final String BALANCE_RECORDER_TIMER_TASK = "Balance Recorder Timer Task";
/**
*
@@ -351,13 +359,124 @@ public class HSQLDBCacheUtils {
* Start Caching
*
* @param priorityRequested the thread priority to fill cache in
- * @param frequency the frequency to fill the cache (in seconds)
- * @param respository the data source
+ * @param frequency the frequency to fill the cache (in seconds)
*
* @return the data cache
*/
- public static void startCaching(int priorityRequested, int frequency, HSQLDBRepository respository) {
+ public static void startCaching(int priorityRequested, int frequency) {
+ Timer timer = buildTimer(DB_CACHE_TIMER, priorityRequested);
+
+ TimerTask task = new TimerTask() {
+ @Override
+ public void run() {
+
+ Thread.currentThread().setName(DB_CACHE_TIMER_TASK);
+
+ try (final HSQLDBRepository repository = (HSQLDBRepository) Controller.REPOSITORY_FACTORY.getRepository()) {
+ fillCache(ArbitraryResourceCache.getInstance(), repository);
+ }
+ catch (DataException e) {
+ LOGGER.error(e.getMessage(), e);
+ }
+ }
+ };
+
+ // delay 1 second
+ timer.scheduleAtFixedRate(task, 1000, frequency * 1000);
+ }
+
+ /**
+ * Start Recording Balances
+ *
+ * @param balancesByHeight recorded balances keyed by block height; oldest heights are dropped when over capacity
+ * @param balancesByAddress recorded balances keyed by account address
+ * @param priorityRequested the requested thread priority
+ * @param frequency the recording frequency, in minutes
+ * @param capacity the maximum number of block heights to retain
+ */
+ public static void startRecordingBalances(
+ final ConcurrentHashMap<Integer, List<AccountBalanceData>> balancesByHeight,
+ final ConcurrentHashMap<String, List<AccountBalanceData>> balancesByAddress,
+ int priorityRequested,
+ int frequency,
+ int capacity) {
+
+ Timer timer = buildTimer(BALANCE_RECORDER_TIMER, priorityRequested);
+
+ TimerTask task = new TimerTask() {
+ @Override
+ public void run() {
+
+ Thread.currentThread().setName(BALANCE_RECORDER_TIMER_TASK);
+
+ try (final HSQLDBRepository repository = (HSQLDBRepository) Controller.REPOSITORY_FACTORY.getRepository()) {
+ while (balancesByHeight.size() > capacity + 1) {
+ Optional<Integer> firstHeight = balancesByHeight.keySet().stream().sorted().findFirst();
+
+ if (firstHeight.isPresent()) balancesByHeight.remove(firstHeight.get());
+ }
+
+ // get current balances
+ List<AccountBalanceData> accountBalances = getAccountBalances(repository);
+
+ // get any one of the balances
+ Optional<AccountBalanceData> data = accountBalances.stream().findAny();
+
+ // if there are any balances, then record them
+ if (data.isPresent()) {
+ // map all new balances to the current height
+ balancesByHeight.put(data.get().getHeight(), accountBalances);
+
+ // for each new balance, map to address
+ for (AccountBalanceData accountBalance : accountBalances) {
+
+ // get recorded balances for this address
+ List<AccountBalanceData> establishedBalances
+ = balancesByAddress.getOrDefault(accountBalance.getAddress(), new ArrayList<>(0));
+
+ // start a new list of recordings for this address, add the new balance and add the established
+ // balances
+ List<AccountBalanceData> balances = new ArrayList<>(establishedBalances.size() + 1);
+ balances.add(accountBalance);
+ balances.addAll(establishedBalances);
+
+ // reset the balances for this address
+ balancesByAddress.put(accountBalance.getAddress(), balances);
+
+ // TODO: reduce account balances to capacity
+ }
+
+ // reduce height balances to capacity
+ while (balancesByHeight.size() > capacity) {
+ Optional<Integer> lowestHeight
+ = balancesByHeight.entrySet().stream()
+ .min(Comparator.comparingInt(Map.Entry::getKey))
+ .map(Map.Entry::getKey);
+
+ if (lowestHeight.isPresent()) balancesByHeight.remove(lowestHeight.get());
+ }
+ }
+ } catch (DataException e) {
+ LOGGER.error(e.getMessage(), e);
+ }
+ }
+ };
+
+ // wait 5 minutes
+ timer.scheduleAtFixedRate(task, 300_000, frequency * 60_000);
+ }
+
+ /**
+ * Build Timer
+ *
+ * Build a timer for scheduling a timer task.
+ *
+ * @param name the name for the thread running the timer task
+ * @param priorityRequested the priority for the thread running the timer task
+ *
+ * @return a timer for scheduling a timer task
+ */
+ private static Timer buildTimer(final String name, int priorityRequested) {
// ensure priority is in between 1-10
final int priority = Math.max(0, Math.min(10, priorityRequested));
@@ -365,7 +484,7 @@ public class HSQLDBCacheUtils {
Timer timer = new Timer(true) { // 'true' to make the Timer daemon
@Override
public void schedule(TimerTask task, long delay) {
- Thread thread = new Thread(task) {
+ Thread thread = new Thread(task, name) {
@Override
public void run() {
this.setPriority(priority);
@@ -376,17 +495,7 @@ public class HSQLDBCacheUtils {
thread.start();
}
};
-
- TimerTask task = new TimerTask() {
- @Override
- public void run() {
-
- fillCache(ArbitraryResourceCache.getInstance(), respository);
- }
- };
-
- // delay 1 second
- timer.scheduleAtFixedRate(task, 1000, frequency * 1000);
+ return timer;
}
/**
@@ -541,4 +650,43 @@ public class HSQLDBCacheUtils {
return resources;
}
+
+ public static List<AccountBalanceData> getAccountBalances(HSQLDBRepository repository) {
+
+ StringBuilder sql = new StringBuilder();
+
+ sql.append("SELECT account, balance, height ");
+ sql.append("FROM ACCOUNTBALANCES as balances ");
+ sql.append("JOIN (SELECT height FROM BLOCKS ORDER BY height DESC LIMIT 1) AS max_height ON true ");
+ sql.append("WHERE asset_id=0");
+
+ List<AccountBalanceData> data = new ArrayList<>();
+
+ LOGGER.info( "Getting account balances ...");
+
+ try {
+ Statement statement = repository.connection.createStatement();
+
+ ResultSet resultSet = statement.executeQuery(sql.toString());
+
+ if (resultSet == null || !resultSet.next())
+ return new ArrayList<>(0);
+
+ do {
+ String account = resultSet.getString(1);
+ long balance = resultSet.getLong(2);
+ int height = resultSet.getInt(3);
+
+ data.add(new AccountBalanceData(account, ZERO, balance, height));
+ } while (resultSet.next());
+ } catch (SQLException e) {
+ LOGGER.warn(e.getMessage());
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
+ }
+
+ LOGGER.info("Retrieved account balances: count = " + data.size());
+
+ return data;
+ }
}
\ No newline at end of file
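One configuration subtlety worth noting: startCaching() treats its frequency argument as seconds, while startRecordingBalances() treats it as minutes, because of the different multipliers passed to scheduleAtFixedRate(). A quick sketch with illustrative values:

    // Illustrative values only; scheduleAtFixedRate() takes the period in milliseconds.
    int dbCacheFrequencySeconds = 60;                                         // used by startCaching()
    int balanceRecorderFrequencyMinutes = 2;                                  // used by startRecordingBalances()
    long dbCachePeriodMs = dbCacheFrequencySeconds * 1000L;                   // 60_000 ms
    long balanceRecorderPeriodMs = balanceRecorderFrequencyMinutes * 60_000L; // 120_000 ms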
diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java
index 571a587d..80865739 100644
--- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java
+++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBChatRepository.java
@@ -23,7 +23,7 @@ public class HSQLDBChatRepository implements ChatRepository {
public HSQLDBChatRepository(HSQLDBRepository repository) {
this.repository = repository;
}
-
+
@Override
public List<ChatMessage> getMessagesMatchingCriteria(Long before, Long after, Integer txGroupId, byte[] referenceBytes,
byte[] chatReferenceBytes, Boolean hasChatReference, List<String> involving, String senderAddress,
@@ -176,14 +176,14 @@ public class HSQLDBChatRepository implements ChatRepository {
}
@Override
- public ActiveChats getActiveChats(String address, Encoding encoding) throws DataException {
- List<GroupChat> groupChats = getActiveGroupChats(address, encoding);
- List<DirectChat> directChats = getActiveDirectChats(address);
+ public ActiveChats getActiveChats(String address, Encoding encoding, Boolean hasChatReference) throws DataException {
+ List<GroupChat> groupChats = getActiveGroupChats(address, encoding, hasChatReference);
+ List<DirectChat> directChats = getActiveDirectChats(address, hasChatReference);
return new ActiveChats(groupChats, directChats);
}
-
- private List<GroupChat> getActiveGroupChats(String address, Encoding encoding) throws DataException {
+
+ private List<GroupChat> getActiveGroupChats(String address, Encoding encoding, Boolean hasChatReference) throws DataException {
// Find groups where address is a member and potential latest message details
String groupsSql = "SELECT group_id, group_name, latest_timestamp, sender, sender_name, signature, data "
+ "FROM GroupMembers "
@@ -194,11 +194,19 @@ public class HSQLDBChatRepository implements ChatRepository {
+ "JOIN Transactions USING (signature) "
+ "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
// NOTE: We need to qualify "Groups.group_id" here to avoid "General error" bug in HSQLDB v2.5.0
- + "WHERE tx_group_id = Groups.group_id AND type = " + TransactionType.CHAT.value + " "
- + "ORDER BY created_when DESC "
+ + "WHERE tx_group_id = Groups.group_id AND type = " + TransactionType.CHAT.value + " ";
+
+ if (hasChatReference != null) {
+ if (hasChatReference) {
+ groupsSql += "AND chat_reference IS NOT NULL ";
+ } else {
+ groupsSql += "AND chat_reference IS NULL ";
+ }
+ }
+ groupsSql += "ORDER BY created_when DESC "
+ "LIMIT 1"
- + ") AS LatestMessages ON TRUE "
- + "WHERE address = ?";
+ + ") AS LatestMessages ON TRUE "
+ + "WHERE address = ?";
List<GroupChat> groupChats = new ArrayList<>();
try (ResultSet resultSet = this.repository.checkedExecute(groupsSql, address)) {
@@ -230,8 +238,16 @@ public class HSQLDBChatRepository implements ChatRepository {
+ "JOIN Transactions USING (signature) "
+ "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
+ "WHERE tx_group_id = 0 "
- + "AND recipient IS NULL "
- + "ORDER BY created_when DESC "
+ + "AND recipient IS NULL ";
+
+ if (hasChatReference != null) {
+ if (hasChatReference) {
+ grouplessSql += "AND chat_reference IS NOT NULL ";
+ } else {
+ grouplessSql += "AND chat_reference IS NULL ";
+ }
+ }
+ grouplessSql += "ORDER BY created_when DESC "
+ "LIMIT 1";
try (ResultSet resultSet = this.repository.checkedExecute(grouplessSql)) {
@@ -259,7 +275,7 @@ public class HSQLDBChatRepository implements ChatRepository {
return groupChats;
}
- private List<DirectChat> getActiveDirectChats(String address) throws DataException {
+ private List<DirectChat> getActiveDirectChats(String address, Boolean hasChatReference) throws DataException {
// Find chat messages involving address
String directSql = "SELECT other_address, name, latest_timestamp, sender, sender_name "
+ "FROM ("
@@ -275,11 +291,21 @@ public class HSQLDBChatRepository implements ChatRepository {
+ "NATURAL JOIN Transactions "
+ "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
+ "WHERE (sender = other_address AND recipient = ?) "
- + "OR (sender = ? AND recipient = other_address) "
- + "ORDER BY created_when DESC "
- + "LIMIT 1"
- + ") AS LatestMessages "
- + "LEFT OUTER JOIN Names ON owner = other_address";
+ + "OR (sender = ? AND recipient = other_address) ";
+
+ // Apply hasChatReference filter
+ if (hasChatReference != null) {
+ if (hasChatReference) {
+ directSql += "AND chat_reference IS NOT NULL ";
+ } else {
+ directSql += "AND chat_reference IS NULL ";
+ }
+ }
+
+ directSql += "ORDER BY created_when DESC "
+ + "LIMIT 1"
+ + ") AS LatestMessages "
+ + "LEFT OUTER JOIN Names ON owner = other_address";
Object[] bindParams = new Object[] { address, address, address, address };
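Callers of getActiveChats() now pass a three-state filter; a hypothetical usage sketch, assuming the Encoding enum provides BASE58. Passing null keeps the previous behaviour, while true or false restricts the latest-message lookups to transactions with or without a chat_reference:

    // Hypothetical caller (illustrative only)
    ActiveChats allChats      = chatRepository.getActiveChats(address, Encoding.BASE58, null);
    ActiveChats withReference = chatRepository.getActiveChats(address, Encoding.BASE58, true);
    ActiveChats noReference   = chatRepository.getActiveChats(address, Encoding.BASE58, false);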
diff --git a/src/main/java/org/qortal/settings/Settings.java b/src/main/java/org/qortal/settings/Settings.java
index 305021d5..2a749242 100644
--- a/src/main/java/org/qortal/settings/Settings.java
+++ b/src/main/java/org/qortal/settings/Settings.java
@@ -213,7 +213,7 @@ public class Settings {
public long recoveryModeTimeout = 9999999999999L;
/** Minimum peer version number required in order to sync with them */
- private String minPeerVersion = "4.6.3";
+ private String minPeerVersion = "4.6.5";
/** Whether to allow connections with peers below minPeerVersion
* If true, we won't sync with them but they can still sync with us, and will show in the peers list
* If false, sync will be blocked both ways, and they will not appear in the peers list */
@@ -222,7 +222,7 @@ public class Settings {
/** Minimum time (in seconds) that we should attempt to remain connected to a peer for */
private int minPeerConnectionTime = 2 * 60 * 60; // seconds
/** Maximum time (in seconds) that we should attempt to remain connected to a peer for */
- private int maxPeerConnectionTime = 4 * 60 * 60; // seconds
+ private int maxPeerConnectionTime = 6 * 60 * 60; // seconds
/** Maximum time (in seconds) that a peer should remain connected when requesting QDN data */
private int maxDataPeerConnectionTime = 30 * 60; // seconds
@@ -281,7 +281,10 @@ public class Settings {
// Auto-update sources
private String[] autoUpdateRepos = new String[] {
"https://github.com/Qortal/qortal/raw/%s/qortal.update",
- "https://raw.githubusercontent.com@151.101.16.133/Qortal/qortal/%s/qortal.update"
+ "https://raw.githubusercontent.com@151.101.16.133/Qortal/qortal/%s/qortal.update",
+ "https://qortal.link/Auto-Update/%s/qortal.update",
+ "https://qortal.name/Auto-Update/%s/qortal.update",
+ "https://update.qortal.org/Auto-Update/%s/qortal.update"
};
// Lists
@@ -441,6 +444,14 @@ public class Settings {
*/
private long archivingPause = 3000;
+ private boolean balanceRecorderEnabled = false;
+
+ private int balanceRecorderPriority = 1;
+
+ private int balanceRecorderFrequency = 2*60*1000;
+
+ private int balanceRecorderCapacity = 1000;
+
// Domain mapping
public static class ThreadLimit {
private String messageType;
@@ -1230,4 +1241,20 @@ public class Settings {
public long getArchivingPause() {
return archivingPause;
}
+
+ public int getBalanceRecorderPriority() {
+ return balanceRecorderPriority;
+ }
+
+ public int getBalanceRecorderFrequency() {
+ return balanceRecorderFrequency;
+ }
+
+ public int getBalanceRecorderCapacity() {
+ return balanceRecorderCapacity;
+ }
+
+ public boolean isBalanceRecorderEnabled() {
+ return balanceRecorderEnabled;
+ }
}
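The new balance recorder fields follow the existing Settings pattern: private fields with defaults, exposed through getters and overridable via settings.json. A short illustrative read-back, assuming the node's singleton Settings instance:

    // Illustrative only (defaults from this patch: disabled, priority 1,
    // frequency 2*60*1000, capacity 1000)
    Settings settings = Settings.getInstance();
    if (settings.isBalanceRecorderEnabled()) {
        System.out.println(String.format("Balance recorder: priority=%d, frequency=%d, capacity=%d",
                settings.getBalanceRecorderPriority(),
                settings.getBalanceRecorderFrequency(),
                settings.getBalanceRecorderCapacity()));
    }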
diff --git a/src/main/resources/q-apps/q-apps.js b/src/main/resources/q-apps/q-apps.js
index e8a42537..25656370 100644
--- a/src/main/resources/q-apps/q-apps.js
+++ b/src/main/resources/q-apps/q-apps.js
@@ -614,6 +614,7 @@ function getDefaultTimeout(action) {
switch (action) {
case "GET_USER_ACCOUNT":
case "SAVE_FILE":
+ case "SIGN_TRANSACTION":
case "DECRYPT_DATA":
// User may take a long time to accept/deny the popup
return 60 * 60 * 1000;
@@ -635,6 +636,11 @@ function getDefaultTimeout(action) {
// Chat messages rely on PoW computations, so allow extra time
return 60 * 1000;
+ case "CREATE_TRADE_BUY_ORDER":
+ case "CREATE_TRADE_SELL_ORDER":
+ case "CANCEL_TRADE_SELL_ORDER":
+ case "VOTE_ON_POLL":
+ case "CREATE_POLL":
case "JOIN_GROUP":
case "DEPLOY_AT":
case "SEND_COIN":
@@ -649,7 +655,7 @@ function getDefaultTimeout(action) {
break;
}
}
- return 10 * 1000;
+ return 30 * 1000;
}
/**
diff --git a/src/test/java/org/qortal/test/BlockArchiveV1Tests.java b/src/test/java/org/qortal/test/BlockArchiveV1Tests.java
index a28bd28d..2cf8ef79 100644
--- a/src/test/java/org/qortal/test/BlockArchiveV1Tests.java
+++ b/src/test/java/org/qortal/test/BlockArchiveV1Tests.java
@@ -54,26 +54,39 @@ public class BlockArchiveV1Tests extends Common {
public void testWriter() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testWriter");
+
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
+ System.out.println("Set trim heights to 901.");
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
@@ -84,6 +97,9 @@ public class BlockArchiveV1Tests extends Common {
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
+
+ System.out.println("testWriter completed successfully.");
}
}
@@ -91,26 +107,39 @@ public class BlockArchiveV1Tests extends Common {
public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testWriterAndReader");
+
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
+ System.out.println("Set trim heights to 901.");
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
@@ -121,8 +150,10 @@ public class BlockArchiveV1Tests extends Common {
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
// Read block 2 from the archive
+ System.out.println("Reading block 2 from the archive...");
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation block2Info = reader.fetchBlockAtHeight(2);
BlockData block2ArchiveData = block2Info.getBlockData();
@@ -131,6 +162,7 @@ public class BlockArchiveV1Tests extends Common {
BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2);
// Ensure the values match
+ System.out.println("Comparing block 2 data...");
assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight());
assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature());
@@ -138,6 +170,7 @@ public class BlockArchiveV1Tests extends Common {
assertEquals(1, block2ArchiveData.getOnlineAccountsCount());
// Read block 900 from the archive
+ System.out.println("Reading block 900 from the archive...");
BlockTransformation block900Info = reader.fetchBlockAtHeight(900);
BlockData block900ArchiveData = block900Info.getBlockData();
@@ -145,12 +178,14 @@ public class BlockArchiveV1Tests extends Common {
BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900);
// Ensure the values match
+ System.out.println("Comparing block 900 data...");
assertEquals(block900ArchiveData.getHeight(), block900RepositoryData.getHeight());
assertArrayEquals(block900ArchiveData.getSignature(), block900RepositoryData.getSignature());
// Test some values in the archive
assertEquals(1, block900ArchiveData.getOnlineAccountsCount());
+ System.out.println("testWriterAndReader completed successfully.");
}
}
@@ -158,33 +193,48 @@ public class BlockArchiveV1Tests extends Common {
public void testArchivedAtStates() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testArchivedAtStates");
+
// Deploy an AT so that we have AT state data
+ System.out.println("Deploying AT...");
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
String atAddress = deployAtTransaction.getATAccount().getAddress();
+ System.out.println("AT deployed at address: " + atAddress);
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// 9 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(10);
repository.getATRepository().setAtTrimHeight(10);
+ System.out.println("Set trim heights to 10.");
// Check the max archive height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height (Expected 9): " + maximumArchiveHeight);
assertEquals(9, maximumArchiveHeight);
// Write blocks 2-9 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 8)");
assertEquals(9 - 1, writer.getWrittenCount());
// Increment block archive height
@@ -195,10 +245,13 @@ public class BlockArchiveV1Tests extends Common {
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
// Check blocks 3-9
+ System.out.println("Checking blocks 3 to 9...");
for (Integer testHeight = 2; testHeight <= 9; testHeight++) {
+ System.out.println("Reading block " + testHeight + " from the archive...");
// Read a block from the archive
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight);
@@ -216,6 +269,7 @@ public class BlockArchiveV1Tests extends Common {
// Check the archived AT state
if (testHeight == 2) {
+ System.out.println("Checking block " + testHeight + " AT state data (expected null)...");
// Block 2 won't have an AT state hash because it's initial (and has the DEPLOY_AT in the same block)
assertNull(archivedAtStateData);
@@ -223,6 +277,7 @@ public class BlockArchiveV1Tests extends Common {
assertEquals(Transaction.TransactionType.DEPLOY_AT, archivedTransactions.get(0).getType());
}
else {
+ System.out.println("Checking block " + testHeight + " AT state data...");
// For blocks 3+, ensure the archive has the AT state data, but not the hashes
assertNotNull(archivedAtStateData.getStateHash());
assertNull(archivedAtStateData.getStateData());
@@ -255,10 +310,12 @@ public class BlockArchiveV1Tests extends Common {
}
// Check block 10 (unarchived)
+ System.out.println("Checking block 10 (should not be in archive)...");
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation blockInfo = reader.fetchBlockAtHeight(10);
assertNull(blockInfo);
+ System.out.println("testArchivedAtStates completed successfully.");
}
}
@@ -267,32 +324,46 @@ public class BlockArchiveV1Tests extends Common {
public void testArchiveAndPrune() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testArchiveAndPrune");
+
// Deploy an AT so that we have AT state data
+ System.out.println("Deploying AT...");
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// Assume 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
+ System.out.println("Set trim heights to 901.");
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
@@ -303,17 +374,21 @@ public class BlockArchiveV1Tests extends Common {
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
// Ensure the SQL repository contains blocks 2 and 900...
assertNotNull(repository.getBlockRepository().fromHeight(2));
assertNotNull(repository.getBlockRepository().fromHeight(900));
+ System.out.println("Blocks 2 and 900 exist in the repository.");
// Prune all the archived blocks
+ System.out.println("Pruning blocks 2 to 900...");
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900);
assertEquals(900-1, numBlocksPruned);
repository.getBlockRepository().setBlockPruneHeight(901);
// Prune the AT states for the archived blocks
+ System.out.println("Pruning AT states up to height 900...");
repository.getATRepository().rebuildLatestAtStates(900);
repository.saveChanges();
int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900);
@@ -323,14 +398,19 @@ public class BlockArchiveV1Tests extends Common {
// Now ensure the SQL repository is missing blocks 2 and 900...
assertNull(repository.getBlockRepository().fromHeight(2));
assertNull(repository.getBlockRepository().fromHeight(900));
+ System.out.println("Blocks 2 and 900 have been pruned from the repository.");
// ... but it's not missing blocks 1 and 901 (we don't prune the genesis block)
assertNotNull(repository.getBlockRepository().fromHeight(1));
assertNotNull(repository.getBlockRepository().fromHeight(901));
+ System.out.println("Blocks 1 and 901 still exist in the repository.");
// Validate the latest block height in the repository
- assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
+ int lastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
+ System.out.println("Latest block height in repository (Expected 1002): " + lastBlockHeight);
+ assertEquals(1002, lastBlockHeight);
+ System.out.println("testArchiveAndPrune completed successfully.");
}
}
@@ -338,137 +418,190 @@ public class BlockArchiveV1Tests extends Common {
public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testTrimArchivePruneAndOrphan");
+
// Deploy an AT so that we have AT state data
+ System.out.println("Deploying AT...");
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
+ System.out.println("AT deployed successfully.");
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// Make sure that block 500 has full AT state data and data hash
+ System.out.println("Verifying block 500 AT state data...");
List<ATStateData> block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
+ System.out.println("Block 500 AT state data verified.");
// Trim the first 500 blocks
+ System.out.println("Trimming first 500 blocks...");
repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500);
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501);
repository.getATRepository().rebuildLatestAtStates(500);
repository.getATRepository().trimAtStates(0, 500, 1000);
repository.getATRepository().setAtTrimHeight(501);
+ System.out.println("Trimming completed.");
// Now block 499 should only have the AT state data hash
+ System.out.println("Checking block 499 AT state data...");
List<ATStateData> block499AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(499);
atStatesData = repository.getATRepository().getATStateAtHeight(block499AtStatesData.get(0).getATAddress(), 499);
assertNotNull(atStatesData.getStateHash());
assertNull(atStatesData.getStateData());
+ System.out.println("Block 499 AT state data contains only state hash as expected.");
// ... but block 500 should have the full data (due to being retained as the "latest" AT state in the trimmed range
+ System.out.println("Verifying block 500 AT state data again...");
block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
+ System.out.println("Block 500 AT state data contains full data.");
// ... and block 501 should also have the full data
+ System.out.println("Verifying block 501 AT state data...");
List<ATStateData> block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501);
atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
+ System.out.println("Block 501 AT state data contains full data.");
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height determined (Expected 500): " + maximumArchiveHeight);
assertEquals(500, maximumArchiveHeight);
BlockData block3DataPreArchive = repository.getBlockRepository().fromHeight(3);
// Write blocks 2-500 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Number of blocks written to archive (Expected 499): " + writer.getWrittenCount());
assertEquals(500 - 1, writer.getWrittenCount()); // -1 for the genesis block
// Increment block archive height
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
repository.saveChanges();
assertEquals(500 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
+ System.out.println("Block archive height updated to: " + (500 - 1));
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
// Ensure the SQL repository contains blocks 2 and 500...
+ System.out.println("Verifying that blocks 2 and 500 exist in the repository...");
assertNotNull(repository.getBlockRepository().fromHeight(2));
assertNotNull(repository.getBlockRepository().fromHeight(500));
+ System.out.println("Blocks 2 and 500 are present in the repository.");
// Prune all the archived blocks
+ System.out.println("Pruning blocks 2 to 500...");
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 500);
+ System.out.println("Number of blocks pruned (Expected 499): " + numBlocksPruned);
assertEquals(500-1, numBlocksPruned);
repository.getBlockRepository().setBlockPruneHeight(501);
// Prune the AT states for the archived blocks
+ System.out.println("Pruning AT states up to height 500...");
repository.getATRepository().rebuildLatestAtStates(500);
repository.saveChanges();
int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500);
+ System.out.println("Number of AT states pruned (Expected 498): " + numATStatesPruned);
assertEquals(498, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
repository.getATRepository().setAtPruneHeight(501);
// Now ensure the SQL repository is missing blocks 2 and 500...
+ System.out.println("Verifying that blocks 2 and 500 have been pruned...");
assertNull(repository.getBlockRepository().fromHeight(2));
assertNull(repository.getBlockRepository().fromHeight(500));
+ System.out.println("Blocks 2 and 500 have been successfully pruned.");
// ... but it's not missing blocks 1 and 501 (we don't prune the genesis block)
+ System.out.println("Verifying that blocks 1 and 501 still exist...");
assertNotNull(repository.getBlockRepository().fromHeight(1));
assertNotNull(repository.getBlockRepository().fromHeight(501));
+ System.out.println("Blocks 1 and 501 are present in the repository.");
// Validate the latest block height in the repository
- assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
+ int lastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
+ System.out.println("Latest block height in repository (Expected 1002): " + lastBlockHeight);
+ assertEquals(1002, lastBlockHeight);
// Now orphan some unarchived blocks.
+ System.out.println("Orphaning 500 blocks...");
BlockUtils.orphanBlocks(repository, 500);
- assertEquals(502, (int) repository.getBlockRepository().getLastBlock().getHeight());
+ int currentLastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
+ System.out.println("New last block height after orphaning (Expected 502): " + currentLastBlockHeight);
+ assertEquals(502, currentLastBlockHeight);
// We're close to the lower limit of the SQL database now, so
// we need to import some blocks from the archive
+ System.out.println("Importing blocks 401 to 500 from the archive...");
BlockArchiveUtils.importFromArchive(401, 500, repository);
// Ensure the SQL repository now contains block 401 but not 400...
+ System.out.println("Verifying that block 401 exists and block 400 does not...");
assertNotNull(repository.getBlockRepository().fromHeight(401));
assertNull(repository.getBlockRepository().fromHeight(400));
+ System.out.println("Block 401 exists, block 400 does not.");
// Import the remaining 399 blocks
+ System.out.println("Importing blocks 2 to 400 from the archive...");
BlockArchiveUtils.importFromArchive(2, 400, repository);
// Verify that block 3 matches the original
+ System.out.println("Verifying that block 3 matches the original data...");
BlockData block3DataPostArchive = repository.getBlockRepository().fromHeight(3);
assertArrayEquals(block3DataPreArchive.getSignature(), block3DataPostArchive.getSignature());
assertEquals(block3DataPreArchive.getHeight(), block3DataPostArchive.getHeight());
+ System.out.println("Block 3 data matches the original.");
// Orphan 1 more block, which should be the last one that is possible to be orphaned
+ System.out.println("Orphaning 1 more block...");
BlockUtils.orphanBlocks(repository, 1);
+ System.out.println("Orphaned 1 block successfully.");
// Orphan another block, which should fail
+ System.out.println("Attempting to orphan another block, which should fail...");
Exception exception = null;
try {
BlockUtils.orphanBlocks(repository, 1);
} catch (DataException e) {
exception = e;
+ System.out.println("Caught expected DataException: " + e.getMessage());
}
// Ensure that a DataException is thrown because there is no more AT states data available
assertNotNull(exception);
assertEquals(DataException.class, exception.getClass());
+ System.out.println("DataException confirmed due to lack of AT states data.");
// FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception
// and allow orphaning back through blocks with trimmed AT states.
+ System.out.println("testTrimArchivePruneAndOrphan completed successfully.");
}
}
@@ -482,16 +615,26 @@ public class BlockArchiveV1Tests extends Common {
public void testMissingAtStatesHeightIndex() throws DataException, SQLException {
try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {
+ System.out.println("Starting testMissingAtStatesHeightIndex");
+
// Firstly check that we're able to prune or archive when the index exists
+ System.out.println("Checking existence of ATStatesHeightIndex...");
assertTrue(repository.getATRepository().hasAtStatesHeightIndex());
assertTrue(RepositoryManager.canArchiveOrPrune());
+ System.out.println("ATStatesHeightIndex exists. Archiving and pruning are possible.");
// Delete the index
+ System.out.println("Dropping ATStatesHeightIndex...");
repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute();
+ System.out.println("ATStatesHeightIndex dropped.");
// Ensure check that we're unable to prune or archive when the index doesn't exist
+ System.out.println("Verifying that ATStatesHeightIndex no longer exists...");
assertFalse(repository.getATRepository().hasAtStatesHeightIndex());
assertFalse(RepositoryManager.canArchiveOrPrune());
+ System.out.println("ATStatesHeightIndex does not exist. Archiving and pruning are disabled.");
+
+ System.out.println("testMissingAtStatesHeightIndex completed successfully.");
}
}
@@ -501,8 +644,10 @@ public class BlockArchiveV1Tests extends Common {
Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
try {
FileUtils.deleteDirectory(archivePath.toFile());
+ System.out.println("Deleted archive directory at: " + archivePath);
} catch (IOException e) {
-
+
+ System.out.println("Failed to delete archive directory: " + e.getMessage());
}
}
diff --git a/src/test/java/org/qortal/test/BlockArchiveV2Tests.java b/src/test/java/org/qortal/test/BlockArchiveV2Tests.java
index 3b1d12d3..8ab02b40 100644
--- a/src/test/java/org/qortal/test/BlockArchiveV2Tests.java
+++ b/src/test/java/org/qortal/test/BlockArchiveV2Tests.java
@@ -54,26 +54,39 @@ public class BlockArchiveV2Tests extends Common {
public void testWriter() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testWriter");
+
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
+ System.out.println("Set trim heights to 901.");
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
@@ -84,6 +97,9 @@ public class BlockArchiveV2Tests extends Common {
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
+
+ System.out.println("testWriter completed successfully.");
}
}
@@ -91,26 +107,39 @@ public class BlockArchiveV2Tests extends Common {
public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testWriterAndReader");
+
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
+ System.out.println("Set trim heights to 901.");
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
@@ -121,8 +150,10 @@ public class BlockArchiveV2Tests extends Common {
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
// Read block 2 from the archive
+ System.out.println("Reading block 2 from the archive...");
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation block2Info = reader.fetchBlockAtHeight(2);
BlockData block2ArchiveData = block2Info.getBlockData();
@@ -131,6 +162,7 @@ public class BlockArchiveV2Tests extends Common {
BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2);
// Ensure the values match
+ System.out.println("Comparing block 2 data...");
assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight());
assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature());
@@ -138,6 +170,7 @@ public class BlockArchiveV2Tests extends Common {
assertEquals(1, block2ArchiveData.getOnlineAccountsCount());
// Read block 900 from the archive
+ System.out.println("Reading block 900 from the archive...");
BlockTransformation block900Info = reader.fetchBlockAtHeight(900);
BlockData block900ArchiveData = block900Info.getBlockData();
@@ -145,12 +178,14 @@ public class BlockArchiveV2Tests extends Common {
BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900);
// Ensure the values match
+ System.out.println("Comparing block 900 data...");
assertEquals(block900ArchiveData.getHeight(), block900RepositoryData.getHeight());
assertArrayEquals(block900ArchiveData.getSignature(), block900RepositoryData.getSignature());
// Test some values in the archive
assertEquals(1, block900ArchiveData.getOnlineAccountsCount());
+ System.out.println("testWriterAndReader completed successfully.");
}
}
@@ -158,47 +193,66 @@ public class BlockArchiveV2Tests extends Common {
public void testArchivedAtStates() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testArchivedAtStates");
+
// Deploy an AT so that we have AT state data
+ System.out.println("Deploying AT...");
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
String atAddress = deployAtTransaction.getATAccount().getAddress();
+ System.out.println("AT deployed at address: " + atAddress);
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// 9 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(10);
repository.getATRepository().setAtTrimHeight(10);
+ System.out.println("Set trim heights to 10.");
// Check the max archive height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height (Expected 9): " + maximumArchiveHeight);
assertEquals(9, maximumArchiveHeight);
// Write blocks 2-9 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 8)");
assertEquals(9 - 1, writer.getWrittenCount());
// Increment block archive height
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
repository.saveChanges();
assertEquals(9 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
+ System.out.println("Block archive height updated to: " + (9 - 1));
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
// Check blocks 2-9
+ System.out.println("Checking blocks 2 to 9...");
for (Integer testHeight = 2; testHeight <= 9; testHeight++) {
+ System.out.println("Reading block " + testHeight + " from the archive...");
// Read a block from the archive
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight);
@@ -216,15 +270,18 @@ public class BlockArchiveV2Tests extends Common {
// Check the archived AT state
if (testHeight == 2) {
+ System.out.println("Checking block " + testHeight + " AT state data (expected transactions)...");
assertEquals(1, archivedTransactions.size());
assertEquals(Transaction.TransactionType.DEPLOY_AT, archivedTransactions.get(0).getType());
}
else {
+ System.out.println("Checking block " + testHeight + " AT state data (no transactions expected)...");
// Blocks 3+ shouldn't have any transactions
assertTrue(archivedTransactions.isEmpty());
}
// Ensure the archive has the AT states hash
+ System.out.println("Checking block " + testHeight + " AT states hash...");
assertNotNull(archivedAtStateHash);
// Also check the online accounts count and height
@@ -232,6 +289,7 @@ public class BlockArchiveV2Tests extends Common {
assertEquals(testHeight, archivedBlockData.getHeight());
// Ensure the values match
+ System.out.println("Comparing block " + testHeight + " data...");
assertEquals(archivedBlockData.getHeight(), repositoryBlockData.getHeight());
assertArrayEquals(archivedBlockData.getSignature(), repositoryBlockData.getSignature());
assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount());
@@ -249,10 +307,12 @@ public class BlockArchiveV2Tests extends Common {
}
// Check block 10 (unarchived)
+ System.out.println("Checking block 10 (should not be in archive)...");
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation blockInfo = reader.fetchBlockAtHeight(10);
assertNull(blockInfo);
+ System.out.println("testArchivedAtStates completed successfully.");
}
}
@@ -261,32 +321,47 @@ public class BlockArchiveV2Tests extends Common {
public void testArchiveAndPrune() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testArchiveAndPrune");
+
// Deploy an AT so that we have AT state data
+ System.out.println("Deploying AT...");
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
+ System.out.println("AT deployed successfully.");
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// Assume 900 blocks are trimmed (this specifies the first untrimmed height)
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
repository.getATRepository().setAtTrimHeight(901);
+ System.out.println("Set trim heights to 901.");
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
assertEquals(900, maximumArchiveHeight);
// Write blocks 2-900 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
assertEquals(900 - 1, writer.getWrittenCount());
// Increment block archive height
@@ -297,34 +372,48 @@ public class BlockArchiveV2Tests extends Common {
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
// Ensure the SQL repository contains blocks 2 and 900...
+ System.out.println("Verifying that blocks 2 and 900 exist in the repository...");
assertNotNull(repository.getBlockRepository().fromHeight(2));
assertNotNull(repository.getBlockRepository().fromHeight(900));
+ System.out.println("Blocks 2 and 900 are present in the repository.");
// Prune all the archived blocks
+ System.out.println("Pruning blocks 2 to 900...");
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900);
+ System.out.println("Number of blocks pruned (Expected 899): " + numBlocksPruned);
assertEquals(900-1, numBlocksPruned);
repository.getBlockRepository().setBlockPruneHeight(901);
// Prune the AT states for the archived blocks
+ System.out.println("Pruning AT states up to height 900...");
repository.getATRepository().rebuildLatestAtStates(900);
repository.saveChanges();
int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900);
+ System.out.println("Number of AT states pruned (Expected 898): " + numATStatesPruned);
assertEquals(900-2, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
repository.getATRepository().setAtPruneHeight(901);
// Now ensure the SQL repository is missing blocks 2 and 900...
+ System.out.println("Verifying that blocks 2 and 900 have been pruned...");
assertNull(repository.getBlockRepository().fromHeight(2));
assertNull(repository.getBlockRepository().fromHeight(900));
+ System.out.println("Blocks 2 and 900 have been successfully pruned.");
// ... but it's not missing blocks 1 and 901 (we don't prune the genesis block)
+ System.out.println("Verifying that blocks 1 and 901 still exist...");
assertNotNull(repository.getBlockRepository().fromHeight(1));
assertNotNull(repository.getBlockRepository().fromHeight(901));
+ System.out.println("Blocks 1 and 901 are present in the repository.");
// Validate the latest block height in the repository
- assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
+ int lastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
+ System.out.println("Latest block height in repository (Expected 1002): " + lastBlockHeight);
+ assertEquals(1002, lastBlockHeight);
+ System.out.println("testArchiveAndPrune completed successfully.");
}
}
@@ -332,138 +421,191 @@ public class BlockArchiveV2Tests extends Common {
public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
+ System.out.println("Starting testTrimArchivePruneAndOrphan");
+
// Deploy an AT so that we have AT state data
+ System.out.println("Deploying AT...");
PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
byte[] creationBytes = AtUtils.buildSimpleAT();
long fundingAmount = 1_00000000L;
AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
+ System.out.println("AT deployed successfully.");
// Mint some blocks so that we are able to archive them later
+ System.out.println("Minting 1000 blocks...");
for (int i = 0; i < 1000; i++) {
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
+ // Log every 100 blocks
+ if ((i + 1) % 100 == 0) {
+ System.out.println("Minted block " + (i + 1));
+ }
}
+ System.out.println("Finished minting blocks.");
// Make sure that block 500 has full AT state data and data hash
+ System.out.println("Verifying block 500 AT state data...");
List<ATStateData> block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
+ System.out.println("Block 500 AT state data verified.");
// Trim the first 500 blocks
+ System.out.println("Trimming first 500 blocks...");
repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500);
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501);
repository.getATRepository().rebuildLatestAtStates(500);
repository.getATRepository().trimAtStates(0, 500, 1000);
repository.getATRepository().setAtTrimHeight(501);
+ System.out.println("Trimming completed.");
// Now block 499 should only have the AT state data hash
+ System.out.println("Checking block 499 AT state data...");
List<ATStateData> block499AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(499);
atStatesData = repository.getATRepository().getATStateAtHeight(block499AtStatesData.get(0).getATAddress(), 499);
assertNotNull(atStatesData.getStateHash());
assertNull(atStatesData.getStateData());
+ System.out.println("Block 499 AT state data contains only state hash as expected.");
// ... but block 500 should have the full data (due to being retained as the "latest" AT state in the trimmed range)
+ System.out.println("Verifying block 500 AT state data again...");
block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
+ System.out.println("Block 500 AT state data contains full data.");
// ... and block 501 should also have the full data
+ System.out.println("Verifying block 501 AT state data...");
List<ATStateData> block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501);
atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501);
assertNotNull(atStatesData.getStateHash());
assertNotNull(atStatesData.getStateData());
+ System.out.println("Block 501 AT state data contains full data.");
// Check the max archive height - this should be one less than the first untrimmed height
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ System.out.println("Maximum archive height determined (Expected 500): " + maximumArchiveHeight);
assertEquals(500, maximumArchiveHeight);
BlockData block3DataPreArchive = repository.getBlockRepository().fromHeight(3);
// Write blocks 2-500 to the archive
+ System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ System.out.println("Finished writing blocks to archive. Result: " + result);
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
// Make sure that the archive contains the correct number of blocks
+ System.out.println("Number of blocks written to archive (Expected 499): " + writer.getWrittenCount());
assertEquals(500 - 1, writer.getWrittenCount()); // -1 for the genesis block
// Increment block archive height
repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
repository.saveChanges();
assertEquals(500 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
+ System.out.println("Block archive height updated to: " + (500 - 1));
// Ensure the file exists
File outputFile = writer.getOutputPath().toFile();
assertTrue(outputFile.exists());
+ System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
// Ensure the SQL repository contains blocks 2 and 500...
+ System.out.println("Verifying that blocks 2 and 500 exist in the repository...");
assertNotNull(repository.getBlockRepository().fromHeight(2));
assertNotNull(repository.getBlockRepository().fromHeight(500));
+ System.out.println("Blocks 2 and 500 are present in the repository.");
// Prune all the archived blocks
+ System.out.println("Pruning blocks 2 to 500...");
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 500);
+ System.out.println("Number of blocks pruned (Expected 499): " + numBlocksPruned);
assertEquals(500-1, numBlocksPruned);
repository.getBlockRepository().setBlockPruneHeight(501);
// Prune the AT states for the archived blocks
+ System.out.println("Pruning AT states up to height 500...");
repository.getATRepository().rebuildLatestAtStates(500);
repository.saveChanges();
int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500);
+ System.out.println("Number of AT states pruned (Expected 498): " + numATStatesPruned);
assertEquals(498, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
repository.getATRepository().setAtPruneHeight(501);
// Now ensure the SQL repository is missing blocks 2 and 500...
+ System.out.println("Verifying that blocks 2 and 500 have been pruned...");
assertNull(repository.getBlockRepository().fromHeight(2));
assertNull(repository.getBlockRepository().fromHeight(500));
+ System.out.println("Blocks 2 and 500 have been successfully pruned.");
// ... but it's not missing blocks 1 and 501 (we don't prune the genesis block)
+ System.out.println("Verifying that blocks 1 and 501 still exist...");
assertNotNull(repository.getBlockRepository().fromHeight(1));
assertNotNull(repository.getBlockRepository().fromHeight(501));
+ System.out.println("Blocks 1 and 501 are present in the repository.");
// Validate the latest block height in the repository
- assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
+ int lastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
+ System.out.println("Latest block height in repository (Expected 1002): " + lastBlockHeight);
+ assertEquals(1002, lastBlockHeight);
// Now orphan some unarchived blocks.
+ System.out.println("Orphaning 500 blocks...");
BlockUtils.orphanBlocks(repository, 500);
- assertEquals(502, (int) repository.getBlockRepository().getLastBlock().getHeight());
+ int currentLastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
+ System.out.println("New last block height after orphaning (Expected 502): " + currentLastBlockHeight);
+ assertEquals(502, currentLastBlockHeight);
// We're close to the lower limit of the SQL database now, so
// we need to import some blocks from the archive
+ System.out.println("Importing blocks 401 to 500 from the archive...");
BlockArchiveUtils.importFromArchive(401, 500, repository);
// Ensure the SQL repository now contains block 401 but not 400...
+ System.out.println("Verifying that block 401 exists and block 400 does not...");
assertNotNull(repository.getBlockRepository().fromHeight(401));
assertNull(repository.getBlockRepository().fromHeight(400));
+ System.out.println("Block 401 exists, block 400 does not.");
// Import the remaining 399 blocks
+ System.out.println("Importing blocks 2 to 400 from the archive...");
BlockArchiveUtils.importFromArchive(2, 400, repository);
// Verify that block 3 matches the original
+ System.out.println("Verifying that block 3 matches the original data...");
BlockData block3DataPostArchive = repository.getBlockRepository().fromHeight(3);
assertArrayEquals(block3DataPreArchive.getSignature(), block3DataPostArchive.getSignature());
assertEquals(block3DataPreArchive.getHeight(), block3DataPostArchive.getHeight());
+ System.out.println("Block 3 data matches the original.");
// Orphan 2 more blocks, which should be the last ones that can be orphaned
// TODO: figure out why this is 1 block more than in the equivalent block archive V1 test
+ System.out.println("Orphaning 2 more blocks...");
BlockUtils.orphanBlocks(repository, 2);
+ System.out.println("Orphaned 2 blocks successfully.");
// Orphan another block, which should fail
+ System.out.println("Attempting to orphan another block, which should fail...");
Exception exception = null;
try {
BlockUtils.orphanBlocks(repository, 1);
} catch (DataException e) {
exception = e;
+ System.out.println("Caught expected DataException: " + e.getMessage());
}
// Ensure that a DataException is thrown because there is no more AT states data available
assertNotNull(exception);
assertEquals(DataException.class, exception.getClass());
+ System.out.println("DataException confirmed due to lack of AT states data.");
// FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception
// and allow orphaning back through blocks with trimmed AT states.
+ System.out.println("testTrimArchivePruneAndOrphan completed successfully.");
}
}
@@ -477,16 +619,26 @@ public class BlockArchiveV2Tests extends Common {
public void testMissingAtStatesHeightIndex() throws DataException, SQLException {
try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {
+ System.out.println("Starting testMissingAtStatesHeightIndex");
+
// Firstly check that we're able to prune or archive when the index exists
+ System.out.println("Checking existence of ATStatesHeightIndex...");
assertTrue(repository.getATRepository().hasAtStatesHeightIndex());
assertTrue(RepositoryManager.canArchiveOrPrune());
+ System.out.println("ATStatesHeightIndex exists. Archiving and pruning are possible.");
// Delete the index
+ System.out.println("Dropping ATStatesHeightIndex...");
repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute();
+ System.out.println("ATStatesHeightIndex dropped.");
// Ensure that we're unable to prune or archive when the index doesn't exist
+ System.out.println("Verifying that ATStatesHeightIndex no longer exists...");
assertFalse(repository.getATRepository().hasAtStatesHeightIndex());
assertFalse(RepositoryManager.canArchiveOrPrune());
+ System.out.println("ATStatesHeightIndex does not exist. Archiving and pruning are disabled.");
+
+ System.out.println("testMissingAtStatesHeightIndex completed successfully.");
}
}
@@ -496,8 +648,10 @@ public class BlockArchiveV2Tests extends Common {
Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
try {
FileUtils.deleteDirectory(archivePath.toFile());
+ System.out.println("Deleted archive directory at: " + archivePath);
} catch (IOException e) {
+ System.out.println("Failed to delete archive directory: " + e.getMessage());
}
}
diff --git a/src/test/java/org/qortal/test/RepositoryTests.java b/src/test/java/org/qortal/test/RepositoryTests.java
index 0d07be4b..1b0a0e52 100644
--- a/src/test/java/org/qortal/test/RepositoryTests.java
+++ b/src/test/java/org/qortal/test/RepositoryTests.java
@@ -411,13 +411,20 @@ public class RepositoryTests extends Common {
}
}
- /** Specifically test LATERAL() usage in Chat repository */
+ /** Specifically test LATERAL() usage in Chat repository with hasChatReference */
@Test
public void testChatLateral() {
try (final HSQLDBRepository hsqldb = (HSQLDBRepository) RepositoryManager.getRepository()) {
String address = Crypto.toAddress(new byte[32]);
- hsqldb.getChatRepository().getActiveChats(address, ChatMessage.Encoding.BASE58);
+ // Test without hasChatReference
+ hsqldb.getChatRepository().getActiveChats(address, ChatMessage.Encoding.BASE58, null);
+
+ // Test with hasChatReference = true
+ hsqldb.getChatRepository().getActiveChats(address, ChatMessage.Encoding.BASE58, true);
+
+ // Test with hasChatReference = false
+ hsqldb.getChatRepository().getActiveChats(address, ChatMessage.Encoding.BASE58, false);
} catch (DataException e) {
fail("HSQLDB bug #1580");
}
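The getActiveChats() calls above exercise the new nullable Boolean hasChatReference parameter: null leaves results unfiltered, while true or false restricts active chats to those with or without a chat reference. A minimal sketch of that three-state filter pattern, using a hypothetical column name rather than Qortal's actual HSQLDB query:

import java.util.ArrayList;
import java.util.List;

public class ChatQuerySketch {
    // Illustrative only: builds a WHERE fragment for a nullable Boolean filter.
    //   null  -> no extra condition (all active chats)
    //   true  -> only rows whose (hypothetical) chat_reference column is set
    //   false -> only rows whose chat_reference column is NULL
    static String chatReferenceClause(Boolean hasChatReference) {
        if (hasChatReference == null)
            return "";
        return hasChatReference ? " AND chat_reference IS NOT NULL"
                                : " AND chat_reference IS NULL";
    }

    public static void main(String[] args) {
        List<Boolean> cases = new ArrayList<>();
        cases.add(null);
        cases.add(Boolean.TRUE);
        cases.add(Boolean.FALSE);
        for (Boolean filter : cases)
            System.out.println("hasChatReference=" + filter + " ->" + chatReferenceClause(filter));
    }
}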
diff --git a/src/test/resources/test-chain-v2-block-timestamps.json b/src/test/resources/test-chain-v2-block-timestamps.json
index 17fc80c4..4e49e86d 100644
--- a/src/test/resources/test-chain-v2-block-timestamps.json
+++ b/src/test/resources/test-chain-v2-block-timestamps.json
@@ -81,7 +81,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 9999999999999,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -91,7 +91,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-disable-reference.json b/src/test/resources/test-chain-v2-disable-reference.json
index 33054732..9ad59d79 100644
--- a/src/test/resources/test-chain-v2-disable-reference.json
+++ b/src/test/resources/test-chain-v2-disable-reference.json
@@ -84,7 +84,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 0,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -94,7 +94,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-founder-rewards.json b/src/test/resources/test-chain-v2-founder-rewards.json
index 577a07f1..e4182d7d 100644
--- a/src/test/resources/test-chain-v2-founder-rewards.json
+++ b/src/test/resources/test-chain-v2-founder-rewards.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-leftover-reward.json b/src/test/resources/test-chain-v2-leftover-reward.json
index 82e4ace7..04005b2b 100644
--- a/src/test/resources/test-chain-v2-leftover-reward.json
+++ b/src/test/resources/test-chain-v2-leftover-reward.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-minting.json b/src/test/resources/test-chain-v2-minting.json
index 16032a9c..ddb29ca5 100644
--- a/src/test/resources/test-chain-v2-minting.json
+++ b/src/test/resources/test-chain-v2-minting.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 0,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 9999999999999,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-penalty-fix.json b/src/test/resources/test-chain-v2-penalty-fix.json
index e62fc9f2..cac92c16 100644
--- a/src/test/resources/test-chain-v2-penalty-fix.json
+++ b/src/test/resources/test-chain-v2-penalty-fix.json
@@ -83,16 +83,24 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
- "selfSponsorshipAlgoV1Height": 99999999,
+ "selfSponsorshipAlgoV1Height": 999999999,
+ "selfSponsorshipAlgoV2Height": 999999999,
+ "selfSponsorshipAlgoV3Height": 999999999,
"feeValidationFixTimestamp": 0,
"chatReferenceTimestamp": 0,
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
- "selfSponsorshipAlgoV2Height": 9999999,
"disableTransferPrivsTimestamp": 9999999999500,
"enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999,
"penaltyFixHeight": 5
},
"genesisInfo": {
diff --git a/src/test/resources/test-chain-v2-qora-holder-extremes.json b/src/test/resources/test-chain-v2-qora-holder-extremes.json
index 3ec11942..566d8515 100644
--- a/src/test/resources/test-chain-v2-qora-holder-extremes.json
+++ b/src/test/resources/test-chain-v2-qora-holder-extremes.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-qora-holder-reduction.json b/src/test/resources/test-chain-v2-qora-holder-reduction.json
index 2b8834ce..c7ed2270 100644
--- a/src/test/resources/test-chain-v2-qora-holder-reduction.json
+++ b/src/test/resources/test-chain-v2-qora-holder-reduction.json
@@ -86,7 +86,7 @@
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
"aggregateSignatureTimestamp": 0,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -96,7 +96,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-qora-holder.json b/src/test/resources/test-chain-v2-qora-holder.json
index ab96a243..1c4f0d93 100644
--- a/src/test/resources/test-chain-v2-qora-holder.json
+++ b/src/test/resources/test-chain-v2-qora-holder.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-reward-levels.json b/src/test/resources/test-chain-v2-reward-levels.json
index 35535c75..30d952e1 100644
--- a/src/test/resources/test-chain-v2-reward-levels.json
+++ b/src/test/resources/test-chain-v2-reward-levels.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-reward-scaling.json b/src/test/resources/test-chain-v2-reward-scaling.json
index 616d0925..612f02a5 100644
--- a/src/test/resources/test-chain-v2-reward-scaling.json
+++ b/src/test/resources/test-chain-v2-reward-scaling.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 500,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-reward-shares.json b/src/test/resources/test-chain-v2-reward-shares.json
index ec6ffd2e..2f332233 100644
--- a/src/test/resources/test-chain-v2-reward-shares.json
+++ b/src/test/resources/test-chain-v2-reward-shares.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-self-sponsorship-algo-v1.json b/src/test/resources/test-chain-v2-self-sponsorship-algo-v1.json
index d0d989cf..3ea8bc70 100644
--- a/src/test/resources/test-chain-v2-self-sponsorship-algo-v1.json
+++ b/src/test/resources/test-chain-v2-self-sponsorship-algo-v1.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 20,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-self-sponsorship-algo-v2.json b/src/test/resources/test-chain-v2-self-sponsorship-algo-v2.json
index 5f09cb47..ae424704 100644
--- a/src/test/resources/test-chain-v2-self-sponsorship-algo-v2.json
+++ b/src/test/resources/test-chain-v2-self-sponsorship-algo-v2.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 30,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2-self-sponsorship-algo-v3.json b/src/test/resources/test-chain-v2-self-sponsorship-algo-v3.json
index f7d1faa2..2a24473b 100644
--- a/src/test/resources/test-chain-v2-self-sponsorship-algo-v3.json
+++ b/src/test/resources/test-chain-v2-self-sponsorship-algo-v3.json
@@ -85,7 +85,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -95,7 +95,14 @@
"arbitraryOptionalFeeTimestamp": 0,
"unconfirmableRewardSharesHeight": 99999999,
"disableTransferPrivsTimestamp": 9999999999500,
- "enableTransferPrivsTimestamp": 9999999999950
+ "enableTransferPrivsTimestamp": 9999999999950,
+ "cancelSellNameValidationTimestamp": 9999999999999,
+ "disableRewardshareHeight": 9999999999990,
+ "enableRewardshareHeight": 9999999999999,
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/src/test/resources/test-chain-v2.json b/src/test/resources/test-chain-v2.json
index 086c126e..c829975b 100644
--- a/src/test/resources/test-chain-v2.json
+++ b/src/test/resources/test-chain-v2.json
@@ -86,7 +86,7 @@
"transactionV5Timestamp": 0,
"transactionV6Timestamp": 0,
"disableReferenceTimestamp": 9999999999999,
- "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
"onlineAccountMinterLevelValidationHeight": 0,
"selfSponsorshipAlgoV1Height": 999999999,
"selfSponsorshipAlgoV2Height": 999999999,
@@ -100,8 +100,10 @@
"cancelSellNameValidationTimestamp": 9999999999999,
"disableRewardshareHeight": 9999999999990,
"enableRewardshareHeight": 9999999999999,
- "onlyMintWithNameHeight": 9999999999999,
- "groupMemberCheckHeight": 9999999999999
+ "onlyMintWithNameHeight": 9999999999990,
+ "groupMemberCheckHeight": 9999999999999,
+ "decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
+ "removeOnlyMintWithNameHeight": 9999999999999
},
"genesisInfo": {
"version": 4,
diff --git a/start.sh b/start.sh
index cb738fa2..88937026 100755
--- a/start.sh
+++ b/start.sh
@@ -33,8 +33,13 @@ fi
# Limits Java JVM stack size and maximum heap usage.
# Comment out for bigger systems, e.g. non-routers
# or when API documentation is enabled
-# Uncomment (remove '#' sign) line below if your system has less than 12GB of RAM for optimal RAM defaults
-#JVM_MEMORY_ARGS="-Xss256m -XX:+UseSerialGC"
+# JAVA MEMORY SETTINGS BELOW - These settings are essentially optimized defaults.
+# Combined with the latest changes in Qortal Core version 4.6.6 and beyond,
+# they should give a dramatic increase in performance due to optimized Garbage Collection.
+# These memory arguments should work on machines with as little as 6GB of RAM.
+# If you want to run on a machine with less than 6GB of RAM, it is suggested to increase the '50' below to '75'.
+# The Qortal Core will use only as much RAM as it needs, but up to the percentage set below.
+JVM_MEMORY_ARGS="-XX:MaxRAMPercentage=50 -XX:+UseG1GC -Xss1024k"
# Although java.net.preferIPv4Stack is supposed to be false
# by default in Java 11, on some platforms (e.g. FreeBSD 12),
@@ -43,9 +48,6 @@ fi
nohup nice -n 20 java \
-Djava.net.preferIPv4Stack=false \
${JVM_MEMORY_ARGS} \
- --add-opens=java.base/java.lang=ALL-UNNAMED \
- --add-opens=java.base/java.net=ALL-UNNAMED \
- --illegal-access=warn \
-jar qortal.jar \
1>run.log 2>&1 &
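The new JVM_MEMORY_ARGS line in start.sh replaces the previously commented-out serial-GC suggestion with an always-on G1 configuration whose heap ceiling scales with physical RAM (-XX:MaxRAMPercentage=50). A quick way to confirm the effective ceiling on a given machine is a small sketch like the one below, launched with the same flags (for example: java -XX:MaxRAMPercentage=50 -XX:+UseG1GC HeapCheck); HeapCheck is just an illustrative class name:

public class HeapCheck {
    public static void main(String[] args) {
        // Runtime.maxMemory() reports the JVM's heap ceiling, reflecting
        // -Xmx or -XX:MaxRAMPercentage, whichever is in effect.
        long maxBytes = Runtime.getRuntime().maxMemory();
        System.out.printf("Max heap available to this JVM: %.1f GB%n",
                maxBytes / (1024.0 * 1024.0 * 1024.0));
    }
}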