myName = nameRepository.getNamesByOwner(myAddress);
+ if (Account.isFounder(accountData.getFlags())) {
+ return accountData.getBlocksMintedPenalty() == 0 && !myName.isEmpty() && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
+ } else {
+ return level >= levelToMint && !myName.isEmpty() && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
+ }
+ }
+
+ // Can only mint on removeOnlyMintWithNameHeight from blockchain config if:
+ // Account's level is at least minAccountLevelToMint from blockchain config
+ // Account's address is a member of the minter group
+ if (blockchainHeight >= removeNameCheckHeight) {
+ if (Account.isFounder(accountData.getFlags())) {
+ return accountData.getBlocksMintedPenalty() == 0 && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
+ } else {
+ return level >= levelToMint && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
+ }
+ }
return false;
}
@@ -228,7 +292,6 @@ public class Account {
return this.repository.getAccountRepository().getBlocksMintedPenaltyCount(this.address);
}
-
/** Returns whether account can build reward-shares.
*
* To be able to create reward-shares, the account needs to pass at least one of these tests:
@@ -242,6 +305,7 @@ public class Account {
*/
public boolean canRewardShare() throws DataException {
AccountData accountData = this.repository.getAccountRepository().getAccount(this.address);
+
if (accountData == null)
return false;
@@ -252,6 +316,9 @@ public class Account {
if (Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
return true;
+ if( this.repository.getBlockRepository().getBlockchainHeight() >= BlockChain.getInstance().getIgnoreLevelForRewardShareHeight() )
+ return true;
+
return false;
}
@@ -295,10 +362,28 @@ public class Account {
}
/**
- * Returns 'effective' minting level, or zero if reward-share does not exist.
+ * Returns reward-share minting address, or unknown if reward-share does not exist.
*
* @param repository
* @param rewardSharePublicKey
+ * @return address or unknown
+ * @throws DataException
+ */
+ public static String getRewardShareMintingAddress(Repository repository, byte[] rewardSharePublicKey) throws DataException {
+ // Find actual minter address
+ RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey);
+
+ if (rewardShareData == null)
+ return "Unknown";
+
+ return rewardShareData.getMinter();
+ }
+
+ /**
+ * Returns 'effective' minting level, or zero if reward-share does not exist.
+ *
+ * @param repository
+ * @param rewardSharePublicKey
* @return 0+
* @throws DataException
*/
@@ -311,6 +396,7 @@ public class Account {
Account rewardShareMinter = new Account(repository, rewardShareData.getMinter());
return rewardShareMinter.getEffectiveMintingLevel();
}
+
/**
* Returns 'effective' minting level, with a fix for the zero level.
*
diff --git a/src/main/java/org/qortal/api/ApiService.java b/src/main/java/org/qortal/api/ApiService.java
index fbef50d3..2cebe8e5 100644
--- a/src/main/java/org/qortal/api/ApiService.java
+++ b/src/main/java/org/qortal/api/ApiService.java
@@ -194,6 +194,7 @@ public class ApiService {
context.addServlet(AdminStatusWebSocket.class, "/websockets/admin/status");
context.addServlet(BlocksWebSocket.class, "/websockets/blocks");
+ context.addServlet(DataMonitorSocket.class, "/websockets/datamonitor");
context.addServlet(ActiveChatsWebSocket.class, "/websockets/chat/active/*");
context.addServlet(ChatMessagesWebSocket.class, "/websockets/chat/messages");
context.addServlet(TradeOffersWebSocket.class, "/websockets/crosschain/tradeoffers");
diff --git a/src/main/java/org/qortal/api/model/ApiOnlineAccount.java b/src/main/java/org/qortal/api/model/ApiOnlineAccount.java
index 08b697aa..e26eb816 100644
--- a/src/main/java/org/qortal/api/model/ApiOnlineAccount.java
+++ b/src/main/java/org/qortal/api/model/ApiOnlineAccount.java
@@ -1,7 +1,13 @@
package org.qortal.api.model;
+import org.qortal.account.Account;
+import org.qortal.repository.DataException;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.repository.Repository;
+
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
@@ -47,4 +53,31 @@ public class ApiOnlineAccount {
return this.recipientAddress;
}
+ public int getMinterLevelFromPublicKey() {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ return Account.getRewardShareEffectiveMintingLevel(repository, this.rewardSharePublicKey);
+ } catch (DataException e) {
+ return 0;
+ }
+ }
+
+ public boolean getIsMember() {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ return repository.getGroupRepository().memberExists(694, getMinterAddress());
+ } catch (DataException e) {
+ return false;
+ }
+ }
+
+ // JAXB special
+
+ @XmlElement(name = "minterLevel")
+ protected int getMinterLevel() {
+ return getMinterLevelFromPublicKey();
+ }
+
+ @XmlElement(name = "isMinterMember")
+ protected boolean getMinterMember() {
+ return getIsMember();
+ }
}
diff --git a/src/main/java/org/qortal/api/model/BlockMintingInfo.java b/src/main/java/org/qortal/api/model/BlockMintingInfo.java
index f84e179e..02765a89 100644
--- a/src/main/java/org/qortal/api/model/BlockMintingInfo.java
+++ b/src/main/java/org/qortal/api/model/BlockMintingInfo.java
@@ -9,6 +9,7 @@ import java.math.BigInteger;
public class BlockMintingInfo {
public byte[] minterPublicKey;
+ public String minterAddress;
public int minterLevel;
public int onlineAccountsCount;
public BigDecimal maxDistance;
@@ -19,5 +20,4 @@ public class BlockMintingInfo {
public BlockMintingInfo() {
}
-
}
diff --git a/src/main/java/org/qortal/api/model/CrossChainTradeLedgerEntry.java b/src/main/java/org/qortal/api/model/CrossChainTradeLedgerEntry.java
new file mode 100644
index 00000000..34f8fc57
--- /dev/null
+++ b/src/main/java/org/qortal/api/model/CrossChainTradeLedgerEntry.java
@@ -0,0 +1,72 @@
+package org.qortal.api.model;
+
+import io.swagger.v3.oas.annotations.media.Schema;
+import org.qortal.data.crosschain.CrossChainTradeData;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
+
+// All properties to be converted to JSON via JAXB
+@XmlAccessorType(XmlAccessType.FIELD)
+public class CrossChainTradeLedgerEntry {
+
+ private String market;
+
+ private String currency;
+
+ @XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
+ private long quantity;
+
+ @XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
+ private long feeAmount;
+
+ private String feeCurrency;
+
+ @XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
+ private long totalPrice;
+
+ private long tradeTimestamp;
+
+ protected CrossChainTradeLedgerEntry() {
+ /* For JAXB */
+ }
+
+ public CrossChainTradeLedgerEntry(String market, String currency, long quantity, long feeAmount, String feeCurrency, long totalPrice, long tradeTimestamp) {
+ this.market = market;
+ this.currency = currency;
+ this.quantity = quantity;
+ this.feeAmount = feeAmount;
+ this.feeCurrency = feeCurrency;
+ this.totalPrice = totalPrice;
+ this.tradeTimestamp = tradeTimestamp;
+ }
+
+ public String getMarket() {
+ return market;
+ }
+
+ public String getCurrency() {
+ return currency;
+ }
+
+ public long getQuantity() {
+ return quantity;
+ }
+
+ public long getFeeAmount() {
+ return feeAmount;
+ }
+
+ public String getFeeCurrency() {
+ return feeCurrency;
+ }
+
+ public long getTotalPrice() {
+ return totalPrice;
+ }
+
+ public long getTradeTimestamp() {
+ return tradeTimestamp;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/qortal/api/model/DatasetStatus.java b/src/main/java/org/qortal/api/model/DatasetStatus.java
new file mode 100644
index 00000000..b587be51
--- /dev/null
+++ b/src/main/java/org/qortal/api/model/DatasetStatus.java
@@ -0,0 +1,50 @@
+package org.qortal.api.model;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import java.util.Objects;
+
+// All properties to be converted to JSON via JAXB
+@XmlAccessorType(XmlAccessType.FIELD)
+public class DatasetStatus {
+
+ private String name;
+
+ private long count;
+
+ public DatasetStatus() {}
+
+ public DatasetStatus(String name, long count) {
+ this.name = name;
+ this.count = count;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public long getCount() {
+ return count;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DatasetStatus that = (DatasetStatus) o;
+ return count == that.count && Objects.equals(name, that.name);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, count);
+ }
+
+ @Override
+ public String toString() {
+ return "DatasetStatus{" +
+ "name='" + name + '\'' +
+ ", count=" + count +
+ '}';
+ }
+}
diff --git a/src/main/java/org/qortal/api/model/crosschain/BitcoinyTBDRequest.java b/src/main/java/org/qortal/api/model/crosschain/BitcoinyTBDRequest.java
new file mode 100644
index 00000000..3a531413
--- /dev/null
+++ b/src/main/java/org/qortal/api/model/crosschain/BitcoinyTBDRequest.java
@@ -0,0 +1,692 @@
+package org.qortal.api.model.crosschain;
+
+import org.qortal.crosschain.ServerInfo;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import java.util.Arrays;
+
+@XmlAccessorType(XmlAccessType.FIELD)
+public class BitcoinyTBDRequest {
+
+ /**
+ * Target Timespan
+ *
+ * extracted from /src/chainparams.cpp class
+ * consensus.nPowTargetTimespan
+ */
+ private int targetTimespan;
+
+ /**
+ * Target Spacing
+ *
+ * extracted from /src/chainparams.cpp class
+ * consensus.nPowTargetSpacing
+ */
+ private int targetSpacing;
+
+ /**
+ * Packet Magic
+ *
+ * extracted from /src/chainparams.cpp class
+ * Concatenate the 4 values in pchMessageStart, then convert the hex to decimal.
+ *
+ * Ex. litecoin
+ * pchMessageStart[0] = 0xfb;
+ * pchMessageStart[1] = 0xc0;
+ * pchMessageStart[2] = 0xb6;
+ * pchMessageStart[3] = 0xdb;
+ * packetMagic = 0xfbc0b6db = 4223710939
+ */
+ private long packetMagic;
+
+ /**
+ * Port
+ *
+ * extracted from /src/chainparams.cpp class
+ * nDefaultPort
+ */
+ private int port;
+
+ /**
+ * Address Header
+ *
+ * extracted from /src/chainparams.cpp class
+ * base58Prefixes[PUBKEY_ADDRESS] from Main Network
+ */
+ private int addressHeader;
+
+ /**
+ * P2sh Header
+ *
+ * extracted from /src/chainparams.cpp class
+ * base58Prefixes[SCRIPT_ADDRESS] from Main Network
+ */
+ private int p2shHeader;
+
+ /**
+ * Segwit Address Hrp
+ *
+ * HRP -> Human Readable Parts
+ *
+ * extracted from /src/chainparams.cpp class
+ * bech32_hrp
+ */
+ private String segwitAddressHrp;
+
+ /**
+ * Dumped Private Key Header
+ *
+ * extracted from /src/chainparams.cpp class
+ * base58Prefixes[SECRET_KEY] from Main Network
+ * This is usually, but not always ... addressHeader + 128
+ */
+ private int dumpedPrivateKeyHeader;
+
+ /**
+ * Subsidy Decreased Block Count
+ *
+ * extracted from /src/chainparams.cpp class
+ * consensus.nSubsidyHalvingInterval
+ *
+ * Digibyte does not support this, because they do halving differently.
+ */
+ private int subsidyDecreaseBlockCount;
+
+ /**
+ * Expected Genesis Hash
+ *
+ * extracted from /src/chainparams.cpp class
+ * consensus.hashGenesisBlock
+ * Remove '0x' prefix
+ */
+ private String expectedGenesisHash;
+
+ /**
+ * Common Script Pub Key
+ *
+ * extracted from /src/chainparams.cpp class
+ * This is the key commonly used to sign alerts for altcoins. Bitcoin and Digibyte are known exceptions.
+ */
+ public static final String SCRIPT_PUB_KEY = "040184710fa689ad5023690c80f3a49c8f13f8d45b8c857fbcbc8bc4a8e4d3eb4b10f4d4604fa08dce601aaf0f470216fe1b51850b4acf21b179c45070ac7b03a9";
+
+ /**
+ * The Script Pub Key
+ *
+ * extracted from /src/chainparams.cpp class
+ * The key to sign alerts.
+ *
+ * const CScript genesisOutputScript = CScript() << ParseHex("040184710fa689ad5023690c80f3a49c8f13f8d45b8c857fbcbc8bc4a8e4d3eb4b10f4d4604fa08dce601aaf0f470216fe1b51850b4acf21b179c45070ac7b03a9") << OP_CHECKSIG;
+ *
+ * ie LTC = 040184710fa689ad5023690c80f3a49c8f13f8d45b8c857fbcbc8bc4a8e4d3eb4b10f4d4604fa08dce601aaf0f470216fe1b51850b4acf21b179c45070ac7b03a9
+ *
+ * this may be the same value as scriptHex
+ */
+ private String pubKey;
+
+ /**
+ * DNS Seeds
+ *
+ * extracted from /src/chainparams.cpp class
+ * vSeeds
+ */
+ private String[] dnsSeeds;
+
+ /**
+ * BIP32 Header P2PKH Pub
+ *
+ * extracted from /src/chainparams.cpp class
+ * Concatenate the 4 values in base58Prefixes[EXT_PUBLIC_KEY]
+ * base58Prefixes[EXT_PUBLIC_KEY] = {0x04, 0x88, 0xB2, 0x1E} = 0x0488B21E
+ */
+ private int bip32HeaderP2PKHpub;
+
+ /**
+ * BIP32 Header P2PKH Priv
+ *
+ * extracted from /src/chainparams.cpp class
+ * Concatenate the 4 values in base58Prefixes[EXT_SECRET_KEY]
+ * base58Prefixes[EXT_SECRET_KEY] = {0x04, 0x88, 0xAD, 0xE4} = 0x0488ADE4
+ */
+ private int bip32HeaderP2PKHpriv;
+
+ /**
+ * Address Header (Testnet)
+ *
+ * extracted from /src/chainparams.cpp class
+ * base58Prefixes[PUBKEY_ADDRESS] from Testnet
+ */
+ private int addressHeaderTestnet;
+
+ /**
+ * BIP32 Header P2PKH Pub (Testnet)
+ *
+ * extracted from /src/chainparams.cpp class
+ * Concatenate the 4 values in base58Prefixes[EXT_PUBLIC_KEY]
+ * base58Prefixes[EXT_PUBLIC_KEY] = {0x04, 0x88, 0xB2, 0x1E} = 0x0488B21E
+ */
+ private int bip32HeaderP2PKHpubTestnet;
+
+ /**
+ * BIP32 Header P2PKH Priv (Testnet)
+ *
+ * extracted from /src/chainparams.cpp class
+ * Concatenate the 4 values in base58Prefixes[EXT_SECRET_KEY]
+ * base58Prefixes[EXT_SECRET_KEY] = {0x04, 0x88, 0xAD, 0xE4} = 0x0488ADE4
+ */
+ private int bip32HeaderP2PKHprivTestnet;
+
+ /**
+ * Id
+ *
+ * "org.litecoin.production" for LTC
+ * I'm guessing this just has to match others for trading purposes.
+ */
+ private String id;
+
+ /**
+ * Majority Enforce Block Upgrade
+ *
+ * All coins are setting this to 750, except DOGE is setting this to 1500.
+ */
+ private int majorityEnforceBlockUpgrade;
+
+ /**
+ * Majority Reject Block Outdated
+ *
+ * All coins are setting this to 950, except DOGE is setting this to 1900.
+ */
+ private int majorityRejectBlockOutdated;
+
+ /**
+ * Majority Window
+ *
+ * All coins are setting this to 1000, except DOGE is setting this to 2000.
+ */
+ private int majorityWindow;
+
+ /**
+ * Code
+ *
+ * "LITE" for LTC
+ * Currency code for full unit.
+ */
+ private String code;
+
+ /**
+ * mCode
+ *
+ * "mLITE" for LTC
+ * Currency code for milli unit.
+ */
+ private String mCode;
+
+ /**
+ * Base Code
+ *
+ * "Liteoshi" for LTC
+ * Currency code for base unit.
+ */
+ private String baseCode;
+
+ /**
+ * Min Non Dust Output
+ *
+ * 100000 for LTC, web search for minimum transaction fee per kB
+ */
+ private int minNonDustOutput;
+
+ /**
+ * URI Scheme
+ *
+ * uriScheme = "litecoin" for LTC
+ * Do a web search to find this value.
+ */
+ private String uriScheme;
+
+ /**
+ * Protocol Version Minimum
+ *
+ * 70002 for LTC
+ * extracted from /src/protocol.h class
+ */
+ private int protocolVersionMinimum;
+
+ /**
+ * Protocol Version Current
+ *
+ * 70003 for LTC
+ * extracted from /src/protocol.h class
+ */
+ private int protocolVersionCurrent;
+
+ /**
+ * Has Max Money
+ *
+ * false for DOGE, true for BTC and LTC
+ */
+ private boolean hasMaxMoney;
+
+ /**
+ * Max Money
+ *
+ * 84000000 for LTC, 21000000 for BTC
+ * extracted from src/amount.h class
+ */
+ private long maxMoney;
+
+ /**
+ * Currency Code
+ *
+ * The trading symbol, ie LTC, BTC, DOGE
+ */
+ private String currencyCode;
+
+ /**
+ * Minimum Order Amount
+ *
+ * web search, LTC minimumOrderAmount = 1000000, 0.01 LTC minimum order to avoid dust errors
+ */
+ private long minimumOrderAmount;
+
+ /**
+ * Fee Per Kb
+ *
+ * web search, LTC feePerKb = 10000, 0.0001 LTC per 1000 bytes
+ */
+ private long feePerKb;
+
+ /**
+ * Network Name
+ *
+ * ie Litecoin-MAIN
+ */
+ private String networkName;
+
+ /**
+ * Fee Ceiling
+ *
+ * web search, LTC fee ceiling = 1000L
+ */
+ private long feeCeiling;
+
+ /**
+ * Extended Public Key
+ *
+ * xpub for operations that require wallet watching
+ */
+ private String extendedPublicKey;
+
+ /**
+ * Send Amount
+ *
+ * The amount to send in base units. Also, requires sending fee per byte, receiving address and sender's extended private key.
+ */
+ private long sendAmount;
+
+ /**
+ * Sending Fee Per Byte
+ *
+ * The fee to include on a send request in base units. Also, requires receiving address, sender's extended private key and send amount.
+ */
+ private long sendingFeePerByte;
+
+ /**
+ * Receiving Address
+ *
+ * The receiving address for a send request. Also, requires send amount, sender's extended private key and sending fee per byte.
+ */
+ private String receivingAddress;
+
+ /**
+ * Extended Private Key
+ *
+ * xpriv address for a send request. Also, requires receiving address, send amount and sending fee per byte.
+ */
+ private String extendedPrivateKey;
+
+ /**
+ * Server Info
+ *
+ * For adding, removing, setting current server requests.
+ */
+ private ServerInfo serverInfo;
+
+ /**
+ * Script Sig
+ *
+ * extracted from /src/chainparams.cpp class
+ * pszTimestamp
+ *
+ * transform this value - https://bitcoin.stackexchange.com/questions/13122/scriptsig-coinbase-structure-of-the-genesis-block
+ * ie LTC = 04ffff001d0104404e592054696d65732030352f4f63742f32303131205374657665204a6f62732c204170706c65e280997320566973696f6e6172792c2044696573206174203536
+ * ie DOGE = 04ffff001d0104084e696e746f6e646f
+ */
+ private String scriptSig;
+
+ /**
+ * Script Hex
+ *
+ * extracted from /src/chainparams.cpp class
+ * genesisOutputScript
+ *
+ * ie LTC = 040184710fa689ad5023690c80f3a49c8f13f8d45b8c857fbcbc8bc4a8e4d3eb4b10f4d4604fa08dce601aaf0f470216fe1b51850b4acf21b179c45070ac7b03a9
+ *
+ * this may be the same value as pubKey
+ */
+ private String scriptHex;
+
+ /**
+ * Reward
+ *
+ * extracted from /src/chainparams.cpp class
+ * CreateGenesisBlock(..., [reward] * COIN)
+ *
+ * ie LTC = 50, BTC = 50, DOGE = 88
+ */
+ private int reward;
+
+ /**
+ * Genesis Creation Version
+ */
+ private int genesisCreationVersion;
+
+ /**
+ * Genesis Block Version
+ */
+ private long genesisBlockVersion;
+
+ /**
+ * Genesis Time
+ *
+ * extracted from /src/chainparams.cpp class
+ * CreateGenesisBlock(nTime, ...)
+ *
+ * ie LTC = 1317972665
+ */
+ private long genesisTime;
+
+ /**
+ * Difficulty Target
+ *
+ * extracted from /src/chainparams.cpp class
+ * CreateGenesisBlock(genesisTime, nonce, difficultyTarget, 1, reward * COIN);
+ *
+ * convert from hex to decimal
+ *
+ * ie LTC = 0x1e0ffff0 = 504365040
+ */
+ private long difficultyTarget;
+
+ /**
+ * Merkle Hex
+ */
+ private String merkleHex;
+
+ /**
+ * Nonce
+ *
+ * extracted from /src/chainparams.cpp class
+ * CreateGenesisBlock(genesisTime, nonce, difficultyTarget, 1, reward * COIN);
+ *
+ * ie LTC = 2084524493
+ */
+ private long nonce;
+
+
+ public int getTargetTimespan() {
+ return targetTimespan;
+ }
+
+ public int getTargetSpacing() {
+ return targetSpacing;
+ }
+
+ public long getPacketMagic() {
+ return packetMagic;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public int getAddressHeader() {
+ return addressHeader;
+ }
+
+ public int getP2shHeader() {
+ return p2shHeader;
+ }
+
+ public String getSegwitAddressHrp() {
+ return segwitAddressHrp;
+ }
+
+ public int getDumpedPrivateKeyHeader() {
+ return dumpedPrivateKeyHeader;
+ }
+
+ public int getSubsidyDecreaseBlockCount() {
+ return subsidyDecreaseBlockCount;
+ }
+
+ public String getExpectedGenesisHash() {
+ return expectedGenesisHash;
+ }
+
+ public String getPubKey() {
+ return pubKey;
+ }
+
+ public String[] getDnsSeeds() {
+ return dnsSeeds;
+ }
+
+ public int getBip32HeaderP2PKHpub() {
+ return bip32HeaderP2PKHpub;
+ }
+
+ public int getBip32HeaderP2PKHpriv() {
+ return bip32HeaderP2PKHpriv;
+ }
+
+ public int getAddressHeaderTestnet() {
+ return addressHeaderTestnet;
+ }
+
+ public int getBip32HeaderP2PKHpubTestnet() {
+ return bip32HeaderP2PKHpubTestnet;
+ }
+
+ public int getBip32HeaderP2PKHprivTestnet() {
+ return bip32HeaderP2PKHprivTestnet;
+ }
+
+ public String getId() {
+ return this.id;
+ }
+
+ public int getMajorityEnforceBlockUpgrade() {
+ return this.majorityEnforceBlockUpgrade;
+ }
+
+ public int getMajorityRejectBlockOutdated() {
+ return this.majorityRejectBlockOutdated;
+ }
+
+ public int getMajorityWindow() {
+ return this.majorityWindow;
+ }
+
+ public String getCode() {
+ return this.code;
+ }
+
+ public String getmCode() {
+ return this.mCode;
+ }
+
+ public String getBaseCode() {
+ return this.baseCode;
+ }
+
+ public int getMinNonDustOutput() {
+ return this.minNonDustOutput;
+ }
+
+ public String getUriScheme() {
+ return this.uriScheme;
+ }
+
+ public int getProtocolVersionMinimum() {
+ return this.protocolVersionMinimum;
+ }
+
+ public int getProtocolVersionCurrent() {
+ return this.protocolVersionCurrent;
+ }
+
+ public boolean isHasMaxMoney() {
+ return this.hasMaxMoney;
+ }
+
+ public long getMaxMoney() {
+ return this.maxMoney;
+ }
+
+ public String getCurrencyCode() {
+ return this.currencyCode;
+ }
+
+ public long getMinimumOrderAmount() {
+ return this.minimumOrderAmount;
+ }
+
+ public long getFeePerKb() {
+ return this.feePerKb;
+ }
+
+ public String getNetworkName() {
+ return this.networkName;
+ }
+
+ public long getFeeCeiling() {
+ return this.feeCeiling;
+ }
+
+ public String getExtendedPublicKey() {
+ return this.extendedPublicKey;
+ }
+
+ public long getSendAmount() {
+ return this.sendAmount;
+ }
+
+ public long getSendingFeePerByte() {
+ return this.sendingFeePerByte;
+ }
+
+ public String getReceivingAddress() {
+ return this.receivingAddress;
+ }
+
+ public String getExtendedPrivateKey() {
+ return this.extendedPrivateKey;
+ }
+
+ public ServerInfo getServerInfo() {
+ return this.serverInfo;
+ }
+
+ public String getScriptSig() {
+ return this.scriptSig;
+ }
+
+ public String getScriptHex() {
+ return this.scriptHex;
+ }
+
+ public int getReward() {
+ return this.reward;
+ }
+
+ public int getGenesisCreationVersion() {
+ return this.genesisCreationVersion;
+ }
+
+ public long getGenesisBlockVersion() {
+ return this.genesisBlockVersion;
+ }
+
+ public long getGenesisTime() {
+ return this.genesisTime;
+ }
+
+ public long getDifficultyTarget() {
+ return this.difficultyTarget;
+ }
+
+ public String getMerkleHex() {
+ return this.merkleHex;
+ }
+
+ public long getNonce() {
+ return this.nonce;
+ }
+
+ @Override
+ public String toString() {
+ return "BitcoinyTBDRequest{" +
+ "targetTimespan=" + targetTimespan +
+ ", targetSpacing=" + targetSpacing +
+ ", packetMagic=" + packetMagic +
+ ", port=" + port +
+ ", addressHeader=" + addressHeader +
+ ", p2shHeader=" + p2shHeader +
+ ", segwitAddressHrp='" + segwitAddressHrp + '\'' +
+ ", dumpedPrivateKeyHeader=" + dumpedPrivateKeyHeader +
+ ", subsidyDecreaseBlockCount=" + subsidyDecreaseBlockCount +
+ ", expectedGenesisHash='" + expectedGenesisHash + '\'' +
+ ", pubKey='" + pubKey + '\'' +
+ ", dnsSeeds=" + Arrays.toString(dnsSeeds) +
+ ", bip32HeaderP2PKHpub=" + bip32HeaderP2PKHpub +
+ ", bip32HeaderP2PKHpriv=" + bip32HeaderP2PKHpriv +
+ ", addressHeaderTestnet=" + addressHeaderTestnet +
+ ", bip32HeaderP2PKHpubTestnet=" + bip32HeaderP2PKHpubTestnet +
+ ", bip32HeaderP2PKHprivTestnet=" + bip32HeaderP2PKHprivTestnet +
+ ", id='" + id + '\'' +
+ ", majorityEnforceBlockUpgrade=" + majorityEnforceBlockUpgrade +
+ ", majorityRejectBlockOutdated=" + majorityRejectBlockOutdated +
+ ", majorityWindow=" + majorityWindow +
+ ", code='" + code + '\'' +
+ ", mCode='" + mCode + '\'' +
+ ", baseCode='" + baseCode + '\'' +
+ ", minNonDustOutput=" + minNonDustOutput +
+ ", uriScheme='" + uriScheme + '\'' +
+ ", protocolVersionMinimum=" + protocolVersionMinimum +
+ ", protocolVersionCurrent=" + protocolVersionCurrent +
+ ", hasMaxMoney=" + hasMaxMoney +
+ ", maxMoney=" + maxMoney +
+ ", currencyCode='" + currencyCode + '\'' +
+ ", minimumOrderAmount=" + minimumOrderAmount +
+ ", feePerKb=" + feePerKb +
+ ", networkName='" + networkName + '\'' +
+ ", feeCeiling=" + feeCeiling +
+ ", extendedPublicKey='" + extendedPublicKey + '\'' +
+ ", sendAmount=" + sendAmount +
+ ", sendingFeePerByte=" + sendingFeePerByte +
+ ", receivingAddress='" + receivingAddress + '\'' +
+ ", extendedPrivateKey='" + extendedPrivateKey + '\'' +
+ ", serverInfo=" + serverInfo +
+ ", scriptSig='" + scriptSig + '\'' +
+ ", scriptHex='" + scriptHex + '\'' +
+ ", reward=" + reward +
+ ", genesisCreationVersion=" + genesisCreationVersion +
+ ", genesisBlockVersion=" + genesisBlockVersion +
+ ", genesisTime=" + genesisTime +
+ ", difficultyTarget=" + difficultyTarget +
+ ", merkleHex='" + merkleHex + '\'' +
+ ", nonce=" + nonce +
+ '}';
+ }
+}
diff --git a/src/main/java/org/qortal/api/model/crosschain/TradeBotRespondRequests.java b/src/main/java/org/qortal/api/model/crosschain/TradeBotRespondRequests.java
new file mode 100644
index 00000000..e78f951d
--- /dev/null
+++ b/src/main/java/org/qortal/api/model/crosschain/TradeBotRespondRequests.java
@@ -0,0 +1,68 @@
+package org.qortal.api.model.crosschain;
+
+import io.swagger.v3.oas.annotations.media.Schema;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import java.util.List;
+
+@XmlAccessorType(XmlAccessType.FIELD)
+public class TradeBotRespondRequests {
+
+ @Schema(description = "Foreign blockchain private key, e.g. BIP32 'm' key for Bitcoin/Litecoin starting with 'xprv'",
+ example = "xprv___________________________________________________________________________________________________________")
+ public String foreignKey;
+
+ @Schema(description = "List of address matches")
+ @XmlElement(name = "addresses")
+ public List addresses;
+
+ @Schema(description = "Qortal address for receiving QORT from AT", example = "Qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq")
+ public String receivingAddress;
+
+ public TradeBotRespondRequests() {
+ }
+
+ public TradeBotRespondRequests(String foreignKey, List addresses, String receivingAddress) {
+ this.foreignKey = foreignKey;
+ this.addresses = addresses;
+ this.receivingAddress = receivingAddress;
+ }
+
+ @Schema(description = "Address Match")
+ // All properties to be converted to JSON via JAXB
+ @XmlAccessorType(XmlAccessType.FIELD)
+ public static class AddressMatch {
+ @Schema(description = "AT Address")
+ public String atAddress;
+
+ @Schema(description = "Receiving Address")
+ public String receivingAddress;
+
+ // For JAX-RS
+ protected AddressMatch() {
+ }
+
+ public AddressMatch(String atAddress, String receivingAddress) {
+ this.atAddress = atAddress;
+ this.receivingAddress = receivingAddress;
+ }
+
+ @Override
+ public String toString() {
+ return "AddressMatch{" +
+ "atAddress='" + atAddress + '\'' +
+ ", receivingAddress='" + receivingAddress + '\'' +
+ '}';
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "TradeBotRespondRequests{" +
+ "foreignKey='" + foreignKey + '\'' +
+ ", addresses=" + addresses +
+ '}';
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/qortal/api/resource/AddressesResource.java b/src/main/java/org/qortal/api/resource/AddressesResource.java
index 66d8412c..beb73734 100644
--- a/src/main/java/org/qortal/api/resource/AddressesResource.java
+++ b/src/main/java/org/qortal/api/resource/AddressesResource.java
@@ -20,9 +20,7 @@ import org.qortal.asset.Asset;
import org.qortal.controller.LiteNode;
import org.qortal.controller.OnlineAccountsManager;
import org.qortal.crypto.Crypto;
-import org.qortal.data.account.AccountData;
-import org.qortal.data.account.AccountPenaltyData;
-import org.qortal.data.account.RewardShareData;
+import org.qortal.data.account.*;
import org.qortal.data.network.OnlineAccountData;
import org.qortal.data.network.OnlineAccountLevel;
import org.qortal.data.transaction.PublicizeTransactionData;
@@ -52,6 +50,7 @@ import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
+import java.util.Optional;
import java.util.stream.Collectors;
@Path("/addresses")
@@ -327,11 +326,8 @@ public class AddressesResource {
)
}
)
- @ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.NON_PRODUCTION, ApiError.REPOSITORY_ISSUE})
+ @ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.REPOSITORY_ISSUE})
public String fromPublicKey(@PathParam("publickey") String publicKey58) {
- if (Settings.getInstance().isApiRestricted())
- throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.NON_PRODUCTION);
-
// Decode public key
byte[] publicKey;
try {
@@ -630,4 +626,160 @@ public class AddressesResource {
}
}
-}
+ @GET
+ @Path("/sponsorship/{address}")
+ @Operation(
+ summary = "Returns sponsorship statistics for an account",
+ description = "Returns sponsorship statistics for an account, excluding the recipients that get real reward shares",
+ responses = {
+ @ApiResponse(
+ description = "the statistics",
+ content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = SponsorshipReport.class))
+ )
+ }
+ )
+ @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
+ public SponsorshipReport getSponsorshipReport(
+ @PathParam("address") String address,
+ @QueryParam(("realRewardShareRecipient")) String[] realRewardShareRecipients) {
+ if (!Crypto.isValidAddress(address))
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ SponsorshipReport report = repository.getAccountRepository().getSponsorshipReport(address, realRewardShareRecipients);
+ // Not found?
+ if (report == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
+
+ return report;
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
+ }
+ }
+
+ @GET
+ @Path("/sponsorship/{address}/sponsor")
+ @Operation(
+ summary = "Returns sponsorship statistics for an account's sponsor",
+ description = "Returns sponsorship statistics for an account's sponsor, excluding the recipients that get real reward shares",
+ responses = {
+ @ApiResponse(
+ description = "the statistics",
+ content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = SponsorshipReport.class))
+ )
+ }
+ )
+ @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
+ public SponsorshipReport getSponsorshipReportForSponsor(
+ @PathParam("address") String address,
+ @QueryParam("realRewardShareRecipient") String[] realRewardShareRecipients) {
+ if (!Crypto.isValidAddress(address))
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // get sponsor
+ Optional sponsor = repository.getAccountRepository().getSponsor(address);
+
+            // if there is no sponsor, throw error
+ if(sponsor.isEmpty()) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
+
+ // get report for sponsor
+ SponsorshipReport report = repository.getAccountRepository().getSponsorshipReport(sponsor.get(), realRewardShareRecipients);
+
+ // Not found?
+ if (report == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
+
+ return report;
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
+ }
+ }
+
+ @GET
+ @Path("/mintership/{address}")
+ @Operation(
+ summary = "Returns mintership statistics for an account",
+ description = "Returns mintership statistics for an account",
+ responses = {
+ @ApiResponse(
+ description = "the statistics",
+ content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = MintershipReport.class))
+ )
+ }
+ )
+ @ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
+ public MintershipReport getMintershipReport(@PathParam("address") String address,
+ @QueryParam("realRewardShareRecipient") String[] realRewardShareRecipients ) {
+ if (!Crypto.isValidAddress(address))
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // get sponsorship report for minter, fetch a list of one minter
+ SponsorshipReport report = repository.getAccountRepository().getMintershipReport(address, account -> List.of(account));
+
+ // Not found?
+ if (report == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ADDRESS_UNKNOWN);
+
+ // since the report is for one minter, must get sponsee count separately
+ int sponseeCount = repository.getAccountRepository().getSponseeAddresses(address, realRewardShareRecipients).size();
+
+            // since the report is for one minter, must get the first name from an array of names that should be size 1
+ String name = report.getNames().length > 0 ? report.getNames()[0] : null;
+
+ // transform sponsorship report to mintership report
+ MintershipReport mintershipReport
+ = new MintershipReport(
+ report.getAddress(),
+ report.getLevel(),
+ report.getBlocksMinted(),
+ report.getAdjustments(),
+ report.getPenalties(),
+ report.isTransfer(),
+ name,
+ sponseeCount,
+ report.getAvgBalance(),
+ report.getArbitraryCount(),
+ report.getTransferAssetCount(),
+ report.getTransferPrivsCount(),
+ report.getSellCount(),
+ report.getSellAmount(),
+ report.getBuyCount(),
+ report.getBuyAmount()
+ );
+
+ return mintershipReport;
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
+ }
+ }
+
+ @GET
+ @Path("/levels/{minLevel}")
+ @Operation(
+ summary = "Return accounts with levels greater than or equal to input",
+ responses = {
+ @ApiResponse(
+ description = "online accounts",
+ content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = AddressLevelPairing.class)))
+ )
+ }
+ )
+ @ApiErrors({ApiError.REPOSITORY_ISSUE})
+
+ public List getAddressLevelPairings(@PathParam("minLevel") int minLevel) {
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // get the level address pairings
+ List pairings = repository.getAccountRepository().getAddressLevelPairings(minLevel);
+
+ return pairings;
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/qortal/api/resource/ArbitraryResource.java b/src/main/java/org/qortal/api/resource/ArbitraryResource.java
index 99fc0020..a6f44373 100644
--- a/src/main/java/org/qortal/api/resource/ArbitraryResource.java
+++ b/src/main/java/org/qortal/api/resource/ArbitraryResource.java
@@ -33,9 +33,13 @@ import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.controller.arbitrary.ArbitraryMetadataManager;
import org.qortal.data.account.AccountData;
import org.qortal.data.arbitrary.ArbitraryCategoryInfo;
+import org.qortal.data.arbitrary.ArbitraryDataIndexDetail;
+import org.qortal.data.arbitrary.ArbitraryDataIndexScoreKey;
+import org.qortal.data.arbitrary.ArbitraryDataIndexScorecard;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
+import org.qortal.data.arbitrary.IndexCache;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
@@ -69,8 +73,11 @@ import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Comparator;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
+import java.util.stream.Collectors;
@Path("/arbitrary")
@Tag(name = "Arbitrary")
@@ -172,6 +179,7 @@ public class ArbitraryResource {
@Parameter(description = "Name (searches name field only)") @QueryParam("name") List names,
@Parameter(description = "Title (searches title metadata field only)") @QueryParam("title") String title,
@Parameter(description = "Description (searches description metadata field only)") @QueryParam("description") String description,
+ @Parameter(description = "Keyword (searches description metadata field by keywords)") @QueryParam("keywords") List keywords,
@Parameter(description = "Prefix only (if true, only the beginning of fields are matched)") @QueryParam("prefix") Boolean prefixOnly,
@Parameter(description = "Exact match names only (if true, partial name matches are excluded)") @QueryParam("exactmatchnames") Boolean exactMatchNamesOnly,
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
@@ -212,7 +220,7 @@ public class ArbitraryResource {
}
List resources = repository.getArbitraryRepository()
- .searchArbitraryResources(service, query, identifier, names, title, description, usePrefixOnly,
+ .searchArbitraryResources(service, query, identifier, names, title, description, keywords, usePrefixOnly,
exactMatchNames, defaultRes, mode, minLevel, followedOnly, excludeBlocked, includeMetadata, includeStatus,
before, after, limit, offset, reverse);
@@ -227,6 +235,49 @@ public class ArbitraryResource {
}
}
+ @GET
+ @Path("/resources/searchsimple")
+ @Operation(
+ summary = "Search arbitrary resources available on chain, optionally filtered by service.",
+ responses = {
+ @ApiResponse(
+ content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = ArbitraryResourceData.class))
+ )
+ }
+ )
+ @ApiErrors({ApiError.REPOSITORY_ISSUE})
+ public List searchResourcesSimple(
+ @QueryParam("service") Service service,
+ @Parameter(description = "Identifier (searches identifier field only)") @QueryParam("identifier") String identifier,
+ @Parameter(description = "Name (searches name field only)") @QueryParam("name") List names,
+ @Parameter(description = "Prefix only (if true, only the beginning of fields are matched)") @QueryParam("prefix") Boolean prefixOnly,
+            @Parameter(description = "Case insensitive (ignore letter case on search)") @QueryParam("caseInsensitive") Boolean caseInsensitive,
+ @Parameter(description = "Creation date before timestamp") @QueryParam("before") Long before,
+ @Parameter(description = "Creation date after timestamp") @QueryParam("after") Long after,
+ @Parameter(ref = "limit") @QueryParam("limit") Integer limit,
+ @Parameter(ref = "offset") @QueryParam("offset") Integer offset,
+ @Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ boolean usePrefixOnly = Boolean.TRUE.equals(prefixOnly);
+ boolean ignoreCase = Boolean.TRUE.equals(caseInsensitive);
+
+ List resources = repository.getArbitraryRepository()
+ .searchArbitraryResourcesSimple(service, identifier, names, usePrefixOnly,
+ before, after, limit, offset, reverse, ignoreCase);
+
+ if (resources == null) {
+ return new ArrayList<>();
+ }
+
+ return resources;
+
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
+ }
+ }
+
@GET
@Path("/resource/status/{service}/{name}")
@Operation(
@@ -1142,6 +1193,90 @@ public class ArbitraryResource {
}
}
+ @GET
+ @Path("/indices")
+ @Operation(
+ summary = "Find matching arbitrary resource indices",
+ description = "",
+ responses = {
+ @ApiResponse(
+ description = "indices",
+ content = @Content(
+ array = @ArraySchema(
+ schema = @Schema(
+ implementation = ArbitraryDataIndexScorecard.class
+ )
+ )
+ )
+ )
+ }
+ )
+ public List searchIndices(@QueryParam("terms") String[] terms) {
+
+ List indices = new ArrayList<>();
+
+ // get index details for each term
+ for( String term : terms ) {
+ List details = IndexCache.getInstance().getIndicesByTerm().get(term);
+
+ if( details != null ) {
+ indices.addAll(details);
+ }
+ }
+
+ // sum up the scores for each index with identical attributes
+ Map scoreForKey
+ = indices.stream()
+ .collect(
+ Collectors.groupingBy(
+ index -> new ArbitraryDataIndexScoreKey(index.name, index.category, index.link),
+ Collectors.summingDouble(detail -> 1.0 / detail.rank)
+ )
+ );
+
+ // create scorecards for each index group and put them in descending order by score
+ List scorecards
+ = scoreForKey.entrySet().stream().map(
+ entry
+ ->
+ new ArbitraryDataIndexScorecard(
+ entry.getValue(),
+ entry.getKey().name,
+ entry.getKey().category,
+ entry.getKey().link)
+ )
+ .sorted(Comparator.comparingDouble(ArbitraryDataIndexScorecard::getScore).reversed())
+ .collect(Collectors.toList());
+
+ return scorecards;
+ }
+
+ @GET
+ @Path("/indices/{name}/{idPrefix}")
+ @Operation(
+ summary = "Find matching arbitrary resource indices for a registered name and identifier prefix",
+ description = "",
+ responses = {
+ @ApiResponse(
+ description = "indices",
+ content = @Content(
+ array = @ArraySchema(
+ schema = @Schema(
+ implementation = ArbitraryDataIndexDetail.class
+ )
+ )
+ )
+ )
+ }
+ )
+ public List searchIndicesByName(@PathParam("name") String name, @PathParam("idPrefix") String idPrefix) {
+
+ return
+ IndexCache.getInstance().getIndicesByIssuer()
+ .getOrDefault(name, new ArrayList<>(0)).stream()
+ .filter( indexDetail -> indexDetail.indexIdentifer.startsWith(idPrefix))
+ .collect(Collectors.toList());
+ }
// Shared methods
diff --git a/src/main/java/org/qortal/api/resource/AssetsResource.java b/src/main/java/org/qortal/api/resource/AssetsResource.java
index 40e04256..49ed251a 100644
--- a/src/main/java/org/qortal/api/resource/AssetsResource.java
+++ b/src/main/java/org/qortal/api/resource/AssetsResource.java
@@ -16,9 +16,13 @@ import org.qortal.api.model.AggregatedOrder;
import org.qortal.api.model.TradeWithOrderInfo;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.asset.Asset;
+import org.qortal.controller.hsqldb.HSQLDBBalanceRecorder;
import org.qortal.crypto.Crypto;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
+import org.qortal.data.account.AddressAmountData;
+import org.qortal.data.account.BlockHeightRange;
+import org.qortal.data.account.BlockHeightRangeAddressAmounts;
import org.qortal.data.asset.AssetData;
import org.qortal.data.asset.OrderData;
import org.qortal.data.asset.RecentTradeData;
@@ -33,6 +37,7 @@ import org.qortal.transaction.Transaction;
import org.qortal.transaction.Transaction.ValidationResult;
import org.qortal.transform.TransformationException;
import org.qortal.transform.transaction.*;
+import org.qortal.utils.BalanceRecorderUtils;
import org.qortal.utils.Base58;
import javax.servlet.http.HttpServletRequest;
@@ -42,6 +47,7 @@ import javax.ws.rs.core.MediaType;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Optional;
import java.util.stream.Collectors;
@Path("/assets")
@@ -179,6 +185,122 @@ public class AssetsResource {
}
}
+ @GET
+ @Path("/balancedynamicranges")
+ @Operation(
+ summary = "Get balance dynamic ranges listed.",
+ description = ".",
+ responses = {
+ @ApiResponse(
+ content = @Content(
+ array = @ArraySchema(
+ schema = @Schema(
+ implementation = BlockHeightRange.class
+ )
+ )
+ )
+ )
+ }
+ )
+ public List getBalanceDynamicRanges(
+ @Parameter(ref = "offset") @QueryParam("offset") Integer offset,
+ @Parameter(ref = "limit") @QueryParam("limit") Integer limit,
+ @Parameter(ref = "reverse") @QueryParam("reverse") Boolean reverse) {
+
+ Optional recorder = HSQLDBBalanceRecorder.getInstance();
+
+ if( recorder.isPresent()) {
+ return recorder.get().getRanges(offset, limit, reverse);
+ }
+ else {
+ return new ArrayList<>(0);
+ }
+ }
+
+ @GET
+ @Path("/balancedynamicrange/{height}")
+ @Operation(
+ summary = "Get balance dynamic range for a given height.",
+ description = ".",
+ responses = {
+ @ApiResponse(
+ content = @Content(
+ schema = @Schema(
+ implementation = BlockHeightRange.class
+ )
+ )
+ )
+ }
+ )
+ @ApiErrors({
+ ApiError.INVALID_CRITERIA, ApiError.INVALID_DATA
+ })
+ public BlockHeightRange getBalanceDynamicRange(@PathParam("height") int height) {
+
+ Optional recorder = HSQLDBBalanceRecorder.getInstance();
+
+ if( recorder.isPresent()) {
+ Optional range = recorder.get().getRange(height);
+
+ if( range.isPresent() ) {
+ return range.get();
+ }
+ else {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
+ }
+ }
+ else {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
+ }
+ }
+
+ @GET
+ @Path("/balancedynamicamounts/{begin}/{end}")
+ @Operation(
+ summary = "Get balance dynamic ranges address amounts listed.",
+ description = ".",
+ responses = {
+ @ApiResponse(
+ content = @Content(
+ array = @ArraySchema(
+ schema = @Schema(
+ implementation = AddressAmountData.class
+ )
+ )
+ )
+ )
+ }
+ )
+ @ApiErrors({
+ ApiError.INVALID_CRITERIA, ApiError.INVALID_DATA
+ })
+ public List getBalanceDynamicAddressAmounts(
+ @PathParam("begin") int begin,
+ @PathParam("end") int end,
+ @Parameter(ref = "offset") @QueryParam("offset") Integer offset,
+ @Parameter(ref = "limit") @QueryParam("limit") Integer limit) {
+
+ Optional recorder = HSQLDBBalanceRecorder.getInstance();
+
+ if( recorder.isPresent()) {
+ Optional addressAmounts = recorder.get().getAddressAmounts(new BlockHeightRange(begin, end, false));
+
+ if( addressAmounts.isPresent() ) {
+ return addressAmounts.get().getAmounts().stream()
+ .sorted(BalanceRecorderUtils.ADDRESS_AMOUNT_DATA_COMPARATOR.reversed())
+ .skip(offset)
+ .limit(limit)
+ .collect(Collectors.toList());
+ }
+ else {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
+ }
+ }
+ else {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
+ }
+ }
+
@GET
@Path("/openorders/{assetid}/{otherassetid}")
@Operation(
diff --git a/src/main/java/org/qortal/api/resource/BlocksResource.java b/src/main/java/org/qortal/api/resource/BlocksResource.java
index 01d8d2ab..0203bafc 100644
--- a/src/main/java/org/qortal/api/resource/BlocksResource.java
+++ b/src/main/java/org/qortal/api/resource/BlocksResource.java
@@ -19,6 +19,8 @@ import org.qortal.crypto.Crypto;
import org.qortal.data.account.AccountData;
import org.qortal.data.block.BlockData;
import org.qortal.data.block.BlockSummaryData;
+import org.qortal.data.block.DecodedOnlineAccountData;
+import org.qortal.data.network.OnlineAccountData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.BlockArchiveReader;
import org.qortal.repository.DataException;
@@ -27,6 +29,7 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformer;
import org.qortal.utils.Base58;
+import org.qortal.utils.Blocks;
import org.qortal.utils.Triple;
import javax.servlet.http.HttpServletRequest;
@@ -45,6 +48,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
+import java.util.Set;
@Path("/blocks")
@Tag(name = "Blocks")
@@ -542,6 +546,7 @@ public class BlocksResource {
}
}
+ String minterAddress = Account.getRewardShareMintingAddress(repository, blockData.getMinterPublicKey());
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
if (minterLevel == 0)
// This may be unavailable when requesting a trimmed block
@@ -554,6 +559,7 @@ public class BlocksResource {
BlockMintingInfo blockMintingInfo = new BlockMintingInfo();
blockMintingInfo.minterPublicKey = blockData.getMinterPublicKey();
+ blockMintingInfo.minterAddress = minterAddress;
blockMintingInfo.minterLevel = minterLevel;
blockMintingInfo.onlineAccountsCount = blockData.getOnlineAccountsCount();
blockMintingInfo.maxDistance = new BigDecimal(block.MAX_DISTANCE);
@@ -888,4 +894,49 @@ public class BlocksResource {
}
}
-}
+ @GET
+ @Path("/onlineaccounts/{height}")
+ @Operation(
+ summary = "Get online accounts for block",
+ description = "Returns the online accounts who submitted signatures for this block",
+ responses = {
+ @ApiResponse(
+ description = "online accounts",
+ content = @Content(
+ array = @ArraySchema(
+ schema = @Schema(
+ implementation = DecodedOnlineAccountData.class
+ )
+ )
+ )
+ )
+ }
+ )
+ @ApiErrors({
+ ApiError.BLOCK_UNKNOWN, ApiError.REPOSITORY_ISSUE
+ })
+ public Set getOnlineAccounts(@PathParam("height") int height) {
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // get block from database
+ BlockData blockData = repository.getBlockRepository().fromHeight(height);
+
+ // if block data is not in the database, then try the archive
+ if (blockData == null) {
+ blockData = repository.getBlockArchiveRepository().fromHeight(height);
+
+ // if the block is not in the database or the archive, then the block is unknown
+ if( blockData == null ) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCK_UNKNOWN);
+ }
+ }
+
+ Set onlineAccounts = Blocks.getDecodedOnlineAccountsForBlock(repository, blockData);
+
+ return onlineAccounts;
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/qortal/api/resource/ChatResource.java b/src/main/java/org/qortal/api/resource/ChatResource.java
index 66a2bd46..df2ca399 100644
--- a/src/main/java/org/qortal/api/resource/ChatResource.java
+++ b/src/main/java/org/qortal/api/resource/ChatResource.java
@@ -234,17 +234,21 @@ public class ChatResource {
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
- public ActiveChats getActiveChats(@PathParam("address") String address, @QueryParam("encoding") Encoding encoding) {
+ public ActiveChats getActiveChats(
+ @PathParam("address") String address,
+ @QueryParam("encoding") Encoding encoding,
+ @QueryParam("haschatreference") Boolean hasChatReference
+ ) {
if (address == null || !Crypto.isValidAddress(address))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
-
+
try (final Repository repository = RepositoryManager.getRepository()) {
- return repository.getChatRepository().getActiveChats(address, encoding);
+ return repository.getChatRepository().getActiveChats(address, encoding, hasChatReference);
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
-
+
@POST
@Operation(
summary = "Build raw, unsigned, CHAT transaction",
diff --git a/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java b/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java
index 3f05643d..c8f9ea6b 100644
--- a/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java
+++ b/src/main/java/org/qortal/api/resource/CrossChainHtlcResource.java
@@ -157,7 +157,7 @@ public class CrossChainHtlcResource {
htlcStatus.bitcoinP2shAddress = p2shAddress;
htlcStatus.bitcoinP2shBalance = BigDecimal.valueOf(p2shBalance, 8);
- List fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddress.toString());
+ List fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddress.toString(), false);
if (p2shBalance > 0L && !fundingOutputs.isEmpty()) {
htlcStatus.canRedeem = now >= medianBlockTime * 1000L;
@@ -401,7 +401,7 @@ public class CrossChainHtlcResource {
case FUNDED: {
Coin redeemAmount = Coin.valueOf(crossChainTradeData.expectedForeignAmount);
ECKey redeemKey = ECKey.fromPrivate(decodedTradePrivateKey);
- List fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddressA);
+ List fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddressA, false);
Transaction p2shRedeemTransaction = BitcoinyHTLC.buildRedeemTransaction(bitcoiny.getNetworkParameters(), redeemAmount, redeemKey,
fundingOutputs, redeemScriptA, decodedSecret, foreignBlockchainReceivingAccountInfo);
@@ -664,7 +664,7 @@ public class CrossChainHtlcResource {
// ElectrumX coins
ECKey refundKey = ECKey.fromPrivate(tradeBotData.getTradePrivateKey());
- List fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddressA);
+ List fundingOutputs = bitcoiny.getUnspentOutputs(p2shAddressA, false);
// Validate the destination foreign blockchain address
Address receiving = Address.fromString(bitcoiny.getNetworkParameters(), receiveAddress);
diff --git a/src/main/java/org/qortal/api/resource/CrossChainResource.java b/src/main/java/org/qortal/api/resource/CrossChainResource.java
index 9e411127..3f7acf68 100644
--- a/src/main/java/org/qortal/api/resource/CrossChainResource.java
+++ b/src/main/java/org/qortal/api/resource/CrossChainResource.java
@@ -10,11 +10,13 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
+import org.glassfish.jersey.media.multipart.ContentDisposition;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.model.CrossChainCancelRequest;
+import org.qortal.api.model.CrossChainTradeLedgerEntry;
import org.qortal.api.model.CrossChainTradeSummary;
import org.qortal.controller.tradebot.TradeBot;
import org.qortal.crosschain.ACCT;
@@ -44,14 +46,20 @@ import org.qortal.utils.Base58;
import org.qortal.utils.ByteArray;
import org.qortal.utils.NTP;
+import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
+import java.io.IOException;
import java.util.*;
import java.util.function.Supplier;
import java.util.stream.Collectors;
+
+
@Path("/crosschain")
@Tag(name = "Cross-Chain")
public class CrossChainResource {
@@ -59,6 +67,13 @@ public class CrossChainResource {
@Context
HttpServletRequest request;
+ @Context
+ HttpServletResponse response;
+
+ @Context
+ ServletContext context;
+
+
@GET
@Path("/tradeoffers")
@Operation(
@@ -255,6 +270,12 @@ public class CrossChainResource {
description = "Only return trades that completed on/after this timestamp (milliseconds since epoch)",
example = "1597310000000"
) @QueryParam("minimumTimestamp") Long minimumTimestamp,
+ @Parameter(
+ description = "Optionally filter by buyer Qortal public key"
+ ) @QueryParam("buyerPublicKey") String buyerPublicKey58,
+ @Parameter(
+ description = "Optionally filter by seller Qortal public key"
+ ) @QueryParam("sellerPublicKey") String sellerPublicKey58,
@Parameter( ref = "limit") @QueryParam("limit") Integer limit,
@Parameter( ref = "offset" ) @QueryParam("offset") Integer offset,
@Parameter( ref = "reverse" ) @QueryParam("reverse") Boolean reverse) {
@@ -266,6 +287,10 @@ public class CrossChainResource {
if (minimumTimestamp != null && minimumTimestamp <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
+ // Decode public keys
+ byte[] buyerPublicKey = decodePublicKey(buyerPublicKey58);
+ byte[] sellerPublicKey = decodePublicKey(sellerPublicKey58);
+
final Boolean isFinished = Boolean.TRUE;
try (final Repository repository = RepositoryManager.getRepository()) {
@@ -296,7 +321,7 @@ public class CrossChainResource {
byte[] codeHash = acctInfo.getKey().value;
ACCT acct = acctInfo.getValue().get();
- List atStates = repository.getATRepository().getMatchingFinalATStates(codeHash,
+ List atStates = repository.getATRepository().getMatchingFinalATStates(codeHash, buyerPublicKey, sellerPublicKey,
isFinished, acct.getModeByteOffset(), (long) AcctMode.REDEEMED.value, minimumFinalHeight,
limit, offset, reverse);
@@ -335,6 +360,120 @@ public class CrossChainResource {
}
}
+ /**
+ * Decode Public Key
+ *
+ * @param publicKey58 the public key in a string
+ *
+ * @return the public key in bytes
+ */
+ private byte[] decodePublicKey(String publicKey58) {
+
+ if( publicKey58 == null ) return null;
+ if( publicKey58.isEmpty() ) return new byte[0];
+
+ byte[] publicKey;
+ try {
+ publicKey = Base58.decode(publicKey58);
+ } catch (NumberFormatException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PUBLIC_KEY, e);
+ }
+
+ // Correct size for public key?
+ if (publicKey.length != Transformer.PUBLIC_KEY_LENGTH)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PUBLIC_KEY);
+
+ return publicKey;
+ }
+
+ @GET
+ @Path("/ledger/{publicKey}")
+ @Operation(
+ summary = "Accounting entries for all trades.",
+ description = "Returns accounting entries for all completed cross-chain trades",
+ responses = {
+ @ApiResponse(
+ content = @Content(
+ schema = @Schema(
+ type = "string",
+ format = "byte"
+ )
+ )
+ )
+ }
+ )
+ @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
+ public HttpServletResponse getLedgerEntries(
+ @PathParam("publicKey") String publicKey58,
+ @Parameter(
+ description = "Only return trades that completed on/after this timestamp (milliseconds since epoch)",
+ example = "1597310000000"
+ ) @QueryParam("minimumTimestamp") Long minimumTimestamp) {
+
+ byte[] publicKey = decodePublicKey(publicKey58);
+
+ // minimumTimestamp (if given) needs to be positive
+ if (minimumTimestamp != null && minimumTimestamp <= 0)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ Integer minimumFinalHeight = null;
+
+ if (minimumTimestamp != null) {
+ minimumFinalHeight = repository.getBlockRepository().getHeightFromTimestamp(minimumTimestamp);
+ // If not found in the block repository it will return either 0 or 1
+ if (minimumFinalHeight == 0 || minimumFinalHeight == 1) {
+ // Try the archive
+ minimumFinalHeight = repository.getBlockArchiveRepository().getHeightFromTimestamp(minimumTimestamp);
+ }
+
+ if (minimumFinalHeight == 0)
+ // We don't have any blocks since minimumTimestamp, let alone trades, so nothing to return
+ return response;
+
+ // height returned from repository is for block BEFORE timestamp
+ // but we want trades AFTER timestamp so bump height accordingly
+ minimumFinalHeight++;
+ }
+
+ List crossChainTradeLedgerEntries = new ArrayList<>();
+
+ Map> acctsByCodeHash = SupportedBlockchain.getAcctMap();
+
+ // collect ledger entries for each ACCT
+ for (Map.Entry> acctInfo : acctsByCodeHash.entrySet()) {
+ byte[] codeHash = acctInfo.getKey().value;
+ ACCT acct = acctInfo.getValue().get();
+
+ // collect buys and sells
+ CrossChainUtils.collectLedgerEntries(publicKey, repository, minimumFinalHeight, crossChainTradeLedgerEntries, codeHash, acct, true);
+ CrossChainUtils.collectLedgerEntries(publicKey, repository, minimumFinalHeight, crossChainTradeLedgerEntries, codeHash, acct, false);
+ }
+
+ crossChainTradeLedgerEntries.sort((a, b) -> Longs.compare(a.getTradeTimestamp(), b.getTradeTimestamp()));
+
+ response.setStatus(HttpServletResponse.SC_OK);
+ response.setContentType("text/csv");
+ response.setHeader(
+ HttpHeaders.CONTENT_DISPOSITION,
+ ContentDisposition
+ .type("attachment")
+ .fileName(CrossChainUtils.createLedgerFileName(Crypto.toAddress(publicKey)))
+ .build()
+ .toString()
+ );
+
+ CrossChainUtils.writeToLedger( response.getWriter(), crossChainTradeLedgerEntries);
+
+ return response;
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
+ } catch (IOException e) {
+ response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ return response;
+ }
+ }
+
@GET
@Path("/price/{blockchain}")
@Operation(
diff --git a/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java b/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java
index 5a50222a..de646a9f 100644
--- a/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java
+++ b/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java
@@ -17,13 +17,16 @@ import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.model.crosschain.TradeBotCreateRequest;
import org.qortal.api.model.crosschain.TradeBotRespondRequest;
+import org.qortal.api.model.crosschain.TradeBotRespondRequests;
import org.qortal.asset.Asset;
import org.qortal.controller.Controller;
import org.qortal.controller.tradebot.AcctTradeBot;
import org.qortal.controller.tradebot.TradeBot;
import org.qortal.crosschain.ACCT;
import org.qortal.crosschain.AcctMode;
+import org.qortal.crosschain.Bitcoiny;
import org.qortal.crosschain.ForeignBlockchain;
+import org.qortal.crosschain.PirateChain;
import org.qortal.crosschain.SupportedBlockchain;
import org.qortal.crypto.Crypto;
import org.qortal.data.at.ATData;
@@ -42,8 +45,10 @@ import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
+import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
+import java.util.Optional;
import java.util.stream.Collectors;
@Path("/crosschain/tradebot")
@@ -187,6 +192,39 @@ public class CrossChainTradeBotResource {
public String tradeBotResponder(@HeaderParam(Security.API_KEY_HEADER) String apiKey, TradeBotRespondRequest tradeBotRespondRequest) {
Security.checkApiCallAllowed(request);
+ return createTradeBotResponse(tradeBotRespondRequest);
+ }
+
+ @POST
+ @Path("/respondmultiple")
+ @Operation(
+ summary = "Respond to multiple trade offers. NOTE: WILL SPEND FUNDS!)",
+ description = "Start a new trade-bot entry to respond to chosen trade offers. Pirate Chain is not supported and will throw an invalid criteria error.",
+ requestBody = @RequestBody(
+ required = true,
+ content = @Content(
+ mediaType = MediaType.APPLICATION_JSON,
+ schema = @Schema(
+ implementation = TradeBotRespondRequests.class
+ )
+ )
+ ),
+ responses = {
+ @ApiResponse(
+ content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
+ )
+ }
+ )
+ @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_CRITERIA, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, ApiError.REPOSITORY_ISSUE})
+ @SuppressWarnings("deprecation")
+ @SecurityRequirement(name = "apiKey")
+ public String tradeBotResponderMultiple(@HeaderParam(Security.API_KEY_HEADER) String apiKey, TradeBotRespondRequests tradeBotRespondRequest) {
+ Security.checkApiCallAllowed(request);
+
+ return createTradeBotResponseMultiple(tradeBotRespondRequest);
+ }
+
+ private String createTradeBotResponse(TradeBotRespondRequest tradeBotRespondRequest) {
final String atAddress = tradeBotRespondRequest.atAddress;
// We prefer foreignKey to deprecated xprv58
@@ -257,6 +295,99 @@ public class CrossChainTradeBotResource {
}
}
+ private String createTradeBotResponseMultiple(TradeBotRespondRequests respondRequests) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ if (respondRequests.foreignKey == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
+
+ List crossChainTradeDataList = new ArrayList<>(respondRequests.addresses.size());
+ Optional acct = Optional.empty();
+
+ for(String atAddress : respondRequests.addresses ) {
+
+ if (atAddress == null || !Crypto.isValidAtAddress(atAddress))
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
+
+ if (respondRequests.receivingAddress == null || !Crypto.isValidAddress(respondRequests.receivingAddress))
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
+
+ final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
+ if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
+
+ // Extract data from cross-chain trading AT
+ ATData atData = fetchAtDataWithChecking(repository, atAddress);
+
+ // TradeBot uses AT's code hash to map to ACCT
+ ACCT acctUsingAtData = TradeBot.getInstance().getAcctUsingAtData(atData);
+ if (acctUsingAtData == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
+ // if the optional is empty,
+ // then ensure the ACCT blockchain is a Bitcoiny blockchain, but not Pirate Chain and fill the optional
+ // Even though the Pirate Chain protocol does support multi send,
+ // the Pirate Chain API we are using does not support multi send
+ else if( acct.isEmpty() ) {
+ if( !(acctUsingAtData.getBlockchain() instanceof Bitcoiny) ||
+ acctUsingAtData.getBlockchain() instanceof PirateChain )
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
+ acct = Optional.of(acctUsingAtData);
+ }
+ // if the optional is filled, then ensure it is equal to the AT in this iteration
+ else if( !acctUsingAtData.getCodeBytesHash().equals(acct.get().getCodeBytesHash()) )
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
+
+ if (!acctUsingAtData.getBlockchain().isValidWalletKey(respondRequests.foreignKey))
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
+
+ CrossChainTradeData crossChainTradeData = acctUsingAtData.populateTradeData(repository, atData);
+ if (crossChainTradeData == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);
+
+ if (crossChainTradeData.mode != AcctMode.OFFERING)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
+
+ // Check if there is a buy or a cancel request in progress for this trade
+ List txTypes = List.of(Transaction.TransactionType.MESSAGE);
+ List unconfirmed = repository.getTransactionRepository().getUnconfirmedTransactions(txTypes, null, 0, 0, false);
+ for (TransactionData transactionData : unconfirmed) {
+ MessageTransactionData messageTransactionData = (MessageTransactionData) transactionData;
+ if (Objects.equals(messageTransactionData.getRecipient(), atAddress)) {
+ // There is a pending request for this trade, so block this buy attempt to reduce the risk of refunds
+ throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Trade has an existing buy request or is pending cancellation.");
+ }
+ }
+
+ crossChainTradeDataList.add(crossChainTradeData);
+ }
+
+ AcctTradeBot.ResponseResult result
+ = TradeBot.getInstance().startResponseMultiple(
+ repository,
+ acct.get(),
+ crossChainTradeDataList,
+ respondRequests.receivingAddress,
+ respondRequests.foreignKey,
+ (Bitcoiny) acct.get().getBlockchain());
+
+ switch (result) {
+ case OK:
+ return "true";
+
+ case BALANCE_ISSUE:
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE);
+
+ case NETWORK_ISSUE:
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
+
+ default:
+ return "false";
+ }
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, e.getMessage());
+ }
+ }
+
@DELETE
@Operation(
summary = "Delete completed trade",
diff --git a/src/main/java/org/qortal/api/resource/CrossChainUtils.java b/src/main/java/org/qortal/api/resource/CrossChainUtils.java
index d1453bda..ddd1d2d6 100644
--- a/src/main/java/org/qortal/api/resource/CrossChainUtils.java
+++ b/src/main/java/org/qortal/api/resource/CrossChainUtils.java
@@ -1,5 +1,6 @@
package org.qortal.api.resource;
+import com.google.common.primitives.Bytes;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bitcoinj.core.Address;
@@ -7,19 +8,38 @@ import org.bitcoinj.core.Coin;
import org.bitcoinj.script.Script;
import org.bitcoinj.script.ScriptBuilder;
+import org.bouncycastle.util.Strings;
+import org.json.simple.JSONObject;
+import org.qortal.api.model.CrossChainTradeLedgerEntry;
+import org.qortal.api.model.crosschain.BitcoinyTBDRequest;
import org.qortal.crosschain.*;
import org.qortal.data.at.ATData;
+import org.qortal.data.at.ATStateData;
import org.qortal.data.crosschain.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
+import org.qortal.utils.Amounts;
+import org.qortal.utils.BitTwiddling;
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.io.Writer;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
import java.util.*;
import java.util.stream.Collectors;
public class CrossChainUtils {
+ public static final String QORT_CURRENCY_CODE = "QORT";
private static final Logger LOGGER = LogManager.getLogger(CrossChainUtils.class);
public static final String CORE_API_CALL = "Core API Call";
+ public static final String QORTAL_EXCHANGE_LABEL = "Qortal";
public static ServerConfigurationInfo buildServerConfigurationInfo(Bitcoiny blockchain) {
@@ -545,4 +565,210 @@ public class CrossChainUtils {
server.getConnectionType().toString(),
false);
}
+
	/**
	 * Get Bitcoiny TBD (To Be Determined)
	 *
	 * Resolves a BitcoinyTBD for the request's code, either from an existing instance
	 * or by building a new one from the derived network parameters.
	 *
	 * @param bitcoinyTBDRequest the parameters for the Bitcoiny TBD
	 * @return the Bitcoiny TBD, or null if resolution/construction failed (the error is logged)
	 * @throws DataException declared for callers, but never actually thrown here: all
	 *                       failures are caught below and reported via the null return
	 */
	public static BitcoinyTBD getBitcoinyTBD(BitcoinyTBDRequest bitcoinyTBDRequest) throws DataException {

		try {
			DeterminedNetworkParams networkParams = new DeterminedNetworkParams(bitcoinyTBDRequest);

			// NOTE(review): orElse evaluates buildInstance eagerly even when getInstance
			// already returns a value — consider orElseGet if building has side effects; confirm
			BitcoinyTBD bitcoinyTBD
				= BitcoinyTBD.getInstance(bitcoinyTBDRequest.getCode())
					.orElse(BitcoinyTBD.buildInstance(
						bitcoinyTBDRequest,
						networkParams)
					);

			return bitcoinyTBD;
		} catch (Exception e) {
			// Deliberate best-effort: log the failure and fall through to the null return
			LOGGER.error(e.getMessage(), e);
		}

		return null;
	}
+
+ /**
+ * Get Version Decimal
+ *
+ * @param jsonObject the JSON object with the version attribute
+ * @param attribute the attribute that hold the version value
+ * @return the version as a decimal number, discarding
+ * @throws NumberFormatException
+ */
+ public static double getVersionDecimal(JSONObject jsonObject, String attribute) throws NumberFormatException {
+ String versionString = (String) jsonObject.get(attribute);
+ return Double.parseDouble(reduceDelimeters(versionString, 1, '.'));
+ }
+
+ /**
+ * Reduce Delimeters
+ *
+ * @param value the raw string
+ * @param max the max number of the delimeter
+ * @param delimeter the delimeter
+ * @return the processed value with the max number of delimeters
+ */
+ public static String reduceDelimeters(String value, int max, char delimeter) {
+
+ if( max < 1 ) return value;
+
+ String[] splits = Strings.split(value, delimeter);
+
+ StringBuffer buffer = new StringBuffer(splits[0]);
+
+ int limit = Math.min(max + 1, splits.length);
+
+ for( int index = 1; index < limit; index++) {
+ buffer.append(delimeter);
+ buffer.append(splits[index]);
+ }
+
+ return buffer.toString();
+ }
+
+	/* Removed stray unterminated Javadoc fragment ("Returns ...") that previously merged into, and corrupted, the Javadoc below. */
+
+
	/**
	 * Build Offer Message
	 *
	 * Concatenates the partner's Bitcoin PKH, the hash of secret-A, and lockTimeA
	 * (widened to long, big-endian encoded) into a single payload.
	 *
	 * @param partnerBitcoinPKH the trade partner's Bitcoin public-key hash
	 * @param hashOfSecretA the hash of the partner's secret-A
	 * @param lockTimeA lock time for secret-A (presumably a Unix timestamp — confirm against ACCT spec)
	 * @return 'offer' MESSAGE payload for trade partner to send to AT creator's trade address
	 */
	public static byte[] buildOfferMessage(byte[] partnerBitcoinPKH, byte[] hashOfSecretA, int lockTimeA) {
		// lockTimeA is cast to long before encoding; assumed to match the AT's expected field width — TODO confirm
		byte[] lockTimeABytes = BitTwiddling.toBEByteArray((long) lockTimeA);
		return Bytes.concat(partnerBitcoinPKH, hashOfSecretA, lockTimeABytes);
	}
+
+ /**
+ * Write To Ledger
+ *
+ * @param writer the writer to the ledger
+ * @param entries the entries to write to the ledger
+ *
+ * @throws IOException
+ */
+ public static void writeToLedger(Writer writer, List entries) throws IOException {
+
+ BufferedWriter bufferedWriter = new BufferedWriter(writer);
+
+ StringJoiner header = new StringJoiner(",");
+ header.add("Market");
+ header.add("Currency");
+ header.add("Quantity");
+ header.add("Commission Paid");
+ header.add("Commission Currency");
+ header.add("Total Price");
+ header.add("Date Time");
+ header.add("Exchange");
+
+ bufferedWriter.append(header.toString());
+
+ DateFormat dateFormatter = new SimpleDateFormat("yyyyMMdd HH:mm");
+ dateFormatter.setTimeZone(TimeZone.getTimeZone("UTC"));
+
+ for( CrossChainTradeLedgerEntry entry : entries ) {
+ StringJoiner joiner = new StringJoiner(",");
+
+ joiner.add(entry.getMarket());
+ joiner.add(entry.getCurrency());
+ joiner.add(String.valueOf(Amounts.prettyAmount(entry.getQuantity())));
+ joiner.add(String.valueOf(Amounts.prettyAmount(entry.getFeeAmount())));
+ joiner.add(entry.getFeeCurrency());
+ joiner.add(String.valueOf(Amounts.prettyAmount(entry.getTotalPrice())));
+ joiner.add(dateFormatter.format(new Date(entry.getTradeTimestamp())));
+ joiner.add(QORTAL_EXCHANGE_LABEL);
+
+ bufferedWriter.newLine();
+ bufferedWriter.append(joiner.toString());
+ }
+
+ bufferedWriter.newLine();
+ bufferedWriter.flush();
+ }
+
+ /**
+ * Create Ledger File Name
+ *
+ * Create a file name the includes timestamp and address.
+ *
+ * @param address the address
+ *
+ * @return the file name created
+ */
+ public static String createLedgerFileName(String address) {
+ DateFormat dateFormatter = new SimpleDateFormat("yyyyMMddHHmmss");
+ String fileName = "ledger-" + address + "-" + dateFormatter.format(new Date());
+ return fileName;
+ }
+
+ /**
+ * Collect Ledger Entries
+ *
+ * @param publicKey the public key for the ledger entries, buy and sell
+ * @param repository the data repository
+ * @param minimumFinalHeight the minimum block height for entries to be collected
+ * @param entries the ledger entries to add to
+ * @param codeHash code hash for the entry blockchain
+ * @param acct the ACCT for the entry blockchain
+ * @param isBuy true collecting entries for a buy, otherwise false
+ *
+ * @throws DataException
+ */
+ public static void collectLedgerEntries(
+ byte[] publicKey,
+ Repository repository,
+ Integer minimumFinalHeight,
+ List entries,
+ byte[] codeHash,
+ ACCT acct,
+ boolean isBuy) throws DataException {
+
+ // get all the final AT states for the code hash (foreign coin)
+ List atStates
+ = repository.getATRepository().getMatchingFinalATStates(
+ codeHash,
+ isBuy ? publicKey : null,
+ !isBuy ? publicKey : null,
+ Boolean.TRUE, acct.getModeByteOffset(),
+ (long) AcctMode.REDEEMED.value,
+ minimumFinalHeight,
+ null, null, false
+ );
+
+ String foreignBlockchainCurrencyCode = acct.getBlockchain().getCurrencyCode();
+
+ // for each trade, build ledger entry, collect ledger entry
+ for (ATStateData atState : atStates) {
+ CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atState);
+
+ // We also need block timestamp for use as trade timestamp
+ long localTimestamp = repository.getBlockRepository().getTimestampFromHeight(atState.getHeight());
+
+ if (localTimestamp == 0) {
+ // Try the archive
+ localTimestamp = repository.getBlockArchiveRepository().getTimestampFromHeight(atState.getHeight());
+ }
+
+ CrossChainTradeLedgerEntry ledgerEntry
+ = new CrossChainTradeLedgerEntry(
+ isBuy ? QORT_CURRENCY_CODE : foreignBlockchainCurrencyCode,
+ isBuy ? foreignBlockchainCurrencyCode : QORT_CURRENCY_CODE,
+ isBuy ? crossChainTradeData.qortAmount : crossChainTradeData.expectedForeignAmount,
+ 0,
+ foreignBlockchainCurrencyCode,
+ isBuy ? crossChainTradeData.expectedForeignAmount : crossChainTradeData.qortAmount,
+ localTimestamp);
+
+ entries.add(ledgerEntry);
+ }
+ }
}
\ No newline at end of file
diff --git a/src/main/java/org/qortal/api/restricted/resource/AdminResource.java b/src/main/java/org/qortal/api/restricted/resource/AdminResource.java
index 837288e5..439904eb 100644
--- a/src/main/java/org/qortal/api/restricted/resource/AdminResource.java
+++ b/src/main/java/org/qortal/api/restricted/resource/AdminResource.java
@@ -32,13 +32,16 @@ import org.qortal.controller.Synchronizer.SynchronizationResult;
import org.qortal.controller.repository.BlockArchiveRebuilder;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
+import org.qortal.data.system.DbConnectionInfo;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.PeerAddress;
+import org.qortal.repository.ReindexManager;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
+import org.qortal.data.system.SystemInfo;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
@@ -51,6 +54,7 @@ import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Paths;
+import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -458,7 +462,7 @@ public class AdminResource {
// Qortal: check reward-share's minting account is still allowed to mint
Account rewardShareMintingAccount = new Account(repository, rewardShareData.getMinter());
- if (!rewardShareMintingAccount.canMint())
+ if (!rewardShareMintingAccount.canMint(false))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.CANNOT_MINT);
MintingAccountData mintingAccountData = new MintingAccountData(mintingAccount.getPrivateKey(), mintingAccount.getPublicKey());
@@ -894,6 +898,50 @@ public class AdminResource {
}
}
+ @POST
+ @Path("/repository/reindex")
+ @Operation(
+ summary = "Reindex repository",
+ description = "Rebuilds all transactions and balances from archived blocks. Warning: takes around 1 week, and the core will not function normally during this time. If 'false' is returned, the database may be left in an inconsistent state, requiring another reindex or a bootstrap to correct it.",
+ responses = {
+ @ApiResponse(
+ description = "\"true\"",
+ content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
+ )
+ }
+ )
+ @ApiErrors({ApiError.REPOSITORY_ISSUE, ApiError.BLOCKCHAIN_NEEDS_SYNC})
+ @SecurityRequirement(name = "apiKey")
+ public String reindex(@HeaderParam(Security.API_KEY_HEADER) String apiKey) {
+ Security.checkApiCallAllowed(request);
+
+ if (Synchronizer.getInstance().isSynchronizing())
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
+
+ try {
+ ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
+
+ blockchainLock.lockInterruptibly();
+
+ try {
+ ReindexManager reindexManager = new ReindexManager();
+ reindexManager.reindex();
+ return "true";
+
+ } catch (DataException e) {
+ LOGGER.info("DataException when reindexing: {}", e.getMessage());
+
+ } finally {
+ blockchainLock.unlock();
+ }
+ } catch (InterruptedException e) {
+ // We couldn't lock blockchain to perform reindex
+ return "false";
+ }
+
+ return "false";
+ }
+
@DELETE
@Path("/repository")
@Operation(
@@ -966,8 +1014,6 @@ public class AdminResource {
}
}
-
-
@POST
@Path("/apikey/generate")
@Operation(
@@ -1021,4 +1067,50 @@ public class AdminResource {
return "true";
}
-}
+ @GET
+ @Path("/systeminfo")
+ @Operation(
+ summary = "System Information",
+ description = "System memory usage and available processors.",
+ responses = {
+ @ApiResponse(
+ description = "memory usage and available processors",
+ content = @Content(mediaType = MediaType.APPLICATION_JSON, schema = @Schema(implementation = SystemInfo.class))
+ )
+ }
+ )
+ @ApiErrors({ApiError.REPOSITORY_ISSUE})
+ public SystemInfo getSystemInformation() {
+
+ SystemInfo info
+ = new SystemInfo(
+ Runtime.getRuntime().freeMemory(),
+ Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(),
+ Runtime.getRuntime().totalMemory(),
+ Runtime.getRuntime().maxMemory(),
+ Runtime.getRuntime().availableProcessors());
+
+ return info;
+ }
+
+ @GET
+ @Path("/dbstates")
+ @Operation(
+ summary = "Get DB States",
+ description = "Get DB States",
+ responses = {
+ @ApiResponse(
+ content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = DbConnectionInfo.class)))
+ )
+ }
+ )
+ public List getDbConnectionsStates() {
+
+ try {
+ return Controller.REPOSITORY_FACTORY.getDbConnectionsStates();
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
+ return new ArrayList<>(0);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/qortal/api/websocket/ActiveChatsWebSocket.java b/src/main/java/org/qortal/api/websocket/ActiveChatsWebSocket.java
index b92fb19f..ca3ef2b3 100644
--- a/src/main/java/org/qortal/api/websocket/ActiveChatsWebSocket.java
+++ b/src/main/java/org/qortal/api/websocket/ActiveChatsWebSocket.java
@@ -77,7 +77,9 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
}
try (final Repository repository = RepositoryManager.getRepository()) {
- ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session));
+ Boolean hasChatReference = getHasChatReference(session);
+
+ ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session), hasChatReference);
StringWriter stringWriter = new StringWriter();
@@ -103,4 +105,20 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
return Encoding.valueOf(encoding);
}
+ private Boolean getHasChatReference(Session session) {
+ Map> queryParams = session.getUpgradeRequest().getParameterMap();
+ List hasChatReferenceList = queryParams.get("haschatreference");
+
+ // Return null if not specified
+ if (hasChatReferenceList != null && hasChatReferenceList.size() == 1) {
+ String value = hasChatReferenceList.get(0).toLowerCase();
+ if (value.equals("true")) {
+ return true;
+ } else if (value.equals("false")) {
+ return false;
+ }
+ }
+ return null; // Ignored if not present
+ }
+
}
diff --git a/src/main/java/org/qortal/api/websocket/DataMonitorSocket.java b/src/main/java/org/qortal/api/websocket/DataMonitorSocket.java
new file mode 100644
index 00000000..a93bf2ed
--- /dev/null
+++ b/src/main/java/org/qortal/api/websocket/DataMonitorSocket.java
@@ -0,0 +1,102 @@
+package org.qortal.api.websocket;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.eclipse.jetty.websocket.api.Session;
+import org.eclipse.jetty.websocket.api.WebSocketException;
+import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
+import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
+import org.eclipse.jetty.websocket.api.annotations.OnWebSocketError;
+import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
+import org.eclipse.jetty.websocket.api.annotations.WebSocket;
+import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
+import org.qortal.api.ApiError;
+import org.qortal.controller.Controller;
+import org.qortal.data.arbitrary.DataMonitorInfo;
+import org.qortal.event.DataMonitorEvent;
+import org.qortal.event.Event;
+import org.qortal.event.EventBus;
+import org.qortal.event.Listener;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.utils.Base58;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.List;
+
@WebSocket
@SuppressWarnings("serial")
/**
 * Websocket that pushes {@link DataMonitorEvent}s from the internal event bus to every
 * connected client as a marshalled {@link DataMonitorInfo} summary.
 */
public class DataMonitorSocket extends ApiWebSocket implements Listener {

	private static final Logger LOGGER = LogManager.getLogger(DataMonitorSocket.class);

	@Override
	public void configure(WebSocketServletFactory factory) {
		// NOTE(review): info-level logging here and in onWebSocketMessage may be noisy; consider debug
		LOGGER.info("configure");

		factory.register(DataMonitorSocket.class);

		// Subscribe this socket to the global event bus so listen() receives events
		EventBus.INSTANCE.addListener(this);
	}

	@Override
	public void listen(Event event) {
		// Only DataMonitorEvents are relayed; all other event types are ignored
		if (!(event instanceof DataMonitorEvent))
			return;

		DataMonitorEvent dataMonitorEvent = (DataMonitorEvent) event;

		// Broadcast the event summary to every connected session
		for (Session session : getSessions())
			sendDataEventSummary(session, buildInfo(dataMonitorEvent));
	}

	/** Copies the event's fields into the serializable DataMonitorInfo DTO. */
	private DataMonitorInfo buildInfo(DataMonitorEvent dataMonitorEvent) {

		return new DataMonitorInfo(
			dataMonitorEvent.getTimestamp(),
			dataMonitorEvent.getIdentifier(),
			dataMonitorEvent.getName(),
			dataMonitorEvent.getService(),
			dataMonitorEvent.getDescription(),
			dataMonitorEvent.getTransactionTimestamp(),
			dataMonitorEvent.getLatestPutTimestamp()
		);
	}

	@OnWebSocketConnect
	@Override
	public void onWebSocketConnect(Session session) {
		super.onWebSocketConnect(session);
	}

	@OnWebSocketClose
	@Override
	public void onWebSocketClose(Session session, int statusCode, String reason) {
		super.onWebSocketClose(session, statusCode, reason);
	}

	@OnWebSocketError
	public void onWebSocketError(Session session, Throwable throwable) {
		/* We ignore errors for now, but method here to silence log spam */
	}

	@OnWebSocketMessage
	public void onWebSocketMessage(Session session, String message) {
		// Inbound messages are logged but otherwise unused by this socket
		LOGGER.info("onWebSocketMessage: message = " + message);
	}

	/** Marshalls the info to JSON and sends it asynchronously; send failures are silently dropped. */
	private void sendDataEventSummary(Session session, DataMonitorInfo dataMonitorInfo) {
		StringWriter stringWriter = new StringWriter();

		try {
			marshall(stringWriter, dataMonitorInfo);

			session.getRemote().sendStringByFuture(stringWriter.toString());
		} catch (IOException | WebSocketException e) {
			// No output this time
		}
	}

}
diff --git a/src/main/java/org/qortal/api/websocket/TradeOffersWebSocket.java b/src/main/java/org/qortal/api/websocket/TradeOffersWebSocket.java
index 96257f4a..911cf188 100644
--- a/src/main/java/org/qortal/api/websocket/TradeOffersWebSocket.java
+++ b/src/main/java/org/qortal/api/websocket/TradeOffersWebSocket.java
@@ -98,7 +98,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
byte[] codeHash = acctInfo.getKey().value;
ACCT acct = acctInfo.getValue().get();
- List atStates = repository.getATRepository().getMatchingFinalATStates(codeHash,
+ List atStates = repository.getATRepository().getMatchingFinalATStates(codeHash, null, null,
isFinished, dataByteOffset, expectedValue, minimumFinalHeight,
null, null, null);
@@ -259,7 +259,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
ACCT acct = acctInfo.getValue().get();
Integer dataByteOffset = acct.getModeByteOffset();
- List initialAtStates = repository.getATRepository().getMatchingFinalATStates(codeHash,
+ List initialAtStates = repository.getATRepository().getMatchingFinalATStates(codeHash, null, null,
isFinished, dataByteOffset, expectedValue, minimumFinalHeight,
null, null, null);
@@ -298,7 +298,7 @@ public class TradeOffersWebSocket extends ApiWebSocket implements Listener {
byte[] codeHash = acctInfo.getKey().value;
ACCT acct = acctInfo.getValue().get();
- List historicAtStates = repository.getATRepository().getMatchingFinalATStates(codeHash,
+ List historicAtStates = repository.getATRepository().getMatchingFinalATStates(codeHash, null, null,
isFinished, dataByteOffset, expectedValue, minimumFinalHeight,
null, null, null);
diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java
index 78a9ee86..6d7e0e23 100644
--- a/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java
+++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java
@@ -439,7 +439,15 @@ public class ArbitraryDataReader {
// Ensure the complete hash matches the joined chunks
if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) {
// Delete the invalid file
- arbitraryDataFile.delete();
+ LOGGER.info("Deleting invalid file: path = " + arbitraryDataFile.getFilePath());
+
+ if( arbitraryDataFile.delete() ) {
+ LOGGER.info("Deleted invalid file successfully: path = " + arbitraryDataFile.getFilePath());
+ }
+ else {
+ LOGGER.warn("Could not delete invalid file: path = " + arbitraryDataFile.getFilePath());
+ }
+
throw new DataException("Unable to validate complete file hash");
}
}
diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java
index 5200b5e2..eb51e8a4 100644
--- a/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java
+++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java
@@ -168,7 +168,7 @@ public class ArbitraryDataRenderer {
byte[] data = Files.readAllBytes(filePath); // TODO: limit file size that can be read into memory
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting);
htmlParser.addAdditionalHeaderTags();
- response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' data: blob:; img-src 'self' data: blob:;");
+ response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; font-src 'self' data:; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' wss:;");
response.setContentType(context.getMimeType(filename));
response.setContentLength(htmlParser.getData().length);
response.getOutputStream().write(htmlParser.getData());
diff --git a/src/main/java/org/qortal/arbitrary/misc/Service.java b/src/main/java/org/qortal/arbitrary/misc/Service.java
index 02a513fd..fccbb535 100644
--- a/src/main/java/org/qortal/arbitrary/misc/Service.java
+++ b/src/main/java/org/qortal/arbitrary/misc/Service.java
@@ -167,7 +167,7 @@ public enum Service {
COMMENT(1800, true, 500*1024L, true, false, null),
CHAIN_COMMENT(1810, true, 239L, true, false, null),
MAIL(1900, true, 1024*1024L, true, false, null),
- MAIL_PRIVATE(1901, true, 1024*1024L, true, true, null),
+ MAIL_PRIVATE(1901, true, 5*1024*1024L, true, true, null),
MESSAGE(1910, true, 1024*1024L, true, false, null),
MESSAGE_PRIVATE(1911, true, 1024*1024L, true, true, null);
diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java
index bacff5b4..67e6dd43 100644
--- a/src/main/java/org/qortal/block/Block.java
+++ b/src/main/java/org/qortal/block/Block.java
@@ -23,12 +23,12 @@ import org.qortal.data.at.ATStateData;
import org.qortal.data.block.BlockData;
import org.qortal.data.block.BlockSummaryData;
import org.qortal.data.block.BlockTransactionData;
+import org.qortal.data.group.GroupAdminData;
import org.qortal.data.network.OnlineAccountData;
import org.qortal.data.transaction.TransactionData;
-import org.qortal.repository.ATRepository;
-import org.qortal.repository.DataException;
-import org.qortal.repository.Repository;
-import org.qortal.repository.TransactionRepository;
+import org.qortal.group.Group;
+import org.qortal.repository.*;
+import org.qortal.settings.Settings;
import org.qortal.transaction.AtTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.transaction.Transaction.ApprovalStatus;
@@ -39,6 +39,7 @@ import org.qortal.transform.block.BlockTransformer;
import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.Amounts;
import org.qortal.utils.Base58;
+import org.qortal.utils.Groups;
import org.qortal.utils.NTP;
import java.io.ByteArrayOutputStream;
@@ -104,6 +105,7 @@ public class Block {
protected Repository repository;
protected BlockData blockData;
protected PublicKeyAccount minter;
+ boolean isTestnet = Settings.getInstance().isTestNet();
// Other properties
private static final Logger LOGGER = LogManager.getLogger(Block.class);
@@ -142,11 +144,14 @@ public class Block {
private final Account mintingAccount;
private final AccountData mintingAccountData;
private final boolean isMinterFounder;
+ private final boolean isMinterMember;
private final Account recipientAccount;
private final AccountData recipientAccountData;
- ExpandedAccount(Repository repository, RewardShareData rewardShareData) throws DataException {
+ final BlockChain blockChain = BlockChain.getInstance();
+
+ ExpandedAccount(Repository repository, RewardShareData rewardShareData, int blockHeight) throws DataException {
this.rewardShareData = rewardShareData;
this.sharePercent = this.rewardShareData.getSharePercent();
@@ -155,6 +160,12 @@ public class Block {
this.isMinterFounder = Account.isFounder(mintingAccountData.getFlags());
this.isRecipientAlsoMinter = this.rewardShareData.getRecipient().equals(this.mintingAccount.getAddress());
+ this.isMinterMember
+ = Groups.memberExistsInAnyGroup(
+ repository.getGroupRepository(),
+ Groups.getGroupIdsToMint(BlockChain.getInstance(), blockHeight),
+ this.mintingAccount.getAddress()
+ );
if (this.isRecipientAlsoMinter) {
// Self-share: minter is also recipient
@@ -167,6 +178,19 @@ public class Block {
}
}
+ /**
+ * Get Effective Minting Level
+ *
+ * @return the effective minting level, or zero if a DataException is thrown while fetching it
+ */
+ public int getEffectiveMintingLevel() {
+ try {
+ return this.mintingAccount.getEffectiveMintingLevel();
+ } catch (DataException e) {
+ return 0;
+ }
+ }
+
public Account getMintingAccount() {
return this.mintingAccount;
}
@@ -179,19 +203,23 @@ public class Block {
*
* This is a method, not a final variable, because account's level can change between construction and call,
* e.g. during Block.process() where account levels are bumped right before Block.distributeBlockReward().
- *
+ *
* @return account-level share "bin" from blockchain config, or null if founder / none found
*/
public AccountLevelShareBin getShareBin(int blockHeight) {
- if (this.isMinterFounder)
+ if (this.isMinterFounder && blockHeight < BlockChain.getInstance().getAdminsReplaceFoundersHeight())
return null;
final int accountLevel = this.mintingAccountData.getLevel();
if (accountLevel <= 0)
return null; // level 0 isn't included in any share bins
+ if (blockHeight >= blockChain.getFixBatchRewardHeight()) {
+ if (!this.isMinterMember)
+ return null; // accounts that are not minter-group members are excluded from all share bins
+ }
+
// Select the correct set of share bins based on block height
- final BlockChain blockChain = BlockChain.getInstance();
final AccountLevelShareBin[] shareBinsByLevel = (blockHeight >= blockChain.getSharesByLevelV2Height()) ?
blockChain.getShareBinsByAccountLevelV2() : blockChain.getShareBinsByAccountLevelV1();
@@ -260,7 +288,7 @@ public class Block {
* Constructs new Block without loading transactions and AT states.
*
* Transactions and AT states are loaded on first call to getTransactions() or getATStates() respectively.
- *
+ *
* @param repository
* @param blockData
*/
@@ -331,7 +359,7 @@ public class Block {
/**
* Constructs new Block with empty transaction list, using passed minter account.
- *
+ *
* @param repository
* @param blockData
* @param minter
@@ -349,7 +377,7 @@ public class Block {
* This constructor typically used when minting a new block.
*
* Note that CIYAM ATs will be executed and AT-Transactions prepended to this block, along with AT state data and fees.
- *
+ *
* @param repository
* @param parentBlockData
* @param minter
@@ -375,7 +403,7 @@ public class Block {
byte[] encodedOnlineAccounts = new byte[0];
int onlineAccountsCount = 0;
byte[] onlineAccountsSignatures = null;
-
+
if (isBatchRewardDistributionBlock(height)) {
// Batch reward distribution block - copy online accounts from recent block with highest online accounts count
@@ -396,7 +424,9 @@ public class Block {
onlineAccounts.removeIf(a -> a.getNonce() == null || a.getNonce() < 0);
// After feature trigger, remove any online accounts that are level 0
- if (height >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
+ // but only if the block height is below the ignore-level feature trigger
+ if (height < BlockChain.getInstance().getIgnoreLevelForRewardShareHeight() &&
+ height >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
onlineAccounts.removeIf(a -> {
try {
return Account.getRewardShareEffectiveMintingLevel(repository, a.getPublicKey()) == 0;
@@ -407,6 +437,21 @@ public class Block {
});
}
+ // After feature trigger, remove any online accounts that are not minter group members
+ if (height >= BlockChain.getInstance().getGroupMemberCheckHeight()) {
+ onlineAccounts.removeIf(a -> {
+ try {
+ List groupIdsToMint = Groups.getGroupIdsToMint(BlockChain.getInstance(), height);
+ String address = Account.getRewardShareMintingAddress(repository, a.getPublicKey());
+ boolean isMinterGroupMember = Groups.memberExistsInAnyGroup(repository.getGroupRepository(), groupIdsToMint, address);
+ return !isMinterGroupMember;
+ } catch (DataException e) {
+ // Something went wrong, so remove the account
+ return true;
+ }
+ });
+ }
+
if (onlineAccounts.isEmpty()) {
LOGGER.debug("No online accounts - not even our own?");
return null;
@@ -510,7 +555,7 @@ public class Block {
* Mints new block using this block as template, but with different minting account.
*
* NOTE: uses the same transactions list, AT states, etc.
- *
+ *
* @param minter
* @return
* @throws DataException
@@ -596,7 +641,7 @@ public class Block {
/**
* Return composite block signature (minterSignature + transactionsSignature).
- *
+ *
* @return byte[], or null if either component signature is null.
*/
public byte[] getSignature() {
@@ -611,7 +656,7 @@ public class Block {
*
* We're starting with version 4 as a nod to being newer than successor Qora,
* whose latest block version was 3.
- *
+ *
* @return 1, 2, 3 or 4
*/
public int getNextBlockVersion() {
@@ -625,7 +670,7 @@ public class Block {
* Return block's transactions.
*
* If the block was loaded from repository then it's possible this method will call the repository to fetch the transactions if not done already.
- *
+ *
* @return
* @throws DataException
*/
@@ -659,7 +704,7 @@ public class Block {
* If the block was loaded from repository then it's possible this method will call the repository to fetch the AT states if not done already.
*
* Note: AT states fetched from repository only contain summary info, not actual data like serialized state data or AT creation timestamps!
- *
+ *
* @return
* @throws DataException
*/
@@ -695,7 +740,7 @@ public class Block {
*
* Typically called as part of Block.process() or Block.orphan()
* so ideally after any calls to Block.isValid().
- *
+ *
* @throws DataException
*/
public List getExpandedAccounts() throws DataException {
@@ -713,10 +758,12 @@ public class Block {
List expandedAccounts = new ArrayList<>();
- for (RewardShareData rewardShare : this.cachedOnlineRewardShares)
- expandedAccounts.add(new ExpandedAccount(repository, rewardShare));
+ for (RewardShareData rewardShare : this.cachedOnlineRewardShares) {
+ expandedAccounts.add(new ExpandedAccount(repository, rewardShare, this.blockData.getHeight()));
+ }
this.cachedExpandedAccounts = expandedAccounts;
+ LOGGER.trace(() -> String.format("Online reward-shares after expanded accounts %s", this.cachedOnlineRewardShares));
return this.cachedExpandedAccounts;
}
@@ -725,7 +772,7 @@ public class Block {
/**
* Load parent block's data from repository via this block's reference.
- *
+ *
* @return parent's BlockData, or null if no parent found
* @throws DataException
*/
@@ -739,7 +786,7 @@ public class Block {
/**
* Load child block's data from repository via this block's signature.
- *
+ *
* @return child's BlockData, or null if no parent found
* @throws DataException
*/
@@ -759,7 +806,7 @@ public class Block {
* Used when constructing a new block during minting.
*
* Requires block's {@code minter} being a {@code PrivateKeyAccount} so block's transactions signature can be recalculated.
- *
+ *
* @param transactionData
* @return true if transaction successfully added to block, false otherwise
* @throws IllegalStateException
@@ -812,7 +859,7 @@ public class Block {
* Used when constructing a new block during minting.
*
* Requires block's {@code minter} being a {@code PrivateKeyAccount} so block's transactions signature can be recalculated.
- *
+ *
* @param transactionData
* @throws IllegalStateException
* if block's {@code minter} is not a {@code PrivateKeyAccount}.
@@ -857,7 +904,7 @@ public class Block {
* previous block's minter signature + minter's public key + (encoded) online-accounts data
*
* (Previous block's minter signature is extracted from this block's reference).
- *
+ *
* @throws IllegalStateException
* if block's {@code minter} is not a {@code PrivateKeyAccount}.
* @throws RuntimeException
@@ -874,7 +921,7 @@ public class Block {
* Recalculate block's transactions signature.
*
* Requires block's {@code minter} being a {@code PrivateKeyAccount}.
- *
+ *
* @throws IllegalStateException
* if block's {@code minter} is not a {@code PrivateKeyAccount}.
* @throws RuntimeException
@@ -996,7 +1043,7 @@ public class Block {
* Recalculate block's minter and transactions signatures, thus giving block full signature.
*
* Note: Block instance must have been constructed with a PrivateKeyAccount minter or this call will throw an IllegalStateException.
- *
+ *
* @throws IllegalStateException
* if block's {@code minter} is not a {@code PrivateKeyAccount}.
*/
@@ -1009,7 +1056,7 @@ public class Block {
/**
* Returns whether this block's signatures are valid.
- *
+ *
* @return true if both minter and transaction signatures are valid, false otherwise
*/
public boolean isSignatureValid() {
@@ -1033,7 +1080,7 @@ public class Block {
*
* Used by BlockMinter to check whether it's time to mint a new block,
* and also used by Block.isValid for checks (if not a testchain).
- *
+ *
* @return ValidationResult.OK if timestamp valid, or some other ValidationResult otherwise.
* @throws DataException
*/
@@ -1122,14 +1169,32 @@ public class Block {
if (onlineRewardShares == null)
return ValidationResult.ONLINE_ACCOUNT_UNKNOWN;
- // After feature trigger, require all online account minters to be greater than level 0
- if (this.getBlockData().getHeight() >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
- List expandedAccounts = this.getExpandedAccounts();
+ // After feature trigger, require all online account minters to be greater than level 0,
+ // but only if it is before the feature trigger where we ignore level again
+ if (this.blockData.getHeight() < BlockChain.getInstance().getIgnoreLevelForRewardShareHeight() &&
+ this.getBlockData().getHeight() >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
+ List expandedAccounts
+ = this.getExpandedAccounts().stream()
+ .filter(expandedAccount -> expandedAccount.isMinterMember)
+ .collect(Collectors.toList());
+
for (ExpandedAccount account : expandedAccounts) {
if (account.getMintingAccount().getEffectiveMintingLevel() == 0)
return ValidationResult.ONLINE_ACCOUNTS_INVALID;
+
+ if (this.getBlockData().getHeight() >= BlockChain.getInstance().getFixBatchRewardHeight()) {
+ if (!account.isMinterMember)
+ return ValidationResult.ONLINE_ACCOUNTS_INVALID;
+ }
}
}
+ else if (this.blockData.getHeight() >= BlockChain.getInstance().getIgnoreLevelForRewardShareHeight()){
+ Optional anyInvalidAccount
+ = this.getExpandedAccounts().stream()
+ .filter(account -> !account.isMinterMember)
+ .findAny();
+ if( anyInvalidAccount.isPresent() ) return ValidationResult.ONLINE_ACCOUNTS_INVALID;
+ }
// If block is past a certain age then we simply assume the signatures were correct
long signatureRequirementThreshold = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMinLifetime();
@@ -1213,7 +1278,7 @@ public class Block {
*
* Checks block's transactions by testing their validity then processing them.
* Hence uses a repository savepoint during execution.
- *
+ *
* @return ValidationResult.OK if block is valid, or some other ValidationResult otherwise.
* @throws DataException
*/
@@ -1256,6 +1321,7 @@ public class Block {
// Online Accounts
ValidationResult onlineAccountsResult = this.areOnlineAccountsValid();
+ LOGGER.trace("Accounts valid = {}", onlineAccountsResult);
if (onlineAccountsResult != ValidationResult.OK)
return onlineAccountsResult;
@@ -1281,13 +1347,20 @@ public class Block {
// Create repository savepoint here so we can rollback to it after testing transactions
repository.setSavepoint();
- if (this.blockData.getHeight() == 212937) {
- // Apply fix for block 212937 but fix will be rolled back before we exit method
- Block212937.processFix(this);
- }
- else if (InvalidNameRegistrationBlocks.isAffectedBlock(this.blockData.getHeight())) {
- // Apply fix for affected name registration blocks, but fix will be rolled back before we exit method
- InvalidNameRegistrationBlocks.processFix(this);
+ if (!isTestnet) {
+ if (this.blockData.getHeight() == 212937) {
+ // Apply fix for block 212937 but fix will be rolled back before we exit method
+ Block212937.processFix(this);
+ } else if (this.blockData.getHeight() == 1333492) {
+ // Apply fix for block 1333492 but fix will be rolled back before we exit method
+ Block1333492.processFix(this);
+ } else if (InvalidNameRegistrationBlocks.isAffectedBlock(this.blockData.getHeight())) {
+ // Apply fix for affected name registration blocks, but fix will be rolled back before we exit method
+ InvalidNameRegistrationBlocks.processFix(this);
+ } else if (InvalidBalanceBlocks.isAffectedBlock(this.blockData.getHeight())) {
+ // Apply fix for affected balance blocks, but fix will be rolled back before we exit method
+ InvalidBalanceBlocks.processFix(this);
+ }
}
for (Transaction transaction : this.getTransactions()) {
@@ -1337,7 +1410,7 @@ public class Block {
// Check transaction can even be processed
validationResult = transaction.isProcessable();
if (validationResult != Transaction.ValidationResult.OK) {
- LOGGER.info(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
+ LOGGER.debug(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
return ValidationResult.TRANSACTION_INVALID;
}
@@ -1377,7 +1450,7 @@ public class Block {
*
* NOTE: will execute ATs locally if not already done.
* This is so we have locally-generated AT states for comparison.
- *
+ *
* @return OK, or some AT-related validation result
* @throws DataException
*/
@@ -1453,11 +1526,11 @@ public class Block {
* Note: this method does not store new AT state data into repository - that is handled by process().
*
* This method is not needed if fetching an existing block from the repository as AT state data will be loaded from repository as well.
- *
+ *
* @see #isValid()
- *
+ *
* @throws DataException
- *
+ *
*/
private void executeATs() throws DataException {
// We're expecting a lack of AT state data at this point.
@@ -1509,7 +1582,7 @@ public class Block {
return false;
Account mintingAccount = new PublicKeyAccount(this.repository, rewardShareData.getMinterPublicKey());
- return mintingAccount.canMint();
+ return mintingAccount.canMint(false);
}
/**
@@ -1529,7 +1602,7 @@ public class Block {
/**
* Process block, and its transactions, adding them to the blockchain.
- *
+ *
* @throws DataException
*/
public void process() throws DataException {
@@ -1538,6 +1611,7 @@ public class Block {
this.blockData.setHeight(blockchainHeight + 1);
LOGGER.trace(() -> String.format("Processing block %d", this.blockData.getHeight()));
+ LOGGER.trace(() -> String.format("Online Reward Shares in process %s", this.cachedOnlineRewardShares));
if (this.blockData.getHeight() > 1) {
@@ -1550,21 +1624,23 @@ public class Block {
processBlockRewards();
}
- if (this.blockData.getHeight() == 212937) {
- // Apply fix for block 212937
- Block212937.processFix(this);
- }
-
- if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV1Height()) {
- SelfSponsorshipAlgoV1Block.processAccountPenalties(this);
- }
-
- if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV2Height()) {
- SelfSponsorshipAlgoV2Block.processAccountPenalties(this);
- }
-
- if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
- SelfSponsorshipAlgoV3Block.processAccountPenalties(this);
+ if (!isTestnet) {
+ if (this.blockData.getHeight() == 212937) {
+ // Apply fix for block 212937
+ Block212937.processFix(this);
+ } else if (this.blockData.getHeight() == 1333492) {
+ // Apply fix for block 1333492
+ Block1333492.processFix(this);
+ } else if (InvalidBalanceBlocks.isAffectedBlock(this.blockData.getHeight())) {
+ // Apply fix for affected balance blocks
+ InvalidBalanceBlocks.processFix(this);
+ } else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV1Height()) {
+ SelfSponsorshipAlgoV1Block.processAccountPenalties(this);
+ } else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV2Height()) {
+ SelfSponsorshipAlgoV2Block.processAccountPenalties(this);
+ } else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
+ SelfSponsorshipAlgoV3Block.processAccountPenalties(this);
+ }
}
}
@@ -1607,7 +1683,17 @@ public class Block {
final List cumulativeBlocksByLevel = BlockChain.getInstance().getCumulativeBlocksByLevel();
final int maximumLevel = cumulativeBlocksByLevel.size() - 1;
- final List expandedAccounts = this.getExpandedAccounts();
+ final List expandedAccounts;
+
+ if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight()) {
+ expandedAccounts = this.getExpandedAccounts().stream().collect(Collectors.toList());
+ }
+ else {
+ expandedAccounts
+ = this.getExpandedAccounts().stream()
+ .filter(expandedAccount -> expandedAccount.isMinterMember)
+ .collect(Collectors.toList());
+ }
Set allUniqueExpandedAccounts = new HashSet<>();
for (ExpandedAccount expandedAccount : expandedAccounts) {
@@ -1828,7 +1914,7 @@ public class Block {
/**
* Removes block from blockchain undoing transactions and adding them to unconfirmed pile.
- *
+ *
* @throws DataException
*/
public void orphan() throws DataException {
@@ -1850,23 +1936,25 @@ public class Block {
// Invalidate expandedAccounts as they may have changed due to orphaning TRANSFER_PRIVS transactions, etc.
this.cachedExpandedAccounts = null;
- if (this.blockData.getHeight() == 212937) {
- // Revert fix for block 212937
- Block212937.orphanFix(this);
+ if (!isTestnet) {
+ if (this.blockData.getHeight() == 212937) {
+ // Revert fix for block 212937
+ Block212937.orphanFix(this);
+ } else if (this.blockData.getHeight() == 1333492) {
+ // Revert fix for block 1333492
+ Block1333492.orphanFix(this);
+ } else if (InvalidBalanceBlocks.isAffectedBlock(this.blockData.getHeight())) {
+ // Revert fix for affected balance blocks
+ InvalidBalanceBlocks.orphanFix(this);
+ } else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV1Height()) {
+ SelfSponsorshipAlgoV1Block.orphanAccountPenalties(this);
+ } else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV2Height()) {
+ SelfSponsorshipAlgoV2Block.orphanAccountPenalties(this);
+ } else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
+ SelfSponsorshipAlgoV3Block.orphanAccountPenalties(this);
+ }
}
- if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV1Height()) {
- SelfSponsorshipAlgoV1Block.orphanAccountPenalties(this);
- }
-
- if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV2Height()) {
- SelfSponsorshipAlgoV2Block.orphanAccountPenalties(this);
- }
-
- if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
- SelfSponsorshipAlgoV3Block.orphanAccountPenalties(this);
- }
-
// Account levels and block rewards are only processed/orphaned on block reward distribution blocks
if (this.isRewardDistributionBlock()) {
// Block rewards, including transaction fees, removed after transactions undone
@@ -2005,7 +2093,17 @@ public class Block {
final List cumulativeBlocksByLevel = BlockChain.getInstance().getCumulativeBlocksByLevel();
final int maximumLevel = cumulativeBlocksByLevel.size() - 1;
- final List expandedAccounts = this.getExpandedAccounts();
+ final List expandedAccounts;
+
+ if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight()) {
+ expandedAccounts = this.getExpandedAccounts().stream().collect(Collectors.toList());
+ }
+ else {
+ expandedAccounts
+ = this.getExpandedAccounts().stream()
+ .filter(expandedAccount -> expandedAccount.isMinterMember)
+ .collect(Collectors.toList());
+ }
Set allUniqueExpandedAccounts = new HashSet<>();
for (ExpandedAccount expandedAccount : expandedAccounts) {
@@ -2200,6 +2298,7 @@ public class Block {
List accountBalanceDeltas = balanceChanges.entrySet().stream()
.map(entry -> new AccountBalanceData(entry.getKey(), Asset.QORT, entry.getValue()))
.collect(Collectors.toList());
+ LOGGER.trace("Account Balance Deltas: {}", accountBalanceDeltas);
this.repository.getAccountRepository().modifyAssetBalances(accountBalanceDeltas);
}
@@ -2208,34 +2307,44 @@ public class Block {
List rewardCandidates = new ArrayList<>();
// All online accounts
- final List expandedAccounts = this.getExpandedAccounts();
+ final List expandedAccounts;
+
+ if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight()) {
+ expandedAccounts = this.getExpandedAccounts().stream().collect(Collectors.toList());
+ }
+ else {
+ expandedAccounts
+ = this.getExpandedAccounts().stream()
+ .filter(expandedAccount -> expandedAccount.isMinterMember)
+ .collect(Collectors.toList());
+ }
/*
* Distribution rules:
- *
+ *
* Distribution is based on the minting account of 'online' reward-shares.
- *
+ *
* If ANY founders are online, then they receive the leftover non-distributed reward.
* If NO founders are online, then account-level-based rewards are scaled up so 100% of reward is allocated.
- *
+ *
* If ANY non-maxxed legacy QORA holders exist then they are always allocated their fixed share (e.g. 20%).
- *
+ *
* There has to be either at least one 'online' account for blocks to be minted
* so there is always either one account-level-based or founder reward candidate.
- *
+ *
* Examples:
- *
+ *
* With at least one founder online:
* Level 1/2 accounts: 5%
* Legacy QORA holders: 20%
* Founders: ~75%
- *
+ *
* No online founders:
* Level 1/2 accounts: 5%
* Level 5/6 accounts: 15%
* Legacy QORA holders: 20%
* Total: 40%
- *
+ *
* After scaling account-level-based shares to fill 100%:
* Level 1/2 accounts: 20%
* Level 5/6 accounts: 60%
@@ -2251,7 +2360,6 @@ public class Block {
// Select the correct set of share bins based on block height
List accountLevelShareBinsForBlock = (this.blockData.getHeight() >= BlockChain.getInstance().getSharesByLevelV2Height()) ?
BlockChain.getInstance().getAccountLevelShareBinsV2() : BlockChain.getInstance().getAccountLevelShareBinsV1();
-
// Determine reward candidates based on account level
// This needs a deep copy, so the shares can be modified when tiers aren't activated yet
List accountLevelShareBins = new ArrayList<>();
@@ -2334,7 +2442,7 @@ public class Block {
final long qoraHoldersShare = BlockChain.getInstance().getQoraHoldersShareAtHeight(this.blockData.getHeight());
// Perform account-level-based reward scaling if appropriate
- if (!haveFounders) {
+ if (!haveFounders && this.blockData.getHeight() < BlockChain.getInstance().getAdminsReplaceFoundersHeight() ) {
// Recalculate distribution ratios based on candidates
// Nothing shared? This shouldn't happen
@@ -2370,18 +2478,103 @@ public class Block {
}
// Add founders as reward candidate if appropriate
- if (haveFounders) {
+ if (haveFounders && this.blockData.getHeight() < BlockChain.getInstance().getAdminsReplaceFoundersHeight()) {
// Yes: add to reward candidates list
BlockRewardDistributor founderDistributor = (distributionAmount, balanceChanges) -> distributeBlockRewardShare(distributionAmount, onlineFounderAccounts, balanceChanges);
final long foundersShare = 1_00000000 - totalShares;
BlockRewardCandidate rewardCandidate = new BlockRewardCandidate("Founders", foundersShare, founderDistributor);
rewardCandidates.add(rewardCandidate);
+ LOGGER.info("logging foundersShare prior to reward modifications {}",foundersShare);
+ }
+ else if (this.blockData.getHeight() >= BlockChain.getInstance().getAdminsReplaceFoundersHeight()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ GroupRepository groupRepository = repository.getGroupRepository();
+
+ List mintingGroupIds = Groups.getGroupIdsToMint(BlockChain.getInstance(), this.blockData.getHeight());
+
+ // all minter admins
+ List minterAdmins = Groups.getAllAdmins(groupRepository, mintingGroupIds);
+
+ // all minter admins that are online
+ List onlineMinterAdminAccounts
+ = expandedAccounts.stream()
+ .filter(expandedAccount -> minterAdmins.contains(expandedAccount.getMintingAccount().getAddress()))
+ .collect(Collectors.toList());
+
+ long minterAdminShare;
+
+ if( onlineMinterAdminAccounts.isEmpty() ) {
+ minterAdminShare = 0;
+ }
+ else {
+ BlockRewardDistributor minterAdminDistributor
+ = (distributionAmount, balanceChanges)
+ ->
+ distributeBlockRewardShare(distributionAmount, onlineMinterAdminAccounts, balanceChanges);
+
+ long adminShare = 1_00000000 - totalShares;
+ LOGGER.info("initial total Shares: {}", totalShares);
+ LOGGER.info("logging adminShare after hardfork, this is the primary reward that will be split {}", adminShare);
+
+ minterAdminShare = adminShare / 2;
+ BlockRewardCandidate minterAdminRewardCandidate
+ = new BlockRewardCandidate("Minter Admins", minterAdminShare, minterAdminDistributor);
+ rewardCandidates.add(minterAdminRewardCandidate);
+
+ totalShares += minterAdminShare;
+ }
+
+ LOGGER.info("MINTER ADMIN SHARE: {}",minterAdminShare);
+
+ // all dev admins
+ List devAdminAddresses
+ = groupRepository.getGroupAdmins(1).stream()
+ .map(GroupAdminData::getAdmin)
+ .collect(Collectors.toList());
+
+ LOGGER.info("Removing NULL Account Address, Dev Admin Count = {}", devAdminAddresses.size());
+ devAdminAddresses.removeIf( address -> Group.NULL_OWNER_ADDRESS.equals(address) );
+ LOGGER.info("Removed NULL Account Address, Dev Admin Count = {}", devAdminAddresses.size());
+
+ BlockRewardDistributor devAdminDistributor
+ = (distributionAmount, balanceChanges) -> distributeToAccounts(distributionAmount, devAdminAddresses, balanceChanges);
+
+ long devAdminShare = 1_00000000 - totalShares;
+ LOGGER.info("DEV ADMIN SHARE: {}",devAdminShare);
+ BlockRewardCandidate devAdminRewardCandidate
+ = new BlockRewardCandidate("Dev Admins", devAdminShare,devAdminDistributor);
+ rewardCandidates.add(devAdminRewardCandidate);
+ }
}
return rewardCandidates;
}
+ /**
+ * Distribute To Accounts
+ *
+ * Merges per-account distribution shares into the given map of balance changes.
+ *
+ * @param distributionAmount the amount to distribute
+ * @param accountAddressess the addresses to distribute to
+ * @param balanceChanges the map of distribution shares; computed shares are merged into it
+ *
+ * @return the total amount mapped to addresses for distribution
+ */
+ public static long distributeToAccounts(long distributionAmount, List accountAddressess, Map balanceChanges) {
+
+ if( accountAddressess.isEmpty() ) return 0;
+
+ long distibutionShare = distributionAmount / accountAddressess.size();
+
+ for(String accountAddress : accountAddressess ) {
+ balanceChanges.merge(accountAddress, distibutionShare, Long::sum);
+ }
+
+ return distibutionShare * accountAddressess.size();
+ }
+
private static long distributeBlockRewardShare(long distributionAmount, List accounts, Map balanceChanges) {
// Collate all expanded accounts by minting account
Map> accountsByMinter = new HashMap<>();
@@ -2541,9 +2734,11 @@ public class Block {
return;
int minterLevel = Account.getRewardShareEffectiveMintingLevel(this.repository, this.getMinter().getPublicKey());
+ String minterAddress = Account.getRewardShareMintingAddress(this.repository, this.getMinter().getPublicKey());
LOGGER.debug(String.format("======= BLOCK %d (%.8s) =======", this.getBlockData().getHeight(), Base58.encode(this.getSignature())));
LOGGER.debug(String.format("Timestamp: %d", this.getBlockData().getTimestamp()));
+ LOGGER.debug(String.format("Minter address: %s", minterAddress));
LOGGER.debug(String.format("Minter level: %d", minterLevel));
LOGGER.debug(String.format("Online accounts: %d", this.getBlockData().getOnlineAccountsCount()));
LOGGER.debug(String.format("AT count: %d", this.getBlockData().getATCount()));
diff --git a/src/main/java/org/qortal/block/Block1333492.java b/src/main/java/org/qortal/block/Block1333492.java
new file mode 100644
index 00000000..ce2d7f99
--- /dev/null
+++ b/src/main/java/org/qortal/block/Block1333492.java
@@ -0,0 +1,101 @@
+package org.qortal.block;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.eclipse.persistence.jaxb.JAXBContextFactory;
+import org.eclipse.persistence.jaxb.UnmarshallerProperties;
+import org.qortal.data.account.AccountBalanceData;
+import org.qortal.repository.DataException;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.UnmarshalException;
+import javax.xml.bind.Unmarshaller;
+import javax.xml.transform.stream.StreamSource;
+import java.io.InputStream;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Block 1333492
+ *
+ * As described in InvalidBalanceBlocks.java, legacy bugs caused a small drift in account balances.
+ * This block adjusts any remaining differences between a clean reindex/resync and a recent bootstrap.
+ *
+ * The block height 1333492 isn't significant - it's simply the height of a recent bootstrap at the
+ * time of development, so that the account balances could be accessed and compared against the same
+ * block in a reindexed db.
+ *
+ * As with InvalidBalanceBlocks, the discrepancies are insignificant, except for a single
+ * account which has a 3.03 QORT discrepancy. This was due to the account being the first recipient
+ * of a name sale and encountering an early bug in this area.
+ *
+ * The total offset for this block is 3.02816514 QORT.
+ */
+public final class Block1333492 {
+
+ private static final Logger LOGGER = LogManager.getLogger(Block1333492.class);
+ private static final String ACCOUNT_DELTAS_SOURCE = "block-1333492-deltas.json";
+
+ private static final List accountDeltas = readAccountDeltas();
+
+ private Block1333492() {
+ /* Do not instantiate */
+ }
+
+ @SuppressWarnings("unchecked")
+ private static List readAccountDeltas() {
+ Unmarshaller unmarshaller;
+
+ try {
+ // Create JAXB context aware of classes we need to unmarshal
+ JAXBContext jc = JAXBContextFactory.createContext(new Class[] {
+ AccountBalanceData.class
+ }, null);
+
+ // Create unmarshaller
+ unmarshaller = jc.createUnmarshaller();
+
+ // Set the unmarshaller media type to JSON
+ unmarshaller.setProperty(UnmarshallerProperties.MEDIA_TYPE, "application/json");
+
+ // Tell unmarshaller that there's no JSON root element in the JSON input
+ unmarshaller.setProperty(UnmarshallerProperties.JSON_INCLUDE_ROOT, false);
+ } catch (JAXBException e) {
+ String message = "Failed to setup unmarshaller to read block 1333492 deltas";
+ LOGGER.error(message, e);
+ throw new RuntimeException(message, e);
+ }
+
+ ClassLoader classLoader = BlockChain.class.getClassLoader();
+ InputStream in = classLoader.getResourceAsStream(ACCOUNT_DELTAS_SOURCE);
+ StreamSource jsonSource = new StreamSource(in);
+
+ try {
+ // Attempt to unmarshal JSON stream to BlockChain config
+ return (List) unmarshaller.unmarshal(jsonSource, AccountBalanceData.class).getValue();
+ } catch (UnmarshalException e) {
+ String message = "Failed to parse block 1333492 deltas";
+ LOGGER.error(message, e);
+ throw new RuntimeException(message, e);
+ } catch (JAXBException e) {
+ String message = "Unexpected JAXB issue while processing block 1333492 deltas";
+ LOGGER.error(message, e);
+ throw new RuntimeException(message, e);
+ }
+ }
+
+ public static void processFix(Block block) throws DataException {
+ block.repository.getAccountRepository().modifyAssetBalances(accountDeltas);
+ }
+
+ public static void orphanFix(Block block) throws DataException {
+ // Create inverse deltas
+ List inverseDeltas = accountDeltas.stream()
+ .map(delta -> new AccountBalanceData(delta.getAddress(), delta.getAssetId(), 0 - delta.getBalance()))
+ .collect(Collectors.toList());
+
+ block.repository.getAccountRepository().modifyAssetBalances(inverseDeltas);
+ }
+
+}
diff --git a/src/main/java/org/qortal/block/BlockChain.java b/src/main/java/org/qortal/block/BlockChain.java
index dc9dfe4c..bce09aed 100644
--- a/src/main/java/org/qortal/block/BlockChain.java
+++ b/src/main/java/org/qortal/block/BlockChain.java
@@ -71,6 +71,7 @@ public class BlockChain {
transactionV6Timestamp,
disableReferenceTimestamp,
increaseOnlineAccountsDifficultyTimestamp,
+ decreaseOnlineAccountsDifficultyTimestamp,
onlineAccountMinterLevelValidationHeight,
selfSponsorshipAlgoV1Height,
selfSponsorshipAlgoV2Height,
@@ -80,7 +81,18 @@ public class BlockChain {
arbitraryOptionalFeeTimestamp,
unconfirmableRewardSharesHeight,
disableTransferPrivsTimestamp,
- enableTransferPrivsTimestamp
+ enableTransferPrivsTimestamp,
+ cancelSellNameValidationTimestamp,
+ disableRewardshareHeight,
+ enableRewardshareHeight,
+ onlyMintWithNameHeight,
+ removeOnlyMintWithNameHeight,
+ groupMemberCheckHeight,
+ fixBatchRewardHeight,
+ adminsReplaceFoundersHeight,
+ nullGroupMembershipHeight,
+ ignoreLevelForRewardShareHeight,
+ adminQueryFixHeight
}
// Custom transaction fees
@@ -201,6 +213,13 @@ public class BlockChain {
private int maxRewardSharesPerFounderMintingAccount;
private int founderEffectiveMintingLevel;
+ public static class IdsForHeight {
+ public int height;
+ public List ids;
+ }
+
+ private List mintingGroupIds;
+
/** Minimum time to retain online account signatures (ms) for block validity checks. */
private long onlineAccountSignaturesMinLifetime;
@@ -211,6 +230,10 @@ public class BlockChain {
* featureTriggers because unit tests need to set this value via Reflection. */
private long onlineAccountsModulusV2Timestamp;
+ /** Feature trigger timestamp for ONLINE_ACCOUNTS_MODULUS time interval decrease. Can't use
+ * featureTriggers because unit tests need to set this value via Reflection. */
+ private long onlineAccountsModulusV3Timestamp;
+
/** Snapshot timestamp for self sponsorship algo V1 */
private long selfSponsorshipAlgoV1SnapshotTimestamp;
@@ -397,6 +420,9 @@ public class BlockChain {
return this.onlineAccountsModulusV2Timestamp;
}
+ public long getOnlineAccountsModulusV3Timestamp() {
+ return this.onlineAccountsModulusV3Timestamp;
+ }
/* Block reward batching */
public long getBlockRewardBatchStartHeight() {
@@ -524,6 +550,10 @@ public class BlockChain {
return this.onlineAccountSignaturesMaxLifetime;
}
+ public List getMintingGroupIds() {
+ return mintingGroupIds;
+ }
+
public CiyamAtSettings getCiyamAtSettings() {
return this.ciyamAtSettings;
}
@@ -570,6 +600,10 @@ public class BlockChain {
return this.featureTriggers.get(FeatureTrigger.increaseOnlineAccountsDifficultyTimestamp.name()).longValue();
}
+ public long getDecreaseOnlineAccountsDifficultyTimestamp() {
+ return this.featureTriggers.get(FeatureTrigger.decreaseOnlineAccountsDifficultyTimestamp.name()).longValue();
+ }
+
public int getSelfSponsorshipAlgoV1Height() {
return this.featureTriggers.get(FeatureTrigger.selfSponsorshipAlgoV1Height.name()).intValue();
}
@@ -610,6 +644,50 @@ public class BlockChain {
return this.featureTriggers.get(FeatureTrigger.enableTransferPrivsTimestamp.name()).longValue();
}
+ public long getCancelSellNameValidationTimestamp() {
+ return this.featureTriggers.get(FeatureTrigger.cancelSellNameValidationTimestamp.name()).longValue();
+ }
+
+ public int getDisableRewardshareHeight() {
+ return this.featureTriggers.get(FeatureTrigger.disableRewardshareHeight.name()).intValue();
+ }
+
+ public int getEnableRewardshareHeight() {
+ return this.featureTriggers.get(FeatureTrigger.enableRewardshareHeight.name()).intValue();
+ }
+
+ public int getOnlyMintWithNameHeight() {
+ return this.featureTriggers.get(FeatureTrigger.onlyMintWithNameHeight.name()).intValue();
+ }
+
+ public int getRemoveOnlyMintWithNameHeight() {
+ return this.featureTriggers.get(FeatureTrigger.removeOnlyMintWithNameHeight.name()).intValue();
+ }
+
+ public int getGroupMemberCheckHeight() {
+ return this.featureTriggers.get(FeatureTrigger.groupMemberCheckHeight.name()).intValue();
+ }
+
+ public int getFixBatchRewardHeight() {
+ return this.featureTriggers.get(FeatureTrigger.fixBatchRewardHeight.name()).intValue();
+ }
+
+ public int getAdminsReplaceFoundersHeight() {
+ return this.featureTriggers.get(FeatureTrigger.adminsReplaceFoundersHeight.name()).intValue();
+ }
+
+ public int getNullGroupMembershipHeight() {
+ return this.featureTriggers.get(FeatureTrigger.nullGroupMembershipHeight.name()).intValue();
+ }
+
+ public int getIgnoreLevelForRewardShareHeight() {
+ return this.featureTriggers.get(FeatureTrigger.ignoreLevelForRewardShareHeight.name()).intValue();
+ }
+
+ public int getAdminQueryFixHeight() {
+ return this.featureTriggers.get(FeatureTrigger.adminQueryFixHeight.name()).intValue();
+ }
+
// More complex getters for aspects that change by height or timestamp
public long getRewardAtHeight(int ourHeight) {
@@ -805,10 +883,12 @@ public class BlockChain {
boolean isLite = Settings.getInstance().isLite();
boolean canBootstrap = Settings.getInstance().getBootstrap();
boolean needsArchiveRebuild = false;
+ int checkHeight = 0;
BlockData chainTip;
try (final Repository repository = RepositoryManager.getRepository()) {
chainTip = repository.getBlockRepository().getLastBlock();
+ checkHeight = repository.getBlockRepository().getBlockchainHeight();
// Ensure archive is (at least partially) intact, and force a bootstrap if it isn't
if (!isTopOnly && archiveEnabled && canBootstrap) {
@@ -824,6 +904,17 @@ public class BlockChain {
}
}
+ if (!canBootstrap) {
+ if (checkHeight > 2) {
+ LOGGER.info("Retrieved block 2 from archive. Syncing from genesis block resumed!");
+ } else {
+ needsArchiveRebuild = (repository.getBlockArchiveRepository().fromHeight(2) == null);
+ if (needsArchiveRebuild) {
+ LOGGER.info("Couldn't retrieve block 2 from archive. Bootstrapping is disabled. Syncing from genesis block!");
+ }
+ }
+ }
+
// Validate checkpoints
// Limited to topOnly nodes for now, in order to reduce risk, and to solve a real-world problem with divergent topOnly nodes
// TODO: remove the isTopOnly conditional below once this feature has had more testing time
@@ -856,11 +947,12 @@ public class BlockChain {
// Check first block is Genesis Block
if (!isGenesisBlockValid() || needsArchiveRebuild) {
- try {
- rebuildBlockchain();
-
- } catch (InterruptedException e) {
- throw new DataException(String.format("Interrupted when trying to rebuild blockchain: %s", e.getMessage()));
+ if (checkHeight < 3) {
+ try {
+ rebuildBlockchain();
+ } catch (InterruptedException e) {
+ throw new DataException(String.format("Interrupted when trying to rebuild blockchain: %s", e.getMessage()));
+ }
}
}
@@ -1001,5 +1093,4 @@ public class BlockChain {
blockchainLock.unlock();
}
}
-
}
diff --git a/src/main/java/org/qortal/block/InvalidBalanceBlocks.java b/src/main/java/org/qortal/block/InvalidBalanceBlocks.java
new file mode 100644
index 00000000..03b3e434
--- /dev/null
+++ b/src/main/java/org/qortal/block/InvalidBalanceBlocks.java
@@ -0,0 +1,134 @@
+package org.qortal.block;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.eclipse.persistence.jaxb.JAXBContextFactory;
+import org.eclipse.persistence.jaxb.UnmarshallerProperties;
+import org.qortal.data.account.AccountBalanceData;
+import org.qortal.repository.DataException;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.UnmarshalException;
+import javax.xml.bind.Unmarshaller;
+import javax.xml.transform.stream.StreamSource;
+import java.io.InputStream;
+import java.util.*;
+import java.util.stream.Collectors;
+
+
+/**
+ * Due to various bugs - which have been fixed - a small amount of balance drift occurred
+ * in the chainstate of running nodes and bootstraps, when compared with a clean sync from genesis.
+ * This resulted in a significant number of invalid transactions in the chain history due to
+ * subtle balance discrepancies. The sum of all discrepancies that resulted in an invalid
+ * transaction is 0.00198322 QORT, so despite the large quantity of transactions, they
+ * represent an insignificant amount when summed.
+ *
+ * This class is responsible for retroactively fixing all the past transactions which
+ * are invalid due to the balance discrepancies.
+ */
+
+
+public final class InvalidBalanceBlocks {
+
+ private static final Logger LOGGER = LogManager.getLogger(InvalidBalanceBlocks.class);
+
+ private static final String ACCOUNT_DELTAS_SOURCE = "invalid-transaction-balance-deltas.json";
+
+ private static final List accountDeltas = readAccountDeltas();
+ private static final List affectedHeights = getAffectedHeights();
+
+ private InvalidBalanceBlocks() {
+ /* Do not instantiate */
+ }
+
+ @SuppressWarnings("unchecked")
+ private static List readAccountDeltas() {
+ Unmarshaller unmarshaller;
+
+ try {
+ // Create JAXB context aware of classes we need to unmarshal
+ JAXBContext jc = JAXBContextFactory.createContext(new Class[] {
+ AccountBalanceData.class
+ }, null);
+
+ // Create unmarshaller
+ unmarshaller = jc.createUnmarshaller();
+
+ // Set the unmarshaller media type to JSON
+ unmarshaller.setProperty(UnmarshallerProperties.MEDIA_TYPE, "application/json");
+
+ // Tell unmarshaller that there's no JSON root element in the JSON input
+ unmarshaller.setProperty(UnmarshallerProperties.JSON_INCLUDE_ROOT, false);
+ } catch (JAXBException e) {
+ String message = "Failed to setup unmarshaller to read block 212937 deltas";
+ LOGGER.error(message, e);
+ throw new RuntimeException(message, e);
+ }
+
+ ClassLoader classLoader = BlockChain.class.getClassLoader();
+ InputStream in = classLoader.getResourceAsStream(ACCOUNT_DELTAS_SOURCE);
+ StreamSource jsonSource = new StreamSource(in);
+
+ try {
+ // Attempt to unmarshal JSON stream to BlockChain config
+ return (List) unmarshaller.unmarshal(jsonSource, AccountBalanceData.class).getValue();
+ } catch (UnmarshalException e) {
+ String message = "Failed to parse balance deltas";
+ LOGGER.error(message, e);
+ throw new RuntimeException(message, e);
+ } catch (JAXBException e) {
+ String message = "Unexpected JAXB issue while processing balance deltas";
+ LOGGER.error(message, e);
+ throw new RuntimeException(message, e);
+ }
+ }
+
+ private static List getAffectedHeights() {
+ List heights = new ArrayList<>();
+ for (AccountBalanceData accountBalanceData : accountDeltas) {
+ if (!heights.contains(accountBalanceData.getHeight())) {
+ heights.add(accountBalanceData.getHeight());
+ }
+ }
+ return heights;
+ }
+
+ private static List getAccountDeltasAtHeight(int height) {
+ return accountDeltas.stream().filter(a -> a.getHeight() == height).collect(Collectors.toList());
+ }
+
+ public static boolean isAffectedBlock(int height) {
+ return affectedHeights.contains(Integer.valueOf(height));
+ }
+
+ public static void processFix(Block block) throws DataException {
+ Integer blockHeight = block.getBlockData().getHeight();
+ List deltas = getAccountDeltasAtHeight(blockHeight);
+ if (deltas == null) {
+ throw new DataException(String.format("Unable to lookup invalid balance data for block height %d", blockHeight));
+ }
+
+ block.repository.getAccountRepository().modifyAssetBalances(deltas);
+
+ LOGGER.info("Applied balance patch for block {}", blockHeight);
+ }
+
+ public static void orphanFix(Block block) throws DataException {
+ Integer blockHeight = block.getBlockData().getHeight();
+ List deltas = getAccountDeltasAtHeight(blockHeight);
+ if (deltas == null) {
+ throw new DataException(String.format("Unable to lookup invalid balance data for block height %d", blockHeight));
+ }
+
+ // Create inverse delta(s)
+ for (AccountBalanceData accountBalanceData : deltas) {
+ AccountBalanceData inverseBalanceData = new AccountBalanceData(accountBalanceData.getAddress(), accountBalanceData.getAssetId(), -accountBalanceData.getBalance());
+ block.repository.getAccountRepository().modifyAssetBalances(List.of(inverseBalanceData));
+ }
+
+ LOGGER.info("Reverted balance patch for block {}", blockHeight);
+ }
+
+}
diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java
index 91dd12bb..64024d00 100644
--- a/src/main/java/org/qortal/controller/BlockMinter.java
+++ b/src/main/java/org/qortal/controller/BlockMinter.java
@@ -64,6 +64,7 @@ public class BlockMinter extends Thread {
@Override
public void run() {
Thread.currentThread().setName("BlockMinter");
+ Thread.currentThread().setPriority(MAX_PRIORITY);
if (Settings.getInstance().isTopOnly() || Settings.getInstance().isLite()) {
// Top only and lite nodes do not sign blocks
@@ -96,364 +97,375 @@ public class BlockMinter extends Thread {
final boolean isSingleNodeTestnet = Settings.getInstance().isSingleNodeTestnet();
- try (final Repository repository = RepositoryManager.getRepository()) {
- // Going to need this a lot...
- BlockRepository blockRepository = repository.getBlockRepository();
-
- // Flags for tracking change in whether minting is possible,
- // so we can notify Controller, and further update SysTray, etc.
- boolean isMintingPossible = false;
- boolean wasMintingPossible = isMintingPossible;
+ // Flags for tracking change in whether minting is possible,
+ // so we can notify Controller, and further update SysTray, etc.
+ boolean isMintingPossible = false;
+ boolean wasMintingPossible = isMintingPossible;
+ try {
while (running) {
- if (isMintingPossible != wasMintingPossible)
- Controller.getInstance().onMintingPossibleChange(isMintingPossible);
+ // recreate repository for new loop iteration
+ try (final Repository repository = RepositoryManager.getRepository()) {
- wasMintingPossible = isMintingPossible;
+ // Going to need this a lot...
+ BlockRepository blockRepository = repository.getBlockRepository();
- try {
- // Free up any repository locks
- repository.discardChanges();
+ if (isMintingPossible != wasMintingPossible)
+ Controller.getInstance().onMintingPossibleChange(isMintingPossible);
- // Sleep for a while.
- // It's faster on single node testnets, to allow lots of blocks to be minted quickly.
- Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
-
- isMintingPossible = false;
-
- final Long now = NTP.getTime();
- if (now == null)
- continue;
-
- final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
- if (minLatestBlockTimestamp == null)
- continue;
-
- List mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
- // No minting accounts?
- if (mintingAccountsData.isEmpty())
- continue;
-
- // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
- // Note that minting accounts are actually reward-shares in Qortal
- Iterator madi = mintingAccountsData.iterator();
- while (madi.hasNext()) {
- MintingAccountData mintingAccountData = madi.next();
-
- RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
- if (rewardShareData == null) {
- // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
- madi.remove();
- continue;
- }
-
- Account mintingAccount = new Account(repository, rewardShareData.getMinter());
- if (!mintingAccount.canMint()) {
- // Minting-account component of reward-share can no longer mint - disregard
- madi.remove();
- continue;
- }
-
- // Optional (non-validated) prevention of block submissions below a defined level.
- // This is an unvalidated version of Blockchain.minAccountLevelToMint
- // and exists only to reduce block candidates by default.
- int level = mintingAccount.getEffectiveMintingLevel();
- if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
- madi.remove();
- }
- }
-
- // Needs a mutable copy of the unmodifiableList
- List peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
- BlockData lastBlockData = blockRepository.getLastBlock();
-
- // Disregard peers that have "misbehaved" recently
- peers.removeIf(Controller.hasMisbehaved);
-
- // Disregard peers that don't have a recent block, but only if we're not in recovery mode.
- // In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
- if (!Synchronizer.getInstance().getRecoveryMode())
- peers.removeIf(Controller.hasNoRecentBlock);
-
- // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
- if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
- continue;
-
- // If we are stuck on an invalid block, we should allow an alternative to be minted
- boolean recoverInvalidBlock = false;
- if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
- // We've had at least one invalid block
- long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
- long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
- if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
- if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
- // Last valid block was more than 10 mins ago, but we've had an invalid block since then
- // Assume that the chain has stalled because there is no alternative valid candidate
- // Enter recovery mode to allow alternative, valid candidates to be minted
- recoverInvalidBlock = true;
- }
- }
- }
-
- // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
- if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
- if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
- continue;
-
- // There are enough peers with a recent block and our latest block is recent
- // so go ahead and mint a block if possible.
- isMintingPossible = true;
-
- // Check blockchain hasn't changed
- if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
- previousBlockData = lastBlockData;
- newBlocks.clear();
-
- // Reduce log timeout
- logTimeout = 10 * 1000L;
-
- // Last low weight block is no longer valid
- parentSignatureForLastLowWeightBlock = null;
- }
-
- // Discard accounts we have already built blocks with
- mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
-
- // Do we need to build any potential new blocks?
- List newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
-
- // We might need to sit the next block out, if one of our minting accounts signed the previous one
- // Skip this check for single node testnets, since they definitely need to mint every block
- byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
- boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
- if (mintedLastBlock && !isSingleNodeTestnet) {
- LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
- continue;
- }
-
- if (parentSignatureForLastLowWeightBlock != null) {
- // The last iteration found a higher weight block in the network, so sleep for a while
- // to allow is to sync the higher weight chain. We are sleeping here rather than when
- // detected as we don't want to hold the blockchain lock open.
- LOGGER.info("Sleeping for 10 seconds...");
- Thread.sleep(10 * 1000L);
- }
-
- for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
- // First block does the AT heavy-lifting
- if (newBlocks.isEmpty()) {
- Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
- if (newBlock == null) {
- // For some reason we can't mint right now
- moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
- continue;
- }
-
- newBlocks.add(newBlock);
- } else {
- // The blocks for other minters require less effort...
- Block newBlock = newBlocks.get(0).remint(mintingAccount);
- if (newBlock == null) {
- // For some reason we can't mint right now
- moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
- continue;
- }
-
- newBlocks.add(newBlock);
- }
- }
-
- // No potential block candidates?
- if (newBlocks.isEmpty())
- continue;
-
- // Make sure we're the only thread modifying the blockchain
- ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
- if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
- LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
- continue;
- }
-
- boolean newBlockMinted = false;
- Block newBlock = null;
+ wasMintingPossible = isMintingPossible;
try {
- // Clear repository session state so we have latest view of data
+ // reset the repository, to the repository recreated for this loop iteration
+ for( Block newBlock : newBlocks ) newBlock.setRepository(repository);
+
+ // Free up any repository locks
repository.discardChanges();
- // Now that we have blockchain lock, do final check that chain hasn't changed
- BlockData latestBlockData = blockRepository.getLastBlock();
- if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
+ // Sleep for a while.
+ // It's faster on single node testnets, to allow lots of blocks to be minted quickly.
+ Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
+
+ isMintingPossible = false;
+
+ final Long now = NTP.getTime();
+ if (now == null)
continue;
- List goodBlocks = new ArrayList<>();
- boolean wasInvalidBlockDiscarded = false;
- Iterator newBlocksIterator = newBlocks.iterator();
+ final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
+ if (minLatestBlockTimestamp == null)
+ continue;
- while (newBlocksIterator.hasNext()) {
- Block testBlock = newBlocksIterator.next();
+ List mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
+ // No minting accounts?
+ if (mintingAccountsData.isEmpty())
+ continue;
- // Is new block's timestamp valid yet?
- // We do a separate check as some timestamp checks are skipped for testchains
- if (testBlock.isTimestampValid() != ValidationResult.OK)
+ // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
+ // Note that minting accounts are actually reward-shares in Qortal
+ Iterator madi = mintingAccountsData.iterator();
+ while (madi.hasNext()) {
+ MintingAccountData mintingAccountData = madi.next();
+
+ RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
+ if (rewardShareData == null) {
+ // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
+ madi.remove();
+ continue;
+ }
+
+ Account mintingAccount = new Account(repository, rewardShareData.getMinter());
+ if (!mintingAccount.canMint(true)) {
+ // Minting-account component of reward-share can no longer mint - disregard
+ madi.remove();
+ continue;
+ }
+
+ // Optional (non-validated) prevention of block submissions below a defined level.
+ // This is an unvalidated version of Blockchain.minAccountLevelToMint
+ // and exists only to reduce block candidates by default.
+ int level = mintingAccount.getEffectiveMintingLevel();
+ if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
+ madi.remove();
+ }
+ }
+
+ // Needs a mutable copy of the unmodifiableList
+ List peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
+ BlockData lastBlockData = blockRepository.getLastBlock();
+
+ // Disregard peers that have "misbehaved" recently
+ peers.removeIf(Controller.hasMisbehaved);
+
+ // Disregard peers that don't have a recent block, but only if we're not in recovery mode.
+ // In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
+ if (!Synchronizer.getInstance().getRecoveryMode())
+ peers.removeIf(Controller.hasNoRecentBlock);
+
+ // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
+ if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
+ continue;
+
+ // If we are stuck on an invalid block, we should allow an alternative to be minted
+ boolean recoverInvalidBlock = false;
+ if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
+ // We've had at least one invalid block
+ long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
+ long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
+ if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
+ if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
+ // Last valid block was more than 10 mins ago, but we've had an invalid block since then
+ // Assume that the chain has stalled because there is no alternative valid candidate
+ // Enter recovery mode to allow alternative, valid candidates to be minted
+ recoverInvalidBlock = true;
+ }
+ }
+ }
+
+ // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
+ if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
+ if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
continue;
- testBlock.preProcess();
+ // There are enough peers with a recent block and our latest block is recent
+ // so go ahead and mint a block if possible.
+ isMintingPossible = true;
- // Is new block valid yet? (Before adding unconfirmed transactions)
- ValidationResult result = testBlock.isValid();
- if (result != ValidationResult.OK) {
- moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
+ // Check blockchain hasn't changed
+ if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
+ previousBlockData = lastBlockData;
+ newBlocks.clear();
- newBlocksIterator.remove();
- wasInvalidBlockDiscarded = true;
- /*
- * Bail out fast so that we loop around from the top again.
- * This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
- * via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
- * Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
- */
- break;
- }
+ // Reduce log timeout
+ logTimeout = 10 * 1000L;
- goodBlocks.add(testBlock);
+ // Last low weight block is no longer valid
+ parentSignatureForLastLowWeightBlock = null;
}
- if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
+ // Discard accounts we have already built blocks with
+ mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
+
+ // Do we need to build any potential new blocks?
+ List newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
+
+ // We might need to sit the next block out, if one of our minting accounts signed the previous one
+ // Skip this check for single node testnets, since they definitely need to mint every block
+ byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
+ boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
+ if (mintedLastBlock && !isSingleNodeTestnet) {
+ LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
continue;
-
- // Pick best block
- final int parentHeight = previousBlockData.getHeight();
- final byte[] parentBlockSignature = previousBlockData.getSignature();
-
- BigInteger bestWeight = null;
-
- for (int bi = 0; bi < goodBlocks.size(); ++bi) {
- BlockData blockData = goodBlocks.get(bi).getBlockData();
-
- BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
- int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
- blockSummaryData.setMinterLevel(minterLevel);
-
- BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
-
- if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
- newBlock = goodBlocks.get(bi);
- bestWeight = blockWeight;
- }
}
- try {
- if (this.higherWeightChainExists(repository, bestWeight)) {
+ if (parentSignatureForLastLowWeightBlock != null) {
+ // The last iteration found a higher weight block in the network, so sleep for a while
+ // to allow is to sync the higher weight chain. We are sleeping here rather than when
+ // detected as we don't want to hold the blockchain lock open.
+ LOGGER.info("Sleeping for 10 seconds...");
+ Thread.sleep(10 * 1000L);
+ }
- // Check if the base block has updated since the last time we were here
- if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
- !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
- // We've switched to a different chain, so reset the timer
- timeOfLastLowWeightBlock = NTP.getTime();
- }
- parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
-
- // If less than 30 seconds has passed since first detection the higher weight chain,
- // we should skip our block submission to give us the opportunity to sync to the better chain
- if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
- LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
- LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
+ for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
+ // First block does the AT heavy-lifting
+ if (newBlocks.isEmpty()) {
+ Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
+ if (newBlock == null) {
+ // For some reason we can't mint right now
+ moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
continue;
- } else {
- // More than 30 seconds have passed, so we should submit our block candidate anyway.
- LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
}
+
+ newBlocks.add(newBlock);
} else {
- LOGGER.debug("No higher weight chain found in peers");
+ // The blocks for other minters require less effort...
+ Block newBlock = newBlocks.get(0).remint(mintingAccount);
+ if (newBlock == null) {
+ // For some reason we can't mint right now
+ moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
+ continue;
+ }
+
+ newBlocks.add(newBlock);
}
- } catch (DataException e) {
- LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
}
- // Discard any uncommitted changes as a result of the higher weight chain detection
- repository.discardChanges();
+ // No potential block candidates?
+ if (newBlocks.isEmpty())
+ continue;
- // Clear variables that track low weight blocks
- parentSignatureForLastLowWeightBlock = null;
- timeOfLastLowWeightBlock = null;
-
- Long unconfirmedStartTime = NTP.getTime();
-
- // Add unconfirmed transactions
- addUnconfirmedTransactions(repository, newBlock);
-
- LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime()-unconfirmedStartTime)));
-
- // Sign to create block's signature
- newBlock.sign();
-
- // Is newBlock still valid?
- ValidationResult validationResult = newBlock.isValid();
- if (validationResult != ValidationResult.OK) {
- // No longer valid? Report and discard
- LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
-
- // Rebuild block candidates, just to be sure
- newBlocks.clear();
+ // Make sure we're the only thread modifying the blockchain
+ ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
+ if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
+ LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
continue;
}
- // Add to blockchain - something else will notice and broadcast new block to network
+ boolean newBlockMinted = false;
+ Block newBlock = null;
+
try {
- newBlock.process();
+ // Clear repository session state so we have latest view of data
+ repository.discardChanges();
- repository.saveChanges();
+ // Now that we have blockchain lock, do final check that chain hasn't changed
+ BlockData latestBlockData = blockRepository.getLastBlock();
+ if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
+ continue;
- LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
+ List goodBlocks = new ArrayList<>();
+ boolean wasInvalidBlockDiscarded = false;
+ Iterator newBlocksIterator = newBlocks.iterator();
- RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
+ while (newBlocksIterator.hasNext()) {
+ Block testBlock = newBlocksIterator.next();
- if (rewardShareData != null) {
- LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
- newBlock.getBlockData().getHeight(),
- Base58.encode(newBlock.getBlockData().getSignature()),
- Base58.encode(newBlock.getParent().getSignature()),
- rewardShareData.getMinter(),
- rewardShareData.getRecipient()));
- } else {
- LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
- newBlock.getBlockData().getHeight(),
- Base58.encode(newBlock.getBlockData().getSignature()),
- Base58.encode(newBlock.getParent().getSignature()),
- newBlock.getMinter().getAddress()));
+ // Is new block's timestamp valid yet?
+ // We do a separate check as some timestamp checks are skipped for testchains
+ if (testBlock.isTimestampValid() != ValidationResult.OK)
+ continue;
+
+ testBlock.preProcess();
+
+ // Is new block valid yet? (Before adding unconfirmed transactions)
+ ValidationResult result = testBlock.isValid();
+ if (result != ValidationResult.OK) {
+ moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
+
+ newBlocksIterator.remove();
+ wasInvalidBlockDiscarded = true;
+ /*
+ * Bail out fast so that we loop around from the top again.
+ * This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
+ * via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
+ * Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
+ */
+ break;
+ }
+
+ goodBlocks.add(testBlock);
}
- // Notify network after we're released blockchain lock
- newBlockMinted = true;
+ if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
+ continue;
- // Notify Controller
- repository.discardChanges(); // clear transaction status to prevent deadlocks
- Controller.getInstance().onNewBlock(newBlock.getBlockData());
- } catch (DataException e) {
- // Unable to process block - report and discard
- LOGGER.error("Unable to process newly minted block?", e);
- newBlocks.clear();
- } catch (ArithmeticException e) {
- // Unable to process block - report and discard
- LOGGER.error("Unable to process newly minted block?", e);
- newBlocks.clear();
+ // Pick best block
+ final int parentHeight = previousBlockData.getHeight();
+ final byte[] parentBlockSignature = previousBlockData.getSignature();
+
+ BigInteger bestWeight = null;
+
+ for (int bi = 0; bi < goodBlocks.size(); ++bi) {
+ BlockData blockData = goodBlocks.get(bi).getBlockData();
+
+ BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
+ int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
+ blockSummaryData.setMinterLevel(minterLevel);
+
+ BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
+
+ if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
+ newBlock = goodBlocks.get(bi);
+ bestWeight = blockWeight;
+ }
+ }
+
+ try {
+ if (this.higherWeightChainExists(repository, bestWeight)) {
+
+ // Check if the base block has updated since the last time we were here
+ if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
+ !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
+ // We've switched to a different chain, so reset the timer
+ timeOfLastLowWeightBlock = NTP.getTime();
+ }
+ parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
+
+ // If less than 30 seconds has passed since first detection the higher weight chain,
+ // we should skip our block submission to give us the opportunity to sync to the better chain
+ if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
+ LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
+ LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
+ continue;
+ } else {
+ // More than 30 seconds have passed, so we should submit our block candidate anyway.
+ LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
+ }
+ } else {
+ LOGGER.debug("No higher weight chain found in peers");
+ }
+ } catch (DataException e) {
+ LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
+ }
+
+ // Discard any uncommitted changes as a result of the higher weight chain detection
+ repository.discardChanges();
+
+ // Clear variables that track low weight blocks
+ parentSignatureForLastLowWeightBlock = null;
+ timeOfLastLowWeightBlock = null;
+
+ Long unconfirmedStartTime = NTP.getTime();
+
+ // Add unconfirmed transactions
+ addUnconfirmedTransactions(repository, newBlock);
+
+ LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime() - unconfirmedStartTime)));
+
+ // Sign to create block's signature
+ newBlock.sign();
+
+ // Is newBlock still valid?
+ ValidationResult validationResult = newBlock.isValid();
+ if (validationResult != ValidationResult.OK) {
+ // No longer valid? Report and discard
+ LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
+
+ // Rebuild block candidates, just to be sure
+ newBlocks.clear();
+ continue;
+ }
+
+ // Add to blockchain - something else will notice and broadcast new block to network
+ try {
+ newBlock.process();
+
+ repository.saveChanges();
+
+ LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
+
+ RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
+
+ if (rewardShareData != null) {
+ LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
+ newBlock.getBlockData().getHeight(),
+ Base58.encode(newBlock.getBlockData().getSignature()),
+ Base58.encode(newBlock.getParent().getSignature()),
+ rewardShareData.getMinter(),
+ rewardShareData.getRecipient()));
+ } else {
+ LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
+ newBlock.getBlockData().getHeight(),
+ Base58.encode(newBlock.getBlockData().getSignature()),
+ Base58.encode(newBlock.getParent().getSignature()),
+ newBlock.getMinter().getAddress()));
+ }
+
+ // Notify network after we're released blockchain lock
+ newBlockMinted = true;
+
+ // Notify Controller
+ repository.discardChanges(); // clear transaction status to prevent deadlocks
+ Controller.getInstance().onNewBlock(newBlock.getBlockData());
+ } catch (DataException e) {
+ // Unable to process block - report and discard
+ LOGGER.error("Unable to process newly minted block?", e);
+ newBlocks.clear();
+ } catch (ArithmeticException e) {
+ // Unable to process block - report and discard
+ LOGGER.error("Unable to process newly minted block?", e);
+ newBlocks.clear();
+ }
+ } finally {
+ blockchainLock.unlock();
}
- } finally {
- blockchainLock.unlock();
- }
- if (newBlockMinted) {
- // Broadcast our new chain to network
- Network.getInstance().broadcastOurChain();
- }
+ if (newBlockMinted) {
+ // Broadcast our new chain to network
+ Network.getInstance().broadcastOurChain();
+ }
- } catch (InterruptedException e) {
- // We've been interrupted - time to exit
- return;
+ } catch (InterruptedException e) {
+ // We've been interrupted - time to exit
+ return;
+ }
+ } catch (DataException e) {
+ LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
}
}
- } catch (DataException e) {
- LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
}
}
diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java
index 5ebcb2cd..b504c7fe 100644
--- a/src/main/java/org/qortal/controller/Controller.java
+++ b/src/main/java/org/qortal/controller/Controller.java
@@ -13,9 +13,12 @@ import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.controller.arbitrary.*;
+import org.qortal.controller.hsqldb.HSQLDBBalanceRecorder;
+import org.qortal.controller.hsqldb.HSQLDBDataCacheManager;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.controller.repository.PruneManager;
import org.qortal.controller.tradebot.TradeBot;
+import org.qortal.controller.tradebot.RNSTradeBot;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.block.BlockData;
@@ -32,7 +35,9 @@ import org.qortal.gui.Gui;
import org.qortal.gui.SysTray;
import org.qortal.network.Network;
import org.qortal.network.RNSNetwork;
+import org.qortal.network.RNSPeer;
import org.qortal.network.Peer;
+import org.qortal.network.PeerAddress;
import org.qortal.network.message.*;
import org.qortal.repository.*;
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
@@ -49,8 +54,11 @@ import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.security.SecureRandom;
import java.security.Security;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
@@ -68,6 +76,8 @@ import java.util.stream.Collectors;
public class Controller extends Thread {
+ public static HSQLDBRepositoryFactory REPOSITORY_FACTORY;
+
static {
// This must go before any calls to LogManager/Logger
System.setProperty("log4j2.formatMsgNoLookups", "true");
@@ -96,7 +106,7 @@ public class Controller extends Thread {
private final long buildTimestamp; // seconds
private final String[] savedArgs;
- private ExecutorService callbackExecutor = Executors.newFixedThreadPool(3);
+ private ExecutorService callbackExecutor = Executors.newFixedThreadPool(4);
private volatile boolean notifyGroupMembershipChange = false;
/** Latest blocks on our chain. Note: tail/last is the latest block. */
@@ -399,14 +409,44 @@ public class Controller extends Thread {
LOGGER.info("Starting repository");
try {
- RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
- RepositoryManager.setRepositoryFactory(repositoryFactory);
+ REPOSITORY_FACTORY = new HSQLDBRepositoryFactory(getRepositoryUrl());
+ RepositoryManager.setRepositoryFactory(REPOSITORY_FACTORY);
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
try (final Repository repository = RepositoryManager.getRepository()) {
- RepositoryManager.rebuildTransactionSequences(repository);
+ // RepositoryManager.rebuildTransactionSequences(repository);
ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, false);
}
+
+ if( Settings.getInstance().isDbCacheEnabled() ) {
+ LOGGER.info("Db Cache Starting ...");
+ HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager();
+ hsqldbDataCacheManager.start();
+ }
+ else {
+ LOGGER.info("Db Cache Disabled");
+ }
+
+ LOGGER.info("Arbitrary Indexing Starting ...");
+ ArbitraryIndexUtils.startCaching(
+ Settings.getInstance().getArbitraryIndexingPriority(),
+ Settings.getInstance().getArbitraryIndexingFrequency()
+ );
+
+ if( Settings.getInstance().isBalanceRecorderEnabled() ) {
+ Optional recorder = HSQLDBBalanceRecorder.getInstance();
+
+ if( recorder.isPresent() ) {
+ LOGGER.info("Balance Recorder Starting ...");
+ recorder.get().start();
+ }
+ else {
+ LOGGER.info("Balance Recorder won't start.");
+ }
+ }
+ else {
+ LOGGER.info("Balance Recorder Disabled");
+ }
} catch (DataException e) {
// If exception has no cause or message then repository is in use by some other process.
if (e.getCause() == null && e.getMessage() == null) {
@@ -496,7 +536,6 @@ public class Controller extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Shutdown hook");
-
Controller.getInstance().shutdown();
}
});
@@ -521,6 +560,16 @@ public class Controller extends Thread {
ArbitraryDataStorageManager.getInstance().start();
ArbitraryDataRenderManager.getInstance().start();
+ // start rebuild arbitrary resource cache timer task
+ if( Settings.getInstance().isRebuildArbitraryResourceCacheTaskEnabled() ) {
+ new Timer().schedule(
+ new RebuildArbitraryResourceCacheTask(),
+ Settings.getInstance().getRebuildArbitraryResourceCacheTaskDelay() * RebuildArbitraryResourceCacheTask.MILLIS_IN_MINUTE,
+ Settings.getInstance().getRebuildArbitraryResourceCacheTaskPeriod() * RebuildArbitraryResourceCacheTask.MILLIS_IN_HOUR
+ );
+ }
+
+
LOGGER.info("Starting online accounts manager");
OnlineAccountsManager.getInstance().start();
@@ -576,10 +625,33 @@ public class Controller extends Thread {
// If GUI is enabled, we're no longer starting up but actually running now
Gui.getInstance().notifyRunning();
- // Check every 10 minutes to see if the block minter is running
- Timer timer = new Timer();
+ if (Settings.getInstance().isAutoRestartEnabled()) {
+ // Check every 10 minutes if we have enough connected peers
+ Timer checkConnectedPeers = new Timer();
- timer.schedule(new TimerTask() {
+ checkConnectedPeers.schedule(new TimerTask() {
+ @Override
+ public void run() {
+ // Get the connected peers
+ int myConnectedPeers = Network.getInstance().getImmutableHandshakedPeers().size();
+ LOGGER.debug("Node have {} connected peers", myConnectedPeers);
+ if (myConnectedPeers == 0) {
+ // Restart node if we have 0 peers
+ LOGGER.info("Node have no connected peers, restarting node");
+ try {
+ RestartNode.attemptToRestart();
+ } catch (Exception e) {
+ LOGGER.error("Unable to restart the node", e);
+ }
+ }
+ }
+ }, 10*60*1000, 10*60*1000);
+ }
+
+ // Check every 10 minutes to see if the block minter is running
+ Timer checkBlockMinter = new Timer();
+
+ checkBlockMinter.schedule(new TimerTask() {
@Override
public void run() {
if (blockMinter.isAlive()) {
@@ -603,6 +675,71 @@ public class Controller extends Thread {
}
}
}, 10*60*1000, 10*60*1000);
+
+ // Check if we need sync from genesis and start syncing
+ Timer syncFromGenesis = new Timer();
+ syncFromGenesis.schedule(new TimerTask() {
+ @Override
+ public void run() {
+ LOGGER.debug("Start sync from genesis check.");
+ boolean canBootstrap = Settings.getInstance().getBootstrap();
+ boolean needsArchiveRebuild = false;
+ int checkHeight = 0;
+
+ try (final Repository repository = RepositoryManager.getRepository()){
+ needsArchiveRebuild = (repository.getBlockArchiveRepository().fromHeight(2) == null);
+ checkHeight = repository.getBlockRepository().getBlockchainHeight();
+ } catch (DataException e) {
+ throw new RuntimeException(e);
+ }
+
+ if (canBootstrap || !needsArchiveRebuild || checkHeight > 3) {
+ LOGGER.debug("Bootstrapping is enabled or we have more than 2 blocks, cancel sync from genesis check.");
+ syncFromGenesis.cancel();
+ return;
+ }
+
+ if (needsArchiveRebuild && !canBootstrap) {
+ LOGGER.info("Start syncing from genesis!");
+ List seeds = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
+
+ // Check if have a qualified peer to sync
+ if (seeds.isEmpty()) {
+ LOGGER.info("No connected peers, will try again later.");
+ return;
+ }
+
+ int index = new SecureRandom().nextInt(seeds.size());
+ String syncNode = String.valueOf(seeds.get(index));
+ PeerAddress peerAddress = PeerAddress.fromString(syncNode);
+ InetSocketAddress resolvedAddress = null;
+
+ try {
+ resolvedAddress = peerAddress.toSocketAddress();
+ } catch (UnknownHostException e) {
+ throw new RuntimeException(e);
+ }
+
+ InetSocketAddress finalResolvedAddress = resolvedAddress;
+ Peer targetPeer = seeds.stream().filter(peer -> peer.getResolvedAddress().equals(finalResolvedAddress)).findFirst().orElse(null);
+ Synchronizer.SynchronizationResult syncResult;
+
+ try {
+ do {
+ try {
+ syncResult = Synchronizer.getInstance().actuallySynchronize(targetPeer, true);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ while (syncResult == Synchronizer.SynchronizationResult.OK);
+ } finally {
+ // We are syncing now, so can cancel the check
+ syncFromGenesis.cancel();
+ }
+ }
+ }
+ }, 3*60*1000, 3*60*1000);
}
/** Called by AdvancedInstaller's launch EXE in single-instance mode, when an instance is already running. */
@@ -718,29 +855,29 @@ public class Controller extends Thread {
repositoryMaintenanceInterval = getRandomRepositoryMaintenanceInterval();
}
- // Prune stuck/slow/old peers
- if (now >= prunePeersTimestamp + prunePeersInterval) {
- prunePeersTimestamp = now + prunePeersInterval;
+ //// Prune stuck/slow/old peers
+ //if (now >= prunePeersTimestamp + prunePeersInterval) {
+ // prunePeersTimestamp = now + prunePeersInterval;
+ //
+ // try {
+ // LOGGER.debug("Pruning peers...");
+ // Network.getInstance().prunePeers();
+ // } catch (DataException e) {
+ // LOGGER.warn(String.format("Repository issue when trying to prune peers: %s", e.getMessage()));
+ // }
+ //}
- try {
- LOGGER.debug("Pruning peers...");
- Network.getInstance().prunePeers();
- } catch (DataException e) {
- LOGGER.warn(String.format("Repository issue when trying to prune peers: %s", e.getMessage()));
- }
- }
-
- // Q: Do we need global pruning?
- if (now >= pruneRNSPeersTimestamp + pruneRNSPeersInterval) {
- pruneRNSPeersTimestamp = now + pruneRNSPeersInterval;
-
- try {
- LOGGER.debug("Pruning Reticulum peers...");
- RNSNetwork.getInstance().prunePeers();
- } catch (DataException e) {
- LOGGER.warn(String.format("Repository issue when trying to prune Reticulum peers: %s", e.getMessage()));
- }
- }
+ //// Q: Do we need global pruning?
+ //if (now >= pruneRNSPeersTimestamp + pruneRNSPeersInterval) {
+ // pruneRNSPeersTimestamp = now + pruneRNSPeersInterval;
+ //
+ // try {
+ // LOGGER.debug("Pruning Reticulum peers...");
+ // RNSNetwork.getInstance().prunePeers();
+ // } catch (DataException e) {
+ // LOGGER.warn(String.format("Repository issue when trying to prune Reticulum peers: %s", e.getMessage()));
+ // }
+ //}
// Delete expired transactions
if (now >= deleteExpiredTimestamp) {
@@ -1125,6 +1262,35 @@ public class Controller extends Thread {
network.broadcast(network::buildGetUnconfirmedTransactionsMessage);
}
+ public void doRNSNetworkBroadcast() {
+ if (Settings.getInstance().isLite()) {
+ // Lite nodes have nothing to broadcast
+ return;
+ }
+ RNSNetwork network = RNSNetwork.getInstance();
+
+ // Send our current height
+ network.broadcastOurChain();
+
+ // Request unconfirmed transaction signatures, but only if we're up-to-date.
+ // if we're not up-to-dat then priority is synchronizing first
+ if (isUpToDateRNS()) {
+ network.broadcast(network::buildGetUnconfirmedTransactionsMessage);
+ }
+
+ }
+
+ public void doRNSPrunePeers() {
+ RNSNetwork network = RNSNetwork.getInstance();
+
+ try {
+ LOGGER.debug("Pruning peers...");
+ network.prunePeers();
+ } catch (DataException e) {
+ LOGGER.warn(String.format("Repository issue when trying to prune peers: %s", e.getMessage()));
+ }
+ }
+
public void onMintingPossibleChange(boolean isMintingPossible) {
this.isMintingPossible = isMintingPossible;
requestSysTrayUpdate = true;
@@ -2056,4 +2222,688 @@ public class Controller extends Thread {
public StatsSnapshot getStatsSnapshot() {
return this.stats;
}
+
+ public void onRNSNetworkMessage(RNSPeer peer, Message message) {
+ LOGGER.trace(() -> String.format("Processing %s message from %s", message.getType().name(), peer));
+
+ // Ordered by message type value
+ switch (message.getType()) {
+ case GET_BLOCK:
+ onRNSNetworkGetBlockMessage(peer, message);
+ break;
+
+ case GET_BLOCK_SUMMARIES:
+ onRNSNetworkGetBlockSummariesMessage(peer, message);
+ break;
+
+ case GET_SIGNATURES_V2:
+ onRNSNetworkGetSignaturesV2Message(peer, message);
+ break;
+
+ case HEIGHT_V2:
+ onRNSNetworkHeightV2Message(peer, message);
+ break;
+
+ case BLOCK_SUMMARIES_V2:
+ onRNSNetworkBlockSummariesV2Message(peer, message);
+ break;
+
+ case GET_TRANSACTION:
+ RNSTransactionImporter.getInstance().onNetworkGetTransactionMessage(peer, message);
+ break;
+
+ case TRANSACTION:
+ RNSTransactionImporter.getInstance().onNetworkTransactionMessage(peer, message);
+ break;
+
+ case GET_UNCONFIRMED_TRANSACTIONS:
+ RNSTransactionImporter.getInstance().onNetworkGetUnconfirmedTransactionsMessage(peer, message);
+ break;
+
+ case TRANSACTION_SIGNATURES:
+ RNSTransactionImporter.getInstance().onNetworkTransactionSignaturesMessage(peer, message);
+ break;
+
+ //case GET_ONLINE_ACCOUNTS_V3:
+ // OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsV3Message(peer, message);
+ // break;
+ //
+ //case ONLINE_ACCOUNTS_V3:
+ // OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV3Message(peer, message);
+ // break;
+
+ //// TODO: Compiles but much of the Manager details need to be rethought for Reticulum
+ //case GET_ARBITRARY_DATA:
+ // // Not currently supported
+ // break;
+ ////
+ //case ARBITRARY_DATA_FILE_LIST:
+ // RNSArbitraryDataFileListManager.getInstance().onNetworkArbitraryDataFileListMessage(peer, message);
+ // break;
+ //
+ //case GET_ARBITRARY_DATA_FILE:
+ // RNSArbitraryDataFileManager.getInstance().onNetworkGetArbitraryDataFileMessage(peer, message);
+ // break;
+ //
+ //case GET_ARBITRARY_DATA_FILE_LIST:
+ // RNSArbitraryDataFileListManager.getInstance().onNetworkGetArbitraryDataFileListMessage(peer, message);
+ // break;
+ //
+ case ARBITRARY_SIGNATURES:
+ // Not currently supported
+ break;
+
+ case GET_ARBITRARY_METADATA:
+ RNSArbitraryMetadataManager.getInstance().onNetworkGetArbitraryMetadataMessage(peer, message);
+ break;
+
+ case ARBITRARY_METADATA:
+ RNSArbitraryMetadataManager.getInstance().onNetworkArbitraryMetadataMessage(peer, message);
+ break;
+
+ case GET_TRADE_PRESENCES:
+ RNSTradeBot.getInstance().onGetTradePresencesMessage(peer, message);
+ break;
+
+ case TRADE_PRESENCES:
+ RNSTradeBot.getInstance().onTradePresencesMessage(peer, message);
+ break;
+
+ case GET_ACCOUNT:
+ onRNSNetworkGetAccountMessage(peer, message);
+ break;
+
+ case GET_ACCOUNT_BALANCE:
+ onRNSNetworkGetAccountBalanceMessage(peer, message);
+ break;
+
+ case GET_ACCOUNT_TRANSACTIONS:
+ onRNSNetworkGetAccountTransactionsMessage(peer, message);
+ break;
+
+ case GET_ACCOUNT_NAMES:
+ onRNSNetworkGetAccountNamesMessage(peer, message);
+ break;
+
+ case GET_NAME:
+ onRNSNetworkGetNameMessage(peer, message);
+ break;
+
+ default:
+ LOGGER.debug(() -> String.format("Unhandled %s message [ID %d] from peer %s", message.getType().name(), message.getId(), peer));
+ break;
+ }
+ }
+
+ private void onRNSNetworkGetBlockMessage(RNSPeer peer, Message message) {
+ GetBlockMessage getBlockMessage = (GetBlockMessage) message;
+ byte[] signature = getBlockMessage.getSignature();
+ this.stats.getBlockMessageStats.requests.incrementAndGet();
+
+ ByteArray signatureAsByteArray = ByteArray.wrap(signature);
+
+ CachedBlockMessage cachedBlockMessage = this.blockMessageCache.get(signatureAsByteArray);
+ int blockCacheSize = Settings.getInstance().getBlockCacheSize();
+
+ // Check cached latest block message
+ if (cachedBlockMessage != null) {
+ this.stats.getBlockMessageStats.cacheHits.incrementAndGet();
+
+ // We need to duplicate it to prevent multiple threads setting ID on the same message
+ CachedBlockMessage clonedBlockMessage = Message.cloneWithNewId(cachedBlockMessage, message.getId());
+
+ //if (!peer.sendMessage(clonedBlockMessage))
+ // peer.disconnect("failed to send block");
+ peer.sendMessage(clonedBlockMessage);
+
+ return;
+ }
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ BlockData blockData = repository.getBlockRepository().fromSignature(signature);
+
+ if (blockData != null) {
+ if (PruneManager.getInstance().isBlockPruned(blockData.getHeight())) {
+ // If this is a pruned block, we likely only have partial data, so best not to sent it
+ blockData = null;
+ }
+ }
+
+ // If we have no block data, we should check the archive in case it's there
+ if (blockData == null) {
+ if (Settings.getInstance().isArchiveEnabled()) {
+ Triple serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository);
+ if (serializedBlock != null) {
+ byte[] bytes = serializedBlock.getA();
+ Integer serializationVersion = serializedBlock.getB();
+
+ Message blockMessage;
+ switch (serializationVersion) {
+ case 1:
+ blockMessage = new CachedBlockMessage(bytes);
+ break;
+
+ case 2:
+ blockMessage = new CachedBlockV2Message(bytes);
+ break;
+
+ default:
+ return;
+ }
+ blockMessage.setId(message.getId());
+
+ // This call also causes the other needed data to be pulled in from repository
+ //if (!peer.sendMessage(blockMessage)) {
+ // peer.disconnect("failed to send block");
+ // // Don't fall-through to caching because failure to send might be from failure to build message
+ // return;
+ //}
+ peer.sendMessage(blockMessage);
+
+ // Sent successfully from archive, so nothing more to do
+ return;
+ }
+ }
+ }
+
+ if (blockData == null) {
+ // We don't have this block
+ this.stats.getBlockMessageStats.unknownBlocks.getAndIncrement();
+
+ // Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout
+ LOGGER.debug(() -> String.format("Sending 'block unknown' response to peer %s for GET_BLOCK request for unknown block %s", peer, Base58.encode(signature)));
+
+ // Send generic 'unknown' message as it's very short
+ //Message blockUnknownMessage = peer.getPeersVersion() >= GenericUnknownMessage.MINIMUM_PEER_VERSION
+ // ? new GenericUnknownMessage()
+ // : new BlockSummariesMessage(Collections.emptyList());
+ Message blockUnknownMessage = new GenericUnknownMessage();
+ blockUnknownMessage.setId(message.getId());
+ //if (!peer.sendMessage(blockUnknownMessage))
+ // peer.disconnect("failed to send block-unknown response");
+ peer.sendMessage(blockUnknownMessage);
+ return;
+ }
+
+ Block block = new Block(repository, blockData);
+
+ //// V2 support
+ //if (peer.getPeersVersion() >= BlockV2Message.MIN_PEER_VERSION) {
+ // Message blockMessage = new BlockV2Message(block);
+ // blockMessage.setId(message.getId());
+ // if (!peer.sendMessage(blockMessage)) {
+ // peer.disconnect("failed to send block");
+ // // Don't fall-through to caching because failure to send might be from failure to build message
+ // return;
+ // }
+ // return;
+ //}
+
+ CachedBlockMessage blockMessage = new CachedBlockMessage(block);
+ blockMessage.setId(message.getId());
+
+ //if (!peer.sendMessage(blockMessage)) {
+ // peer.disconnect("failed to send block");
+ // // Don't fall-through to caching because failure to send might be from failure to build message
+ // return;
+ //}
+ peer.sendMessage(blockMessage);
+
+ // If request is for a recent block, cache it
+ if (getChainHeight() - blockData.getHeight() <= blockCacheSize) {
+ this.stats.getBlockMessageStats.cacheFills.incrementAndGet();
+
+ this.blockMessageCache.put(ByteArray.wrap(blockData.getSignature()), blockMessage);
+ }
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while sending block %s to peer %s", Base58.encode(signature), peer), e);
+ } catch (TransformationException e) {
+ LOGGER.error(String.format("Serialization issue while sending block %s to peer %s", Base58.encode(signature), peer), e);
+ }
+ }
+
+ private void onRNSNetworkGetBlockSummariesMessage(RNSPeer peer, Message message) {
+ GetBlockSummariesMessage getBlockSummariesMessage = (GetBlockSummariesMessage) message;
+ final byte[] parentSignature = getBlockSummariesMessage.getParentSignature();
+ this.stats.getBlockSummariesStats.requests.incrementAndGet();
+
+ // If peer's parent signature matches our latest block signature
+ // then we have no blocks after that and can short-circuit with an empty response
+ BlockData chainTip = getChainTip();
+ if (chainTip != null && Arrays.equals(parentSignature, chainTip.getSignature())) {
+ //Message blockSummariesMessage = peer.getPeersVersion() >= BlockSummariesV2Message.MINIMUM_PEER_VERSION
+ // ? new BlockSummariesV2Message(Collections.emptyList())
+ // : new BlockSummariesMessage(Collections.emptyList());
+ Message blockSummariesMessage = new BlockSummariesV2Message(Collections.emptyList());
+
+ blockSummariesMessage.setId(message.getId());
+
+ //if (!peer.sendMessage(blockSummariesMessage))
+ // peer.disconnect("failed to send block summaries");
+ peer.sendMessage(blockSummariesMessage);
+
+ return;
+ }
+
+ List blockSummaries = new ArrayList<>();
+
+ // Attempt to serve from our cache of latest blocks
+ synchronized (this.latestBlocks) {
+ blockSummaries = this.latestBlocks.stream()
+ .dropWhile(cachedBlockData -> !Arrays.equals(cachedBlockData.getReference(), parentSignature))
+ .map(BlockSummaryData::new)
+ .collect(Collectors.toList());
+ }
+
+ if (blockSummaries.isEmpty()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ int numberRequested = Math.min(Network.MAX_BLOCK_SUMMARIES_PER_REPLY, getBlockSummariesMessage.getNumberRequested());
+
+ BlockData blockData = repository.getBlockRepository().fromReference(parentSignature);
+ if (blockData == null) {
+ // Try the archive
+ blockData = repository.getBlockArchiveRepository().fromReference(parentSignature);
+ }
+
+ if (blockData != null) {
+ if (PruneManager.getInstance().isBlockPruned(blockData.getHeight())) {
+ // If this request contains a pruned block, we likely only have partial data, so best not to sent anything
+ // We always prune from the oldest first, so it's fine to just check the first block requested
+ blockData = null;
+ }
+ }
+
+ while (blockData != null && blockSummaries.size() < numberRequested) {
+ BlockSummaryData blockSummary = new BlockSummaryData(blockData);
+ blockSummaries.add(blockSummary);
+
+ byte[] previousSignature = blockData.getSignature();
+ blockData = repository.getBlockRepository().fromReference(previousSignature);
+ if (blockData == null) {
+ // Try the archive
+ blockData = repository.getBlockArchiveRepository().fromReference(previousSignature);
+ }
+ }
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while sending block summaries after %s to peer %s", Base58.encode(parentSignature), peer), e);
+ }
+ } else {
+ this.stats.getBlockSummariesStats.cacheHits.incrementAndGet();
+
+ if (blockSummaries.size() >= getBlockSummariesMessage.getNumberRequested())
+ this.stats.getBlockSummariesStats.fullyFromCache.incrementAndGet();
+ }
+
+ //Message blockSummariesMessage = peer.getPeersVersion() >= BlockSummariesV2Message.MINIMUM_PEER_VERSION
+ // ? new BlockSummariesV2Message(blockSummaries)
+ // : new BlockSummariesMessage(blockSummaries);
+ Message blockSummariesMessage = new BlockSummariesV2Message(blockSummaries);
+ blockSummariesMessage.setId(message.getId());
+ //if (!peer.sendMessage(blockSummariesMessage))
+ // peer.disconnect("failed to send block summaries");
+ peer.sendMessage(blockSummariesMessage);
+ }
+
+ /**
+ * Serves block signatures following a given parent signature (GET_SIGNATURES_V2) to a Reticulum peer.
+ *
+ * Tries the in-memory cache of latest blocks first, then falls back to walking the
+ * repository (and block archive). Always replies, even when the resulting list is empty.
+ */
+ private void onRNSNetworkGetSignaturesV2Message(RNSPeer peer, Message message) {
+ GetSignaturesV2Message getSignaturesMessage = (GetSignaturesV2Message) message;
+ final byte[] parentSignature = getSignaturesMessage.getParentSignature();
+ this.stats.getBlockSignaturesV2Stats.requests.incrementAndGet();
+
+ // If peer's parent signature matches our latest block signature
+ // then we can short-circuit with an empty response
+ BlockData chainTip = getChainTip();
+ if (chainTip != null && Arrays.equals(parentSignature, chainTip.getSignature())) {
+ Message signaturesMessage = new SignaturesMessage(Collections.emptyList());
+ signaturesMessage.setId(message.getId());
+ //if (!peer.sendMessage(signaturesMessage))
+ // peer.disconnect("failed to send signatures (v2)");
+ peer.sendMessage(signaturesMessage);
+
+ return;
+ }
+
+ // NOTE(review): generic type parameters appear stripped from this patch text
+ // (presumably List<byte[]> here) - verify against the original source
+ List signatures = new ArrayList<>();
+
+ // Attempt to serve from our cache of latest blocks
+ synchronized (this.latestBlocks) {
+ signatures = this.latestBlocks.stream()
+ .dropWhile(cachedBlockData -> !Arrays.equals(cachedBlockData.getReference(), parentSignature))
+ .map(BlockData::getSignature)
+ .collect(Collectors.toList());
+ }
+
+ if (signatures.isEmpty()) {
+ // Cache miss - walk the chain forwards from parentSignature via the repository
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ int numberRequested = getSignaturesMessage.getNumberRequested();
+ BlockData blockData = repository.getBlockRepository().fromReference(parentSignature);
+ if (blockData == null) {
+ // Try the archive
+ blockData = repository.getBlockArchiveRepository().fromReference(parentSignature);
+ }
+
+ while (blockData != null && signatures.size() < numberRequested) {
+ signatures.add(blockData.getSignature());
+
+ // Each block's signature is the next block's "reference", so follow the chain forwards
+ byte[] previousSignature = blockData.getSignature();
+ blockData = repository.getBlockRepository().fromReference(previousSignature);
+ if (blockData == null) {
+ // Try the archive
+ blockData = repository.getBlockArchiveRepository().fromReference(previousSignature);
+ }
+ }
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while sending V2 signatures after %s to peer %s", Base58.encode(parentSignature), peer), e);
+ }
+ } else {
+ this.stats.getBlockSignaturesV2Stats.cacheHits.incrementAndGet();
+
+ if (signatures.size() >= getSignaturesMessage.getNumberRequested())
+ this.stats.getBlockSignaturesV2Stats.fullyFromCache.incrementAndGet();
+ }
+
+ Message signaturesMessage = new SignaturesMessage(signatures);
+ signaturesMessage.setId(message.getId());
+ //if (!peer.sendMessage(signaturesMessage))
+ // peer.disconnect("failed to send signatures (v2)");
+ peer.sendMessage(signaturesMessage);
+ }
+
+ /**
+ * Handles a HEIGHT_V2 broadcast from a Reticulum peer, updating our record of the
+ * peer's chain tip and possibly triggering a synchronization attempt.
+ */
+ private void onRNSNetworkHeightV2Message(RNSPeer peer, Message message) {
+ HeightV2Message heightV2Message = (HeightV2Message) message;
+
+ // Full nodes answer an inbound peer's first HEIGHT_V2 with our own chain tip info,
+ // since a peer with no recorded chain tip is most likely announcing itself for the first time
+ if (!Settings.getInstance().isLite() && !peer.getIsInitiator() && peer.getChainTipData() == null) {
+ peer.sendMessage(RNSNetwork.getInstance().buildHeightOrChainTipInfo(peer));
+ }
+
+ // Record the peer's announced chain tip
+ peer.setChainTipData(new BlockSummaryData(heightV2Message.getHeight(), heightV2Message.getSignature(), heightV2Message.getMinterPublicKey(), heightV2Message.getTimestamp()));
+
+ // The new tip might mean we're behind - ask the synchronizer to take a look
+ Synchronizer.getInstance().requestSync();
+ }
+
+ /**
+ * Handles an unsolicited BLOCK_SUMMARIES_V2 broadcast from a Reticulum peer,
+ * updating our view of that peer's chain tip and possibly triggering a sync.
+ * Replies with our own chain tip info when this looks like the peer's initial announcement.
+ */
+ private void onRNSNetworkBlockSummariesV2Message(RNSPeer peer, Message message) {
+ BlockSummariesV2Message blockSummariesV2Message = (BlockSummariesV2Message) message;
+
+ if (!Settings.getInstance().isLite()) {
+ // If peer is inbound and we've not updated their height
+ // then this is probably their initial BLOCK_SUMMARIES_V2 message
+ // so they need a corresponding BLOCK_SUMMARIES_V2 message from us
+ if (!peer.getIsInitiator() && peer.getChainTipData() == null) {
+ Message responseMessage = RNSNetwork.getInstance().buildHeightOrChainTipInfo(peer);
+ peer.sendMessage(responseMessage);
+ }
+ }
+
+ if (message.hasId()) {
+ /*
+ * Experimental proof-of-concept: discard messages with ID
+ * These are 'late' reply messages received after timeout has expired,
+ * having been passed upwards from Peer to Network to Controller.
+ * Hence, these are NOT simple "here's my chain tip" broadcasts from other peers.
+ */
+ LOGGER.debug("Discarding late {} message with ID {} from {}", message.getType().name(), message.getId(), peer);
+ return;
+ }
+
+ // Update peer chain tip data
+ peer.setChainTipSummaries(blockSummariesV2Message.getBlockSummaries());
+
+ // Potentially synchronize
+ Synchronizer.getInstance().requestSync();
+ }
+
+ // ************
+
+ /**
+ * Serves account data for a requested address (GET_ACCOUNT) to a Reticulum peer.
+ *
+ * Replies with an AccountMessage, or with a short GenericUnknownMessage when the
+ * account is unknown, so the peer need not wait for a timeout.
+ */
+ private void onRNSNetworkGetAccountMessage(RNSPeer peer, Message message) {
+ GetAccountMessage getAccountMessage = (GetAccountMessage) message;
+ String address = getAccountMessage.getAddress();
+ this.stats.getAccountMessageStats.requests.incrementAndGet();
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ AccountData accountData = repository.getAccountRepository().getAccount(address);
+
+ if (accountData == null) {
+ // We don't have this account
+ this.stats.getAccountMessageStats.unknownAccounts.getAndIncrement();
+
+ // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
+ LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT request for unknown account %s", peer, address));
+
+ // Send generic 'unknown' message as it's very short
+ Message accountUnknownMessage = new GenericUnknownMessage();
+ accountUnknownMessage.setId(message.getId());
+ peer.sendMessage(accountUnknownMessage);
+ return;
+ }
+
+ AccountMessage accountMessage = new AccountMessage(accountData);
+ accountMessage.setId(message.getId());
+
+ // Send failures are handled in the timeout callback rather than here
+ //if (!peer.sendMessage(accountMessage)) {
+ // peer.disconnect("failed to send account");
+ //}
+ peer.sendMessage(accountMessage);
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while send account %s to peer %s", address, peer), e);
+ }
+ }
+
+ /**
+ * Serves an account's balance for a given asset (GET_ACCOUNT_BALANCE) to a Reticulum peer.
+ *
+ * Replies with an AccountBalanceMessage, or with a short GenericUnknownMessage when
+ * no balance record exists, so the peer need not wait for a timeout.
+ */
+ private void onRNSNetworkGetAccountBalanceMessage(RNSPeer peer, Message message) {
+ GetAccountBalanceMessage getAccountBalanceMessage = (GetAccountBalanceMessage) message;
+ String address = getAccountBalanceMessage.getAddress();
+ long assetId = getAccountBalanceMessage.getAssetId();
+ this.stats.getAccountBalanceMessageStats.requests.incrementAndGet();
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ AccountBalanceData accountBalanceData = repository.getAccountRepository().getBalance(address, assetId);
+
+ if (accountBalanceData == null) {
+ // We don't have this account
+ this.stats.getAccountBalanceMessageStats.unknownAccounts.getAndIncrement();
+
+ // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
+ LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT_BALANCE request for unknown account %s and asset ID %d", peer, address, assetId));
+
+ // Send generic 'unknown' message as it's very short
+ Message accountUnknownMessage = new GenericUnknownMessage();
+ accountUnknownMessage.setId(message.getId());
+ peer.sendMessage(accountUnknownMessage);
+ return;
+ }
+
+ AccountBalanceMessage accountMessage = new AccountBalanceMessage(accountBalanceData);
+ accountMessage.setId(message.getId());
+
+ // Send failures are handled in the timeout callback rather than here
+ //if (!peer.sendMessage(accountMessage)) {
+ // peer.disconnect("failed to send account balance");
+ //}
+ peer.sendMessage(accountMessage);
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while send balance for account %s and asset ID %d to peer %s", address, assetId, peer), e);
+ }
+ }
+
+ /**
+ * Serves a page of an account's confirmed transactions (GET_ACCOUNT_TRANSACTIONS) to a Reticulum peer.
+ *
+ * Fixes relative to the previous revision:
+ * - removed the dead {@code transactions == null} branch (the list is freshly allocated and can never be null);
+ * - transactions that vanish between the signature query and expansion are now skipped instead of adding null entries;
+ * - repaired the error-log format strings, which supplied two arguments for three specifiers
+ * ("%s %d ... %s") and would have thrown MissingFormatArgumentException in the error path.
+ */
+ private void onRNSNetworkGetAccountTransactionsMessage(RNSPeer peer, Message message) {
+ GetAccountTransactionsMessage getAccountTransactionsMessage = (GetAccountTransactionsMessage) message;
+ String address = getAccountTransactionsMessage.getAddress();
+ // Cap the page size to keep responses bounded
+ int limit = Math.min(getAccountTransactionsMessage.getLimit(), 100);
+ int offset = getAccountTransactionsMessage.getOffset();
+ this.stats.getAccountTransactionsMessageStats.requests.incrementAndGet();
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null,
+ null, null, null, address, TransactionsResource.ConfirmationStatus.CONFIRMED, limit, offset, false);
+
+ // Expand signatures to full transactions, skipping any that have vanished since the signature query
+ List transactions = new ArrayList<>(signatures.size());
+ for (byte[] signature : signatures) {
+ TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
+ if (transactionData != null)
+ transactions.add(transactionData);
+ }
+
+ // NOTE(review): the signature query returns an empty list (not null) for unknown accounts,
+ // so we always reply with a TransactionsMessage, possibly empty
+ TransactionsMessage transactionsMessage = new TransactionsMessage(transactions);
+ transactionsMessage.setId(message.getId());
+
+ // Send failures are handled in the timeout callback rather than here
+ peer.sendMessage(transactionsMessage);
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while sending transactions for account %s to peer %s", address, peer), e);
+ } catch (MessageException e) {
+ LOGGER.error(String.format("Message serialization issue while sending transactions for account %s to peer %s", address, peer), e);
+ }
+ }
+
+ /**
+ * Serves the registered names owned by an address (GET_ACCOUNT_NAMES) to a Reticulum peer.
+ *
+ * Replies with a NamesMessage, or with a short GenericUnknownMessage when no data exists,
+ * so the peer need not wait for a timeout.
+ */
+ private void onRNSNetworkGetAccountNamesMessage(RNSPeer peer, Message message) {
+ GetAccountNamesMessage getAccountNamesMessage = (GetAccountNamesMessage) message;
+ String address = getAccountNamesMessage.getAddress();
+ this.stats.getAccountNamesMessageStats.requests.incrementAndGet();
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ List namesDataList = repository.getNameRepository().getNamesByOwner(address);
+
+ // NOTE(review): getNamesByOwner likely returns an empty list rather than null for
+ // unknown owners, which would make this branch unreachable - confirm against the repository
+ if (namesDataList == null) {
+ // We don't have this account
+ this.stats.getAccountNamesMessageStats.unknownAccounts.getAndIncrement();
+
+ // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
+ LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT_NAMES request for unknown account %s", peer, address));
+
+ // Send generic 'unknown' message as it's very short
+ Message accountUnknownMessage = new GenericUnknownMessage();
+ accountUnknownMessage.setId(message.getId());
+ peer.sendMessage(accountUnknownMessage);
+ return;
+ }
+
+ NamesMessage namesMessage = new NamesMessage(namesDataList);
+ namesMessage.setId(message.getId());
+
+ // Send failures are handled in the timeout callback rather than here
+ //if (!peer.sendMessage(namesMessage)) {
+ // peer.disconnect("failed to send account names");
+ //}
+ peer.sendMessage(namesMessage);
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while send names for account %s to peer %s", address, peer), e);
+ }
+ }
+
+ /**
+ * Serves registered name data for a requested name (GET_NAME) to a Reticulum peer.
+ *
+ * Replies with a NamesMessage holding the single matching entry, or with a short
+ * GenericUnknownMessage when the name is not registered, so the peer need not wait for a timeout.
+ *
+ * Fix: the 'name unknown' path previously retried the same send on failure
+ * ({@code if (!peer.sendMessage(m)) peer.sendMessage(m);}), which could emit the reply twice.
+ * Failures are handled in the timeout callback, matching the other handlers.
+ */
+ private void onRNSNetworkGetNameMessage(RNSPeer peer, Message message) {
+ GetNameMessage getNameMessage = (GetNameMessage) message;
+ String name = getNameMessage.getName();
+ this.stats.getNameMessageStats.requests.incrementAndGet();
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ NameData nameData = repository.getNameRepository().fromName(name);
+
+ if (nameData == null) {
+ // We don't have this name
+ this.stats.getNameMessageStats.unknownAccounts.getAndIncrement();
+
+ // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout
+ LOGGER.debug(() -> String.format("Sending 'name unknown' response to peer %s for GET_NAME request for unknown name %s", peer, name));
+
+ // Send generic 'unknown' message as it's very short
+ Message nameUnknownMessage = new GenericUnknownMessage();
+ nameUnknownMessage.setId(message.getId());
+ peer.sendMessage(nameUnknownMessage);
+ return;
+ }
+
+ NamesMessage namesMessage = new NamesMessage(Arrays.asList(nameData));
+ namesMessage.setId(message.getId());
+
+ // Send failures are handled in the timeout callback rather than here
+ peer.sendMessage(namesMessage);
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while send name %s to peer %s", name, peer), e);
+ }
+ }
+
+ /**
+ * Returns whether we think our node has up-to-date blockchain based on our info about other peers.
+ *
+ * Fix: removed the dead null-check on the freshly-constructed peers list
+ * ({@code new ArrayList<>} can never return null).
+ *
+ * @param minLatestBlockTimestamp - the minimum block timestamp to be considered recent
+ * @return boolean - whether our node's blockchain is up to date or not
+ */
+ public boolean isUpToDateRNS(Long minLatestBlockTimestamp) {
+ if (Settings.getInstance().isLite()) {
+ // Lite nodes are always "up to date"
+ return true;
+ }
+
+ // Do we even have a vaguely recent block?
+ if (minLatestBlockTimestamp == null)
+ return false;
+
+ final BlockData latestBlockData = getChainTip();
+ if (latestBlockData == null || latestBlockData.getTimestamp() < minLatestBlockTimestamp)
+ return false;
+
+ if (Settings.getInstance().isSingleNodeTestnet())
+ // Single node testnets won't have peers, so we can assume up to date from this point
+ return true;
+
+ // Needs a mutable copy of the unmodifiableList
+ List peers = new ArrayList<>(RNSNetwork.getInstance().getImmutableLinkedPeers());
+
+ //// Disregard peers that have "misbehaved" recently
+ //peers.removeIf(hasMisbehaved);
+ //
+ //// Disregard peers that don't have a recent block
+ //peers.removeIf(hasNoRecentBlock);
+
+ // Check we have enough peers to potentially synchronize/mint
+ if (peers.size() < Settings.getInstance().getReticulumMinDesiredPeers())
+ return false;
+
+ // If we don't have any peers left then can't synchronize, therefore consider ourself not up to date
+ return !peers.isEmpty();
+ }
+
+ /**
+ * Returns whether we think our node has up-to-date blockchain based on our info about other peers.
+ * Uses the default minLatestBlockTimestamp value.
+ * @return boolean - whether our node's blockchain is up to date or not
+ */
+ public boolean isUpToDateRNS() {
+ final Long minLatestBlockTimestamp = getMinimumLatestBlockTimestamp();
+ // Delegate to the Reticulum-aware overload; previously this called isUpToDate(),
+ // which evaluates TCP peers rather than Reticulum linked peers - confirm intended behavior
+ return this.isUpToDateRNS(minLatestBlockTimestamp);
+ }
}
diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java
index d37b2aef..bbca4c7b 100644
--- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java
+++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java
@@ -13,6 +13,7 @@ import org.qortal.crypto.MemoryPoW;
import org.qortal.crypto.Qortal25519Extras;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
+import org.qortal.data.group.GroupMemberData;
import org.qortal.data.network.OnlineAccountData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
@@ -24,6 +25,7 @@ import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
+import org.qortal.utils.Groups;
import org.qortal.utils.NTP;
import org.qortal.utils.NamedThreadFactory;
@@ -44,6 +46,7 @@ public class OnlineAccountsManager {
*/
private static final long ONLINE_TIMESTAMP_MODULUS_V1 = 5 * 60 * 1000L;
private static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L;
+ private static final long ONLINE_TIMESTAMP_MODULUS_V3 = 10 * 60 * 1000L;
/**
* How many 'current' timestamp-sets of online accounts we cache.
@@ -67,12 +70,13 @@ public class OnlineAccountsManager {
private static final long ONLINE_ACCOUNTS_COMPUTE_INITIAL_SLEEP_INTERVAL = 30 * 1000L; // ms
// MemoryPoW - mainnet
- public static final int POW_BUFFER_SIZE = 1 * 1024 * 1024; // bytes
+ public static final int POW_BUFFER_SIZE = 1024 * 1024; // bytes
public static final int POW_DIFFICULTY_V1 = 18; // leading zero bits
public static final int POW_DIFFICULTY_V2 = 19; // leading zero bits
+ public static final int POW_DIFFICULTY_V3 = 6; // leading zero bits
// MemoryPoW - testnet
- public static final int POW_BUFFER_SIZE_TESTNET = 1 * 1024 * 1024; // bytes
+ public static final int POW_BUFFER_SIZE_TESTNET = 1024 * 1024; // bytes
public static final int POW_DIFFICULTY_TESTNET = 5; // leading zero bits
// IMPORTANT: if we ever need to dynamically modify the buffer size using a feature trigger, the
@@ -80,7 +84,7 @@ public class OnlineAccountsManager {
// one for the transition period.
private static long[] POW_VERIFY_WORK_BUFFER = new long[getPoWBufferSize() / 8];
- private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts"));
+ private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts", Thread.NORM_PRIORITY));
private volatile boolean isStopping = false;
private final Set onlineAccountsImportQueue = ConcurrentHashMap.newKeySet();
@@ -106,11 +110,15 @@ public class OnlineAccountsManager {
public static long getOnlineTimestampModulus() {
Long now = NTP.getTime();
- if (now != null && now >= BlockChain.getInstance().getOnlineAccountsModulusV2Timestamp()) {
+ if (now != null && now >= BlockChain.getInstance().getOnlineAccountsModulusV2Timestamp() && now < BlockChain.getInstance().getOnlineAccountsModulusV3Timestamp()) {
return ONLINE_TIMESTAMP_MODULUS_V2;
}
+ if (now != null && now >= BlockChain.getInstance().getOnlineAccountsModulusV3Timestamp()) {
+ return ONLINE_TIMESTAMP_MODULUS_V3;
+ }
return ONLINE_TIMESTAMP_MODULUS_V1;
}
+
public static Long getCurrentOnlineAccountTimestamp() {
Long now = NTP.getTime();
if (now == null)
@@ -135,9 +143,12 @@ public class OnlineAccountsManager {
if (Settings.getInstance().isTestNet())
return POW_DIFFICULTY_TESTNET;
- if (timestamp >= BlockChain.getInstance().getIncreaseOnlineAccountsDifficultyTimestamp())
+ if (timestamp >= BlockChain.getInstance().getIncreaseOnlineAccountsDifficultyTimestamp() && timestamp < BlockChain.getInstance().getDecreaseOnlineAccountsDifficultyTimestamp())
return POW_DIFFICULTY_V2;
+ if (timestamp >= BlockChain.getInstance().getDecreaseOnlineAccountsDifficultyTimestamp())
+ return POW_DIFFICULTY_V3;
+
return POW_DIFFICULTY_V1;
}
@@ -215,6 +226,15 @@ public class OnlineAccountsManager {
Set onlineAccountsToAdd = new HashSet<>();
Set onlineAccountsToRemove = new HashSet<>();
try (final Repository repository = RepositoryManager.getRepository()) {
+
+ int blockHeight = repository.getBlockRepository().getBlockchainHeight();
+
+ List mintingGroupMemberAddresses
+ = Groups.getAllMembers(
+ repository.getGroupRepository(),
+ Groups.getGroupIdsToMint(BlockChain.getInstance(), blockHeight)
+ );
+
for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) {
if (isStopping)
return;
@@ -227,7 +247,7 @@ public class OnlineAccountsManager {
continue;
}
- boolean isValid = this.isValidCurrentAccount(repository, onlineAccountData);
+ boolean isValid = this.isValidCurrentAccount(repository, mintingGroupMemberAddresses, onlineAccountData);
if (isValid)
onlineAccountsToAdd.add(onlineAccountData);
@@ -306,7 +326,7 @@ public class OnlineAccountsManager {
return inplaceArray;
}
- private static boolean isValidCurrentAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException {
+ private static boolean isValidCurrentAccount(Repository repository, List mintingGroupMemberAddresses, OnlineAccountData onlineAccountData) throws DataException {
final Long now = NTP.getTime();
if (now == null)
return false;
@@ -341,9 +361,14 @@ public class OnlineAccountsManager {
LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(rewardSharePublicKey)));
return false;
}
+ // reject account addresses that are not in the MINTER group
+ else if( !mintingGroupMemberAddresses.contains(rewardShareData.getMinter())) {
+ LOGGER.trace(() -> String.format("Rejecting online reward-share that is not in MINTER Group, account %s", rewardShareData.getMinter()));
+ return false;
+ }
Account mintingAccount = new Account(repository, rewardShareData.getMinter());
- if (!mintingAccount.canMint()) {
+ if (!mintingAccount.canMint(true)) { // group validation is a few lines above
// Minting-account component of reward-share can no longer mint - disregard
LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress()));
return false;
@@ -530,7 +555,7 @@ public class OnlineAccountsManager {
}
Account mintingAccount = new Account(repository, rewardShareData.getMinter());
- if (!mintingAccount.canMint()) {
+ if (!mintingAccount.canMint(true)) {
// Minting-account component of reward-share can no longer mint - disregard
iterator.remove();
continue;
diff --git a/src/main/java/org/qortal/controller/PirateChainWalletController.java b/src/main/java/org/qortal/controller/PirateChainWalletController.java
index e009d531..8f0c63b7 100644
--- a/src/main/java/org/qortal/controller/PirateChainWalletController.java
+++ b/src/main/java/org/qortal/controller/PirateChainWalletController.java
@@ -65,6 +65,7 @@ public class PirateChainWalletController extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Pirate Chain Wallet Controller");
+ Thread.currentThread().setPriority(MIN_PRIORITY);
try {
while (running && !Controller.isStopping()) {
diff --git a/src/main/java/org/qortal/controller/RNSTransactionImporter.java b/src/main/java/org/qortal/controller/RNSTransactionImporter.java
new file mode 100644
index 00000000..40d89ada
--- /dev/null
+++ b/src/main/java/org/qortal/controller/RNSTransactionImporter.java
@@ -0,0 +1,460 @@
+package org.qortal.controller;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.qortal.data.block.BlockData;
+import org.qortal.data.transaction.TransactionData;
+import org.qortal.network.RNSNetwork;
+import org.qortal.network.RNSPeer;
+import org.qortal.network.message.GetTransactionMessage;
+import org.qortal.network.message.Message;
+import org.qortal.network.message.TransactionMessage;
+import org.qortal.network.message.TransactionSignaturesMessage;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.settings.Settings;
+import org.qortal.transaction.Transaction;
+import org.qortal.transform.TransformationException;
+import org.qortal.utils.Base58;
+import org.qortal.utils.NTP;
+
+import java.util.*;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
+
+public class RNSTransactionImporter extends Thread {
+
+ private static final Logger LOGGER = LogManager.getLogger(RNSTransactionImporter.class);
+
+ private static RNSTransactionImporter instance;
+ private volatile boolean isStopping = false;
+
+ private static final int MAX_INCOMING_TRANSACTIONS = 5000;
+
+ /** Minimum time before considering an invalid unconfirmed transaction as "stale" */
+ public static final long INVALID_TRANSACTION_STALE_TIMEOUT = 30 * 60 * 1000L; // ms
+ /** Minimum frequency to re-request stale unconfirmed transactions from peers, to recheck validity */
+ public static final long INVALID_TRANSACTION_RECHECK_INTERVAL = 60 * 60 * 1000L; // ms
+ /** Minimum frequency to re-request expired unconfirmed transactions from peers, to recheck validity
+ * This mainly exists to stop expired transactions from bloating the list */
+ public static final long EXPIRED_TRANSACTION_RECHECK_INTERVAL = 10 * 60 * 1000L; // ms
+
+
+ /** Map of incoming transaction that are in the import queue. Key is transaction data, value is whether signature has been validated. */
+ private final Map incomingTransactions = Collections.synchronizedMap(new HashMap<>());
+
+ /** Map of recent invalid unconfirmed transactions. Key is base58 transaction signature, value is do-not-request expiry timestamp. */
+ private final Map invalidUnconfirmedTransactions = Collections.synchronizedMap(new HashMap<>());
+
+ /** Cached list of unconfirmed transactions, used when counting per creator. This is replaced regularly */
+ public static List unconfirmedTransactionsCache = null;
+
+
+ /** Lazily creates and returns the process-wide singleton; method-level synchronization guards first-call races. */
+ public static synchronized RNSTransactionImporter getInstance() {
+ if (instance == null)
+ instance = new RNSTransactionImporter();
+ return instance;
+ }
+
+ @Override
+ public void run() {
+ // Distinct thread name so the RNS and TCP importers can be told apart in thread dumps
+ // (previously "Transaction Importer", colliding with TransactionImporter's thread name)
+ Thread.currentThread().setName("RNS Transaction Importer");
+
+ try {
+ // Poll twice a second until node shutdown; shutdown() interrupts the sleep
+ while (!Controller.isStopping()) {
+ Thread.sleep(500L);
+
+ // Process incoming transactions queue
+ validateTransactionsInQueue();
+ importTransactionsInQueue();
+
+ // Clean up invalid incoming transactions list
+ cleanupInvalidTransactionsList(NTP.getTime());
+ }
+ } catch (InterruptedException e) {
+ // Fall through to exit thread
+ }
+ }
+
+ /** Signals the worker loop to stop, then interrupts it out of any sleep. */
+ public void shutdown() {
+ this.isStopping = true;
+ this.interrupt();
+ }
+
+
+ // Incoming transactions queue
+
+ /** Returns whether a transaction with the given signature is already queued for import. */
+ private boolean incomingTransactionQueueContains(byte[] signature) {
+ // synchronizedMap views must be manually locked while iterating
+ synchronized (incomingTransactions) {
+ for (TransactionData queued : incomingTransactions.keySet()) {
+ if (Arrays.equals(queued.getSignature(), signature))
+ return true;
+ }
+ return false;
+ }
+ }
+
+ /** Removes any queued transaction matching the given signature from the import queue. */
+ private void removeIncomingTransaction(byte[] signature) {
+ // removeIf iterates the key-set view; Collections.synchronizedMap requires manual
+ // locking around iteration of its views, matching incomingTransactionQueueContains()
+ synchronized (incomingTransactions) {
+ incomingTransactions.keySet().removeIf(t -> Arrays.equals(t.getSignature(), signature));
+ }
+ }
+
+ /**
+ * Retrieve all pending unconfirmed transactions that have had their signatures validated.
+ * @return a list of TransactionData objects, with valid signatures.
+ */
+ private List getCachedSigValidTransactions() {
+ // Snapshot under the map's monitor so concurrent queue mutations can't corrupt the stream
+ synchronized (this.incomingTransactions) {
+ return this.incomingTransactions.entrySet().stream()
+ .filter(t -> Boolean.TRUE.equals(t.getValue()))
+ .map(Map.Entry::getKey)
+ .collect(Collectors.toList());
+ }
+ }
+
+ /**
+ * Validate the signatures of any transactions pending import, then update their
+ * entries in the queue to mark them as valid/invalid.
+ *
+ * No database lock is required.
+ */
+ private void validateTransactionsInQueue() {
+ if (this.incomingTransactions.isEmpty()) {
+ // Nothing to do?
+ return;
+ }
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ // Take a snapshot of incomingTransactions, so we don't need to lock it while processing
+ Map incomingTransactionsCopy = Map.copyOf(this.incomingTransactions);
+
+ int unvalidatedCount = Collections.frequency(incomingTransactionsCopy.values(), Boolean.FALSE);
+ int validatedCount = 0;
+
+ if (unvalidatedCount > 0) {
+ LOGGER.debug("Validating signatures in incoming transactions queue (size {})...", unvalidatedCount);
+ }
+
+ // A list of all currently pending transactions that have valid signatures
+ List sigValidTransactions = new ArrayList<>();
+
+ // A list of signatures that became valid in this round
+ // NOTE(review): this list is populated below but never consumed within this method -
+ // confirm whether broadcasting newly-valid signatures to peers was intended
+ List newlyValidSignatures = new ArrayList<>();
+
+ boolean isLiteNode = Settings.getInstance().isLite();
+
+ // We need the latest block in order to check for expired transactions
+ BlockData latestBlock = Controller.getInstance().getChainTip();
+
+ // Signature validation round - does not require blockchain lock
+ for (Map.Entry transactionEntry : incomingTransactionsCopy.entrySet()) {
+ // Quick exit?
+ if (isStopping) {
+ return;
+ }
+
+ TransactionData transactionData = transactionEntry.getKey();
+ Transaction transaction = Transaction.fromData(repository, transactionData);
+ String signature58 = Base58.encode(transactionData.getSignature());
+
+ Long now = NTP.getTime();
+ if (now == null) {
+ return;
+ }
+
+ // Drop expired transactions before they are considered "sig valid"
+ if (latestBlock != null && transaction.getDeadline() <= latestBlock.getTimestamp()) {
+ LOGGER.debug("Removing expired {} transaction {} from import queue", transactionData.getType().name(), signature58);
+ removeIncomingTransaction(transactionData.getSignature());
+ // Remember the expiry so we don't keep re-requesting this transaction from peers
+ invalidUnconfirmedTransactions.put(signature58, (now + EXPIRED_TRANSACTION_RECHECK_INTERVAL));
+ continue;
+ }
+
+ // Only validate signature if we haven't already done so
+ Boolean isSigValid = transactionEntry.getValue();
+ if (!Boolean.TRUE.equals(isSigValid)) {
+ if (isLiteNode) {
+ // Lite nodes can't easily validate transactions, so for now we will have to assume that everything is valid
+ sigValidTransactions.add(transaction);
+ newlyValidSignatures.add(transactionData.getSignature());
+ // Add mark signature as valid if transaction still exists in import queue
+ incomingTransactions.computeIfPresent(transactionData, (k, v) -> Boolean.TRUE);
+ continue;
+ }
+
+ if (!transaction.isSignatureValid()) {
+ LOGGER.debug("Ignoring {} transaction {} with invalid signature", transactionData.getType().name(), signature58);
+ removeIncomingTransaction(transactionData.getSignature());
+
+ // Also add to invalidIncomingTransactions map
+ now = NTP.getTime();
+ if (now != null) {
+ Long expiry = now + INVALID_TRANSACTION_RECHECK_INTERVAL;
+ LOGGER.trace("Adding invalid transaction {} to invalidUnconfirmedTransactions...", signature58);
+ // Add to invalidUnconfirmedTransactions so that we don't keep requesting it
+ invalidUnconfirmedTransactions.put(signature58, expiry);
+ }
+
+ // We're done with this transaction
+ continue;
+ }
+
+ // Count the number that were validated in this round, for logging purposes
+ validatedCount++;
+
+ // Add mark signature as valid if transaction still exists in import queue
+ incomingTransactions.computeIfPresent(transactionData, (k, v) -> Boolean.TRUE);
+
+ // Signature validated in this round
+ newlyValidSignatures.add(transactionData.getSignature());
+
+ } else {
+ LOGGER.trace(() -> String.format("Transaction %s known to have valid signature", Base58.encode(transactionData.getSignature())));
+ }
+
+ // Signature valid - add to shortlist
+ sigValidTransactions.add(transaction);
+ }
+
+ if (unvalidatedCount > 0) {
+ LOGGER.debug("Finished validating signatures in incoming transactions queue (valid this round: {}, total pending import: {})...", validatedCount, sigValidTransactions.size());
+ }
+
+ } catch (DataException e) {
+ LOGGER.error("Repository issue while processing incoming transactions", e);
+ }
+ }
+
+ /**
+ * Import any transactions in the queue that have valid signatures.
+ *
+ * A database lock is required.
+ */
+ private void importTransactionsInQueue() {
+ List sigValidTransactions = this.getCachedSigValidTransactions();
+ if (sigValidTransactions.isEmpty()) {
+ // Don't bother locking if there are no new transactions to process
+ return;
+ }
+
+ if (Synchronizer.getInstance().isSyncRequested() || Synchronizer.getInstance().isSynchronizing()) {
+ // Prioritize syncing, and don't attempt to lock
+ return;
+ }
+
+ ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
+ if (!blockchainLock.tryLock()) {
+ LOGGER.debug("Too busy to import incoming transactions queue");
+ return;
+ }
+
+ LOGGER.debug("Importing incoming transactions queue (size {})...", sigValidTransactions.size());
+
+ int processedCount = 0;
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // Use a single copy of the unconfirmed transactions list for each cycle, to speed up constant lookups
+ // when counting unconfirmed transactions by creator.
+ List unconfirmedTransactions = repository.getTransactionRepository().getUnconfirmedTransactions();
+ unconfirmedTransactions.removeIf(t -> t.getType() == Transaction.TransactionType.CHAT);
+ unconfirmedTransactionsCache = unconfirmedTransactions;
+
+ // A list of signatures that were imported in this round
+ List newlyImportedSignatures = new ArrayList<>();
+
+ // Import transactions with valid signatures
+ try {
+ for (int i = 0; i < sigValidTransactions.size(); ++i) {
+ if (isStopping) {
+ return;
+ }
+
+ if (Synchronizer.getInstance().isSyncRequestPending()) {
+ LOGGER.debug("Breaking out of transaction importing with {} remaining, because a sync request is pending", sigValidTransactions.size() - i);
+ return;
+ }
+
+ TransactionData transactionData = sigValidTransactions.get(i);
+ Transaction transaction = Transaction.fromData(repository, transactionData);
+
+ Transaction.ValidationResult validationResult = transaction.importAsUnconfirmed();
+ processedCount++;
+
+ switch (validationResult) {
+ case TRANSACTION_ALREADY_EXISTS: {
+ LOGGER.trace(() -> String.format("Ignoring existing transaction %s", Base58.encode(transactionData.getSignature())));
+ break;
+ }
+
+ case NO_BLOCKCHAIN_LOCK: {
+ // Is this even possible considering we acquired blockchain lock above?
+ LOGGER.trace(() -> String.format("Couldn't lock blockchain to import unconfirmed transaction %s", Base58.encode(transactionData.getSignature())));
+ break;
+ }
+
+ case OK: {
+ LOGGER.debug(() -> String.format("Imported %s transaction %s", transactionData.getType().name(), Base58.encode(transactionData.getSignature())));
+
+ // Add to the unconfirmed transactions cache
+ if (transactionData.getType() != Transaction.TransactionType.CHAT && unconfirmedTransactionsCache != null) {
+ unconfirmedTransactionsCache.add(transactionData);
+ }
+
+ // Signature imported in this round
+ newlyImportedSignatures.add(transactionData.getSignature());
+
+ break;
+ }
+
+ // All other invalid cases:
+ default: {
+ final String signature58 = Base58.encode(transactionData.getSignature());
+ LOGGER.debug(() -> String.format("Ignoring invalid (%s) %s transaction %s", validationResult.name(), transactionData.getType().name(), signature58));
+
+ Long now = NTP.getTime();
+ if (now != null && now - transactionData.getTimestamp() > INVALID_TRANSACTION_STALE_TIMEOUT) {
+ Long expiryLength = INVALID_TRANSACTION_RECHECK_INTERVAL;
+
+ if (validationResult == Transaction.ValidationResult.TIMESTAMP_TOO_OLD) {
+ // Use shorter recheck interval for expired transactions
+ expiryLength = EXPIRED_TRANSACTION_RECHECK_INTERVAL;
+ }
+
+ Long expiry = now + expiryLength;
+ LOGGER.trace("Adding stale invalid transaction {} to invalidUnconfirmedTransactions...", signature58);
+ // Invalid, unconfirmed transaction has become stale - add to invalidUnconfirmedTransactions so that we don't keep requesting it
+ invalidUnconfirmedTransactions.put(signature58, expiry);
+ }
+ }
+ }
+
+ // Transaction has been processed, even if only to reject it
+ removeIncomingTransaction(transactionData.getSignature());
+ }
+
+ if (!newlyImportedSignatures.isEmpty()) {
+ LOGGER.debug("Broadcasting {} newly imported signatures", newlyImportedSignatures.size());
+ Message newTransactionSignatureMessage = new TransactionSignaturesMessage(newlyImportedSignatures);
+ RNSNetwork.getInstance().broadcast(broadcastPeer -> newTransactionSignatureMessage);
+ }
+ } finally {
+ LOGGER.debug("Finished importing {} incoming transaction{}", processedCount, (processedCount == 1 ? "" : "s"));
+ blockchainLock.unlock();
+
+ // Clear the unconfirmed transaction cache so new data can be populated in the next cycle
+ unconfirmedTransactionsCache = null;
+ }
+ } catch (DataException e) {
+ LOGGER.error("Repository issue while importing incoming transactions", e);
+ }
+ }
+
+ private void cleanupInvalidTransactionsList(Long now) {
+ if (now == null) {
+ return;
+ }
+ // Periodically remove invalid unconfirmed transactions from the list, so that they can be fetched again
+ invalidUnconfirmedTransactions.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < now);
+ }
+
+
+ // Network handlers
+
+ public void onNetworkTransactionMessage(RNSPeer peer, Message message) {
+ TransactionMessage transactionMessage = (TransactionMessage) message;
+ TransactionData transactionData = transactionMessage.getTransactionData();
+
+ if (this.incomingTransactions.size() < MAX_INCOMING_TRANSACTIONS) {
+ synchronized (this.incomingTransactions) {
+ if (!incomingTransactionQueueContains(transactionData.getSignature())) {
+ this.incomingTransactions.put(transactionData, Boolean.FALSE);
+ }
+ }
+ }
+ }
+
+ public void onNetworkGetTransactionMessage(RNSPeer peer, Message message) {
+ GetTransactionMessage getTransactionMessage = (GetTransactionMessage) message;
+ byte[] signature = getTransactionMessage.getSignature();
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ // Firstly check the sig-valid transactions that are currently queued for import
+ TransactionData transactionData = this.getCachedSigValidTransactions().stream()
+ .filter(t -> Arrays.equals(signature, t.getSignature()))
+ .findFirst().orElse(null);
+
+ if (transactionData == null) {
+ // Not found in import queue, so try the database
+ transactionData = repository.getTransactionRepository().fromSignature(signature);
+ }
+
+ if (transactionData == null) {
+ // Still not found - so we don't have this transaction
+ LOGGER.debug(() -> String.format("Ignoring GET_TRANSACTION request from peer %s for unknown transaction %s", peer, Base58.encode(signature)));
+ // Send no response at all???
+ return;
+ }
+
+ Message transactionMessage = new TransactionMessage(transactionData);
+ transactionMessage.setId(message.getId());
+ peer.sendMessage(transactionMessage);
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
+ } catch (TransformationException e) {
+ LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
+ }
+ }
+
+ public void onNetworkGetUnconfirmedTransactionsMessage(RNSPeer peer, Message message) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ List signatures = Collections.emptyList();
+
+ // If we're NOT up-to-date then don't send out unconfirmed transactions
+ // as it's possible they are already included in a later block that we don't have.
+ if (Controller.getInstance().isUpToDate())
+ signatures = repository.getTransactionRepository().getUnconfirmedTransactionSignatures();
+
+ Message transactionSignaturesMessage = new TransactionSignaturesMessage(signatures);
+ peer.sendMessage(transactionSignaturesMessage);
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while sending unconfirmed transaction signatures to peer %s", peer), e);
+ }
+ }
+
+ public void onNetworkTransactionSignaturesMessage(RNSPeer peer, Message message) {
+ TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) message;
+ List signatures = transactionSignaturesMessage.getSignatures();
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ for (byte[] signature : signatures) {
+ String signature58 = Base58.encode(signature);
+ if (invalidUnconfirmedTransactions.containsKey(signature58)) {
+ // Previously invalid transaction - don't keep requesting it
+ // It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
+ continue;
+ }
+
+ // Ignore if this transaction is in the queue
+ if (incomingTransactionQueueContains(signature)) {
+ LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peer));
+ continue;
+ }
+
+ // Do we have it already? (Before requesting transaction data itself)
+ if (repository.getTransactionRepository().exists(signature)) {
+ LOGGER.trace(() -> String.format("Ignoring existing transaction %s from peer %s", Base58.encode(signature), peer));
+ continue;
+ }
+
+ // Check isInterrupted() here and exit fast
+ if (Thread.currentThread().isInterrupted())
+ return;
+
+ // Fetch actual transaction data from peer
+ Message getTransactionMessage = new GetTransactionMessage(signature);
+ peer.sendMessage(getTransactionMessage);
+ }
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer %s", peer), e);
+ }
+ }
+
+}
diff --git a/src/main/java/org/qortal/controller/Synchronizer.java b/src/main/java/org/qortal/controller/Synchronizer.java
index 306784f5..400e7965 100644
--- a/src/main/java/org/qortal/controller/Synchronizer.java
+++ b/src/main/java/org/qortal/controller/Synchronizer.java
@@ -118,8 +118,12 @@ public class Synchronizer extends Thread {
}
public static Synchronizer getInstance() {
- if (instance == null)
+ if (instance == null) {
instance = new Synchronizer();
+ instance.setPriority(Settings.getInstance().getSynchronizerThreadPriority());
+
+ LOGGER.info("thread priority = " + instance.getPriority());
+ }
return instance;
}
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java
index 11f613ae..7f70ac05 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java
@@ -14,6 +14,7 @@ import java.io.IOException;
import java.util.Comparator;
import java.util.Map;
+import static java.lang.Thread.NORM_PRIORITY;
import static org.qortal.data.arbitrary.ArbitraryResourceStatus.Status.NOT_PUBLISHED;
@@ -28,6 +29,7 @@ public class ArbitraryDataBuilderThread implements Runnable {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Builder Thread");
+ Thread.currentThread().setPriority(NORM_PRIORITY);
ArbitraryDataBuildManager buildManager = ArbitraryDataBuildManager.getInstance();
while (!Controller.isStopping()) {
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCacheManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCacheManager.java
index 36d53761..9accd9c7 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCacheManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCacheManager.java
@@ -2,22 +2,30 @@ package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
-import org.qortal.api.resource.TransactionsResource;
import org.qortal.controller.Controller;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.transaction.ArbitraryTransactionData;
+import org.qortal.event.DataMonitorEvent;
+import org.qortal.event.EventBus;
import org.qortal.gui.SplashFrame;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.ArbitraryTransaction;
-import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58;
+import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
public class ArbitraryDataCacheManager extends Thread {
@@ -29,6 +37,11 @@ public class ArbitraryDataCacheManager extends Thread {
/** Queue of arbitrary transactions that require cache updates */
private final List updateQueue = Collections.synchronizedList(new ArrayList<>());
+ private static final NumberFormat FORMATTER = NumberFormat.getNumberInstance();
+
+ static {
+ FORMATTER.setGroupingUsed(true);
+ }
public static synchronized ArbitraryDataCacheManager getInstance() {
if (instance == null) {
@@ -41,20 +54,26 @@ public class ArbitraryDataCacheManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Cache Manager");
+ Thread.currentThread().setPriority(NORM_PRIORITY);
try {
while (!Controller.isStopping()) {
- Thread.sleep(500L);
+ try {
+ Thread.sleep(500L);
- // Process queue
- processResourceQueue();
+ // Process queue
+ processResourceQueue();
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
+ Thread.sleep(600_000L); // wait 10 minutes to continue
+ }
}
- } catch (InterruptedException e) {
- // Fall through to exit thread
- }
- // Clear queue before terminating thread
- processResourceQueue();
+ // Clear queue before terminating thread
+ processResourceQueue();
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
+ }
}
public void shutdown() {
@@ -84,14 +103,25 @@ public class ArbitraryDataCacheManager extends Thread {
// Update arbitrary resource caches
try {
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
- arbitraryTransaction.updateArbitraryResourceCache(repository);
- arbitraryTransaction.updateArbitraryMetadataCache(repository);
+ arbitraryTransaction.updateArbitraryResourceCacheIncludingMetadata(repository, new HashSet<>(0), new HashMap<>(0));
repository.saveChanges();
// Update status as separate commit, as this is more prone to failure
arbitraryTransaction.updateArbitraryResourceStatus(repository);
repository.saveChanges();
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ transactionData.getIdentifier(),
+ transactionData.getName(),
+ transactionData.getService().name(),
+ "updated resource cache and status, queue",
+ transactionData.getTimestamp(),
+ transactionData.getTimestamp()
+ )
+ );
+
LOGGER.debug(() -> String.format("Finished processing transaction %.8s in arbitrary resource queue...", Base58.encode(transactionData.getSignature())));
} catch (DataException e) {
@@ -102,6 +132,9 @@ public class ArbitraryDataCacheManager extends Thread {
} catch (DataException e) {
LOGGER.error("Repository issue while processing arbitrary resource cache updates", e);
}
+ catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
+ }
}
public void addToUpdateQueue(ArbitraryTransactionData transactionData) {
@@ -147,34 +180,66 @@ public class ArbitraryDataCacheManager extends Thread {
LOGGER.info("Building arbitrary resources cache...");
SplashFrame.getInstance().updateStatus("Building QDN cache - please wait...");
- final int batchSize = 100;
+ final int batchSize = Settings.getInstance().getBuildArbitraryResourcesBatchSize();
int offset = 0;
+ List allArbitraryTransactionsInDescendingOrder
+ = repository.getArbitraryRepository().getLatestArbitraryTransactions();
+
+ LOGGER.info("arbitrary transactions: count = " + allArbitraryTransactionsInDescendingOrder.size());
+
+ List resources = repository.getArbitraryRepository().getArbitraryResources(null, null, true);
+
+ Map resourceByWrapper = new HashMap<>(resources.size());
+ for( ArbitraryResourceData resource : resources ) {
+ resourceByWrapper.put(
+ new ArbitraryTransactionDataHashWrapper(resource.service.value, resource.name, resource.identifier),
+ resource
+ );
+ }
+
+ LOGGER.info("arbitrary resources: count = " + resourceByWrapper.size());
+
+ Set latestTransactionsWrapped = new HashSet<>(allArbitraryTransactionsInDescendingOrder.size());
+
// Loop through all ARBITRARY transactions, and determine latest state
while (!Controller.isStopping()) {
- LOGGER.info("Fetching arbitrary transactions {} - {}", offset, offset+batchSize-1);
+ LOGGER.info(
+ "Fetching arbitrary transactions {} - {} / {} Total",
+ FORMATTER.format(offset),
+ FORMATTER.format(offset+batchSize-1),
+ FORMATTER.format(allArbitraryTransactionsInDescendingOrder.size())
+ );
- List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, List.of(Transaction.TransactionType.ARBITRARY), null, null, null, TransactionsResource.ConfirmationStatus.BOTH, batchSize, offset, false);
- if (signatures.isEmpty()) {
+ List transactionsToProcess
+ = allArbitraryTransactionsInDescendingOrder.stream()
+ .skip(offset)
+ .limit(batchSize)
+ .collect(Collectors.toList());
+
+ if (transactionsToProcess.isEmpty()) {
// Complete
break;
}
- // Expand signatures to transactions
- for (byte[] signature : signatures) {
- ArbitraryTransactionData transactionData = (ArbitraryTransactionData) repository
- .getTransactionRepository().fromSignature(signature);
+ try {
+ for( ArbitraryTransactionData transactionData : transactionsToProcess) {
+ if (transactionData.getService() == null) {
+ // Unsupported service - ignore this resource
+ continue;
+ }
- if (transactionData.getService() == null) {
- // Unsupported service - ignore this resource
- continue;
+ latestTransactionsWrapped.add(new ArbitraryTransactionDataHashWrapper(transactionData));
+
+ // Update arbitrary resource caches
+ ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
+ arbitraryTransaction.updateArbitraryResourceCacheIncludingMetadata(repository, latestTransactionsWrapped, resourceByWrapper);
}
-
- // Update arbitrary resource caches
- ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
- arbitraryTransaction.updateArbitraryResourceCache(repository);
- arbitraryTransaction.updateArbitraryMetadataCache(repository);
repository.saveChanges();
+ } catch (DataException e) {
+ repository.discardChanges();
+
+ LOGGER.error(e.getMessage(), e);
}
offset += batchSize;
}
@@ -192,6 +257,11 @@ public class ArbitraryDataCacheManager extends Thread {
repository.discardChanges();
throw new DataException("Build of arbitrary resources cache failed.");
}
+ catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
+
+ return false;
+ }
}
private boolean refreshArbitraryStatuses(Repository repository) throws DataException {
@@ -199,27 +269,48 @@ public class ArbitraryDataCacheManager extends Thread {
LOGGER.info("Refreshing arbitrary resource statuses for locally hosted transactions...");
SplashFrame.getInstance().updateStatus("Refreshing statuses - please wait...");
- final int batchSize = 100;
+ final int batchSize = Settings.getInstance().getBuildArbitraryResourcesBatchSize();
int offset = 0;
+ List allHostedTransactions
+ = ArbitraryDataStorageManager.getInstance()
+ .listAllHostedTransactions(repository, null, null);
+
// Loop through all ARBITRARY transactions, and determine latest state
while (!Controller.isStopping()) {
- LOGGER.info("Fetching hosted transactions {} - {}", offset, offset+batchSize-1);
+ LOGGER.info(
+ "Fetching hosted transactions {} - {} / {} Total",
+ FORMATTER.format(offset),
+ FORMATTER.format(offset+batchSize-1),
+ FORMATTER.format(allHostedTransactions.size())
+ );
+
+ List hostedTransactions
+ = allHostedTransactions.stream()
+ .skip(offset)
+ .limit(batchSize)
+ .collect(Collectors.toList());
- List hostedTransactions = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository, batchSize, offset);
if (hostedTransactions.isEmpty()) {
// Complete
break;
}
- // Loop through hosted transactions
- for (ArbitraryTransactionData transactionData : hostedTransactions) {
+ try {
+ // Loop through hosted transactions
+ for (ArbitraryTransactionData transactionData : hostedTransactions) {
- // Determine status and update cache
- ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
- arbitraryTransaction.updateArbitraryResourceStatus(repository);
+ // Determine status and update cache
+ ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
+ arbitraryTransaction.updateArbitraryResourceStatus(repository);
+ }
repository.saveChanges();
+ } catch (DataException e) {
+ repository.discardChanges();
+
+ LOGGER.error(e.getMessage(), e);
}
+
offset += batchSize;
}
@@ -233,6 +324,11 @@ public class ArbitraryDataCacheManager extends Thread {
repository.discardChanges();
throw new DataException("Refresh of arbitrary resource statuses failed.");
}
+ catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
+
+ return false;
+ }
}
}
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java
index 7b434acb..ce4dd565 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java
@@ -2,9 +2,10 @@ package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
-import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
+import org.qortal.event.DataMonitorEvent;
+import org.qortal.event.EventBus;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
@@ -21,8 +22,12 @@ import java.nio.file.Paths;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashSet;
import java.util.List;
import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
import static org.qortal.controller.arbitrary.ArbitraryDataStorageManager.DELETION_THRESHOLD;
@@ -71,11 +76,25 @@ public class ArbitraryDataCleanupManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Cleanup Manager");
+ Thread.currentThread().setPriority(NORM_PRIORITY);
// Paginate queries when fetching arbitrary transactions
final int limit = 100;
int offset = 0;
+ List allArbitraryTransactionsInDescendingOrder;
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ allArbitraryTransactionsInDescendingOrder
+ = repository.getArbitraryRepository()
+ .getLatestArbitraryTransactions();
+ } catch( Exception e) {
+ LOGGER.error(e.getMessage(), e);
+ allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
+ }
+
+ Set processedTransactions = new HashSet<>();
+
try {
while (!isStopping) {
Thread.sleep(30000);
@@ -106,27 +125,31 @@ public class ArbitraryDataCleanupManager extends Thread {
// Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) {
- List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true);
- // LOGGER.info("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
+ List transactions = allArbitraryTransactionsInDescendingOrder.stream().skip(offset).limit(limit).collect(Collectors.toList());
if (isStopping) {
return;
}
- if (signatures == null || signatures.isEmpty()) {
+ if (transactions == null || transactions.isEmpty()) {
offset = 0;
- continue;
+ allArbitraryTransactionsInDescendingOrder
+ = repository.getArbitraryRepository()
+ .getLatestArbitraryTransactions();
+ transactions = allArbitraryTransactionsInDescendingOrder.stream().limit(limit).collect(Collectors.toList());
+ processedTransactions.clear();
}
+
offset += limit;
now = NTP.getTime();
// Loop through the signatures in this batch
- for (int i=0; i moreRecentPutTransaction
+ = processedTransactions.stream()
+ .filter(data -> data.equals(arbitraryTransactionData))
+ .findAny();
+
+ if( moreRecentPutTransaction.isPresent() ) {
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ arbitraryTransactionData.getIdentifier(),
+ arbitraryTransactionData.getName(),
+ arbitraryTransactionData.getService().name(),
+ "deleting data due to replacement",
+ arbitraryTransactionData.getTimestamp(),
+ moreRecentPutTransaction.get().getTimestamp()
+ )
+ );
+ }
+ else {
+ LOGGER.warn("Something went wrong with the most recent put transaction determination!");
+ }
+
continue;
}
@@ -198,7 +255,21 @@ public class ArbitraryDataCleanupManager extends Thread {
LOGGER.debug(String.format("Transaction %s has complete file and all chunks",
Base58.encode(arbitraryTransactionData.getSignature())));
- ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
+ boolean wasDeleted = ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
+
+ if( wasDeleted ) {
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ arbitraryTransactionData.getIdentifier(),
+ arbitraryTransactionData.getName(),
+ arbitraryTransactionData.getService().name(),
+ "deleting file, retaining chunks",
+ arbitraryTransactionData.getTimestamp(),
+ arbitraryTransactionData.getTimestamp()
+ )
+ );
+ }
continue;
}
@@ -236,17 +307,6 @@ public class ArbitraryDataCleanupManager extends Thread {
this.storageLimitReached(repository);
}
- // Delete random data associated with name if we're over our storage limit for this name
- // Use the DELETION_THRESHOLD, for the same reasons as above
- for (String followedName : ListUtils.followedNames()) {
- if (isStopping) {
- return;
- }
- if (!storageManager.isStorageSpaceAvailableForName(repository, followedName, DELETION_THRESHOLD)) {
- this.storageLimitReachedForName(repository, followedName);
- }
- }
-
} catch (DataException e) {
LOGGER.error("Repository issue when cleaning up arbitrary transaction data", e);
}
@@ -325,25 +385,6 @@ public class ArbitraryDataCleanupManager extends Thread {
// FUTURE: consider reducing the expiry time of the reader cache
}
- public void storageLimitReachedForName(Repository repository, String name) throws InterruptedException {
- // We think that the storage limit has been reached for supplied name - but we should double check
- if (ArbitraryDataStorageManager.getInstance().isStorageSpaceAvailableForName(repository, name, DELETION_THRESHOLD)) {
- // We have space available for this name, so don't delete anything
- return;
- }
-
- // Delete a batch of random chunks associated with this name
- // This reduces the chance of too many nodes deleting the same chunk
- // when they reach their storage limit
- Path dataPath = Paths.get(Settings.getInstance().getDataPath());
- for (int i=0; i allArbitraryTransactionsInDescendingOrder;
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ if( name == null ) {
+ allArbitraryTransactionsInDescendingOrder
+ = repository.getArbitraryRepository()
+ .getLatestArbitraryTransactions();
+ }
+ else {
+ allArbitraryTransactionsInDescendingOrder
+ = repository.getArbitraryRepository()
+ .getLatestArbitraryTransactionsByName(name);
+ }
+ } catch( Exception e) {
+ LOGGER.error(e.getMessage(), e);
+ allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
+ }
+
+ // collect processed transactions in a set to ensure outdated data transactions do not get fetched
+ Set processedTransactions = new HashSet<>();
+
while (!isStopping) {
Thread.sleep(1000L);
// Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) {
- List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, name, null, ConfirmationStatus.BOTH, limit, offset, true);
- // LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
+ List signatures = processTransactionsForSignatures(limit, offset, allArbitraryTransactionsInDescendingOrder, processedTransactions);
+
if (signatures == null || signatures.isEmpty()) {
offset = 0;
break;
@@ -222,14 +248,38 @@ public class ArbitraryDataManager extends Thread {
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) arbitraryTransaction.getTransactionData();
// Skip transactions that we don't need to proactively store data for
- if (!storageManager.shouldPreFetchData(repository, arbitraryTransactionData)) {
+ ArbitraryDataExamination arbitraryDataExamination = storageManager.shouldPreFetchData(repository, arbitraryTransactionData);
+ if (!arbitraryDataExamination.isPass()) {
iterator.remove();
+
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ arbitraryTransactionData.getIdentifier(),
+ arbitraryTransactionData.getName(),
+ arbitraryTransactionData.getService().name(),
+ arbitraryDataExamination.getNotes(),
+ arbitraryTransactionData.getTimestamp(),
+ arbitraryTransactionData.getTimestamp()
+ )
+ );
continue;
}
// Remove transactions that we already have local data for
if (hasLocalData(arbitraryTransaction)) {
iterator.remove();
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ arbitraryTransactionData.getIdentifier(),
+ arbitraryTransactionData.getName(),
+ arbitraryTransactionData.getService().name(),
+ "already have local data, skipping",
+ arbitraryTransactionData.getTimestamp(),
+ arbitraryTransactionData.getTimestamp()
+ )
+ );
}
}
@@ -247,8 +297,21 @@ public class ArbitraryDataManager extends Thread {
// Check to see if we have had a more recent PUT
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
- boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
- if (hasMoreRecentPutTransaction) {
+
+ Optional moreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
+
+ if (moreRecentPutTransaction.isPresent()) {
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ arbitraryTransactionData.getIdentifier(),
+ arbitraryTransactionData.getName(),
+ arbitraryTransactionData.getService().name(),
+ "not fetching old data",
+ arbitraryTransactionData.getTimestamp(),
+ moreRecentPutTransaction.get().getTimestamp()
+ )
+ );
// There is a more recent PUT transaction than the one we are currently processing.
// When a PUT is issued, it replaces any layers that would have been there before.
// Therefore any data relating to this older transaction is no longer needed and we
@@ -256,10 +319,34 @@ public class ArbitraryDataManager extends Thread {
continue;
}
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ arbitraryTransactionData.getIdentifier(),
+ arbitraryTransactionData.getName(),
+ arbitraryTransactionData.getService().name(),
+ "fetching data",
+ arbitraryTransactionData.getTimestamp(),
+ arbitraryTransactionData.getTimestamp()
+ )
+ );
+
// Ask our connected peers if they have files for this signature
// This process automatically then fetches the files themselves if a peer is found
fetchData(arbitraryTransactionData);
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ arbitraryTransactionData.getIdentifier(),
+ arbitraryTransactionData.getName(),
+ arbitraryTransactionData.getService().name(),
+ "fetched data",
+ arbitraryTransactionData.getTimestamp(),
+ arbitraryTransactionData.getTimestamp()
+ )
+ );
+
} catch (DataException e) {
LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
}
@@ -273,6 +360,20 @@ public class ArbitraryDataManager extends Thread {
final int limit = 100;
int offset = 0;
+ List allArbitraryTransactionsInDescendingOrder;
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ allArbitraryTransactionsInDescendingOrder
+ = repository.getArbitraryRepository()
+ .getLatestArbitraryTransactions();
+ } catch( Exception e) {
+ LOGGER.error(e.getMessage(), e);
+ allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
+ }
+
+ // collect processed transactions in a set to ensure outdated data transactions do not get fetched
+ Set processedTransactions = new HashSet<>();
+
while (!isStopping) {
final int minSeconds = 3;
final int maxSeconds = 10;
@@ -281,8 +382,8 @@ public class ArbitraryDataManager extends Thread {
// Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) {
- List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true);
- // LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
+ List signatures = processTransactionsForSignatures(limit, offset, allArbitraryTransactionsInDescendingOrder, processedTransactions);
+
if (signatures == null || signatures.isEmpty()) {
offset = 0;
break;
@@ -327,26 +428,74 @@ public class ArbitraryDataManager extends Thread {
continue;
}
- // Check to see if we have had a more recent PUT
+ // No longer need to see if we have had a more recent PUT since we compared the transactions to process
+ // to the transactions previously processed, so we can fetch the transaction data, notify the event bus,
+ // fetch the metadata and notify the event bus again
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
- boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
- if (hasMoreRecentPutTransaction) {
- // There is a more recent PUT transaction than the one we are currently processing.
- // When a PUT is issued, it replaces any layers that would have been there before.
- // Therefore any data relating to this older transaction is no longer needed and we
- // shouldn't fetch it from the network.
- continue;
- }
// Ask our connected peers if they have metadata for this signature
fetchMetadata(arbitraryTransactionData);
+ EventBus.INSTANCE.notify(
+ new DataMonitorEvent(
+ System.currentTimeMillis(),
+ arbitraryTransactionData.getIdentifier(),
+ arbitraryTransactionData.getName(),
+ arbitraryTransactionData.getService().name(),
+ "fetched metadata",
+ arbitraryTransactionData.getTimestamp(),
+ arbitraryTransactionData.getTimestamp()
+ )
+ );
} catch (DataException e) {
LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
+ } catch (Exception e) {
+ LOGGER.error(e.getMessage(), e);
}
}
}
+ /**
+  * Select the next page of transactions to process and return their signatures.
+  *
+  * The input list is in descending order (latest transactions first). Transactions are wrapped
+  * in ArbitraryTransactionDataHashWrapper so that (service, name, identifier) tuples can be
+  * hashed and compared; anything matching a previously processed transaction is filtered out,
+  * ensuring outdated data transactions do not get fetched again.
+  *
+  * @param limit page size
+  * @param offset page start within transactionsInDescendingOrder
+  * @param transactionsInDescendingOrder all candidate transactions, latest first
+  * @param processedTransactions mutable set of already-handled wrappers; updated by this call
+  * @return signatures of the transactions that still need processing
+  */
+ private static List<byte[]> processTransactionsForSignatures(
+ int limit,
+ int offset,
+ List<ArbitraryTransactionData> transactionsInDescendingOrder,
+ Set<ArbitraryTransactionDataHashWrapper> processedTransactions) {
+ // these transactions are in descending order, latest transactions come first
+ List<ArbitraryTransactionData> transactions
+ = transactionsInDescendingOrder.stream()
+ .skip(offset)
+ .limit(limit)
+ .collect(Collectors.toList());
+
+ // wrap the transactions, so they can be used for hashing and comparing
+ // Class ArbitraryTransactionDataHashWrapper supports hashCode() and equals(...) for this purpose;
+ // collecting into the set first to last means all outdated (older duplicate) transactions get rejected
+ Set<ArbitraryTransactionDataHashWrapper> transactionsToProcess
+ = transactions.stream()
+ .map(ArbitraryTransactionDataHashWrapper::new)
+ .collect(Collectors.toCollection(HashSet::new));
+
+ // remove the matches for previously processed transactions,
+ // because these transactions have had updates that have already been processed
+ transactionsToProcess.removeAll(processedTransactions);
+
+ // add to processed transactions to compare and remove matches from future processing iterations
+ processedTransactions.addAll(transactionsToProcess);
+
+ return transactionsToProcess.stream()
+ .map(transactionToProcess -> transactionToProcess.getData().getSignature())
+ .collect(Collectors.toList());
+ }
+
private ArbitraryTransaction fetchTransaction(final Repository repository, byte[] signature) {
try {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java
index 809db7af..c2a720fa 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java
@@ -36,6 +36,7 @@ public class ArbitraryDataRenderManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Render Manager");
+ Thread.currentThread().setPriority(NORM_PRIORITY);
try {
while (!isStopping) {
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataStorageManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataStorageManager.java
index 91cb9965..c54a1e12 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataStorageManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataStorageManager.java
@@ -72,6 +72,8 @@ public class ArbitraryDataStorageManager extends Thread {
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Storage Manager");
+ Thread.currentThread().setPriority(NORM_PRIORITY);
+
try {
while (!isStopping) {
Thread.sleep(1000);
@@ -153,31 +155,24 @@ public class ArbitraryDataStorageManager extends Thread {
* @param arbitraryTransactionData - the transaction
* @return boolean - whether to prefetch or not
*/
- public boolean shouldPreFetchData(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
+ public ArbitraryDataExamination shouldPreFetchData(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
String name = arbitraryTransactionData.getName();
// Only fetch data associated with hashes, as we already have RAW_DATA
if (arbitraryTransactionData.getDataType() != ArbitraryTransactionData.DataType.DATA_HASH) {
- return false;
+ return new ArbitraryDataExamination(false, "Only fetch data associated with hashes");
}
// Don't fetch anything more if we're (nearly) out of space
// Make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to
// avoid a fetch/delete loop
if (!this.isStorageSpaceAvailable(STORAGE_FULL_THRESHOLD)) {
- return false;
- }
-
- // Don't fetch anything if we're (nearly) out of space for this name
- // Again, make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to
- // avoid a fetch/delete loop
- if (!this.isStorageSpaceAvailableForName(repository, arbitraryTransactionData.getName(), STORAGE_FULL_THRESHOLD)) {
- return false;
+ return new ArbitraryDataExamination(false,"Don't fetch anything more if we're (nearly) out of space");
}
// Don't store data unless it's an allowed type (public/private)
if (!this.isDataTypeAllowed(arbitraryTransactionData)) {
- return false;
+ return new ArbitraryDataExamination(false, "Don't store data unless it's an allowed type (public/private)");
}
// Handle transactions without names differently
@@ -187,21 +182,21 @@ public class ArbitraryDataStorageManager extends Thread {
// Never fetch data from blocked names, even if they are followed
if (ListUtils.isNameBlocked(name)) {
- return false;
+ return new ArbitraryDataExamination(false, "blocked name");
}
switch (Settings.getInstance().getStoragePolicy()) {
case FOLLOWED:
case FOLLOWED_OR_VIEWED:
- return ListUtils.isFollowingName(name);
+ return new ArbitraryDataExamination(ListUtils.isFollowingName(name), Settings.getInstance().getStoragePolicy().name());
case ALL:
- return true;
+ return new ArbitraryDataExamination(true, Settings.getInstance().getStoragePolicy().name());
case NONE:
case VIEWED:
default:
- return false;
+ return new ArbitraryDataExamination(false, Settings.getInstance().getStoragePolicy().name());
}
}
@@ -212,17 +207,17 @@ public class ArbitraryDataStorageManager extends Thread {
*
* @return boolean - whether the storage policy allows for unnamed data
*/
- private boolean shouldPreFetchDataWithoutName() {
+ private ArbitraryDataExamination shouldPreFetchDataWithoutName() {
switch (Settings.getInstance().getStoragePolicy()) {
case ALL:
- return true;
+ return new ArbitraryDataExamination(true, "Fetching all data");
case NONE:
case VIEWED:
case FOLLOWED:
case FOLLOWED_OR_VIEWED:
default:
- return false;
+ return new ArbitraryDataExamination(false, Settings.getInstance().getStoragePolicy().name());
}
}
@@ -482,51 +477,6 @@ public class ArbitraryDataStorageManager extends Thread {
return true;
}
- public boolean isStorageSpaceAvailableForName(Repository repository, String name, double threshold) {
- if (!this.isStorageSpaceAvailable(threshold)) {
- // No storage space available at all, so no need to check this name
- return false;
- }
-
- if (Settings.getInstance().getStoragePolicy() == StoragePolicy.ALL) {
- // Using storage policy ALL, so don't limit anything per name
- return true;
- }
-
- if (name == null) {
- // This transaction doesn't have a name, so fall back to total space limitations
- return true;
- }
-
- int followedNamesCount = ListUtils.followedNamesCount();
- if (followedNamesCount == 0) {
- // Not following any names, so we have space
- return true;
- }
-
- long totalSizeForName = 0;
- long maxStoragePerName = this.storageCapacityPerName(threshold);
-
- // Fetch all hosted transactions
- List hostedTransactions = this.listAllHostedTransactions(repository, null, null);
- for (ArbitraryTransactionData transactionData : hostedTransactions) {
- String transactionName = transactionData.getName();
- if (!Objects.equals(name, transactionName)) {
- // Transaction relates to a different name
- continue;
- }
-
- totalSizeForName += transactionData.getSize();
- }
-
- // Have we reached the limit for this name?
- if (totalSizeForName > maxStoragePerName) {
- return false;
- }
-
- return true;
- }
-
public long storageCapacityPerName(double threshold) {
int followedNamesCount = ListUtils.followedNamesCount();
if (followedNamesCount == 0) {
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryTransactionDataHashWrapper.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryTransactionDataHashWrapper.java
new file mode 100644
index 00000000..9ff40771
--- /dev/null
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryTransactionDataHashWrapper.java
@@ -0,0 +1,48 @@
+package org.qortal.controller.arbitrary;
+
+import org.qortal.arbitrary.misc.Service;
+import org.qortal.data.transaction.ArbitraryTransactionData;
+
+import java.util.Objects;
+
+/**
+ * Wraps an ArbitraryTransactionData so transactions can be hashed and compared by their
+ * (service, name, identifier) tuple rather than by object identity.
+ *
+ * Two wrappers are equal when service, name and identifier all match; the wrapped
+ * transaction data itself (timestamps, signature, etc.) is deliberately excluded.
+ */
+public class ArbitraryTransactionDataHashWrapper {
+
+ // the wrapped transaction; null when the (service, name, identifier) constructor is used
+ private ArbitraryTransactionData data;
+
+ private int service;
+
+ private String name;
+
+ private String identifier;
+
+ public ArbitraryTransactionDataHashWrapper(ArbitraryTransactionData data) {
+ this.data = data;
+
+ this.service = data.getService().value;
+ this.name = data.getName();
+ this.identifier = data.getIdentifier();
+ }
+
+ public ArbitraryTransactionDataHashWrapper(int service, String name, String identifier) {
+ this.service = service;
+ this.name = name;
+ this.identifier = identifier;
+ }
+
+ public ArbitraryTransactionData getData() {
+ return data;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ArbitraryTransactionDataHashWrapper that = (ArbitraryTransactionDataHashWrapper) o;
+ // null-safe comparison for name as well as identifier, consistent with hashCode()
+ return service == that.service && Objects.equals(name, that.name) && Objects.equals(identifier, that.identifier);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(service, name, identifier);
+ }
+}
diff --git a/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileListManager.java b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileListManager.java
new file mode 100644
index 00000000..93c3cd11
--- /dev/null
+++ b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileListManager.java
@@ -0,0 +1,731 @@
+package org.qortal.controller.arbitrary;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.qortal.arbitrary.ArbitraryDataFile;
+import org.qortal.arbitrary.ArbitraryDataFileChunk;
+import org.qortal.controller.Controller;
+import org.qortal.data.arbitrary.RNSArbitraryDirectConnectionInfo;
+import org.qortal.data.arbitrary.RNSArbitraryFileListResponseInfo;
+import org.qortal.data.arbitrary.RNSArbitraryRelayInfo;
+import org.qortal.data.transaction.ArbitraryTransactionData;
+import org.qortal.data.transaction.TransactionData;
+import org.qortal.network.RNSNetwork;
+import org.qortal.network.RNSPeer;
+import org.qortal.network.message.ArbitraryDataFileListMessage;
+import org.qortal.network.message.GetArbitraryDataFileListMessage;
+import org.qortal.network.message.Message;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.settings.Settings;
+import org.qortal.utils.Base58;
+import org.qortal.utils.ListUtils;
+import org.qortal.utils.NTP;
+import org.qortal.utils.Triple;
+
+import java.util.*;
+
+import static org.qortal.controller.arbitrary.RNSArbitraryDataFileManager.MAX_FILE_HASH_RESPONSES;
+
+public class RNSArbitraryDataFileListManager {
+
+ private static final Logger LOGGER = LogManager.getLogger(RNSArbitraryDataFileListManager.class);
+
+ private static RNSArbitraryDataFileListManager instance;
+
+ private static String MIN_PEER_VERSION_FOR_FILE_LIST_STATS = "3.2.0";
+
+ /**
+ * Map of recent incoming requests for ARBITRARY transaction data file lists.
+ *
+ * Key is original request's message ID
+ * Value is Triple<transaction signature in base58, first requesting peer, first request's timestamp>
+ *
+ * If peer is null then either:
+ *
+ * - we are the original requesting peer
+ * - we have already sent data payload to original requesting peer.
+ *
+ * If signature is null then we have already received the file list and either:
+ *
+ * - we are the original requesting peer and have processed it
+ * - we have forwarded the file list
+ *
+ */
+ public Map> arbitraryDataFileListRequests = Collections.synchronizedMap(new HashMap<>());
+
+ /**
+ * Map to keep track of in progress arbitrary data signature requests
+ * Key: string - the signature encoded in base58
+ * Value: Triple
+ */
+ private Map> arbitraryDataSignatureRequests = Collections.synchronizedMap(new HashMap<>());
+
+
+ /** Maximum number of seconds that a file list relay request is able to exist on the network */
+ public static long RELAY_REQUEST_MAX_DURATION = 5000L;
+ /** Maximum number of hops that a file list relay request is allowed to make */
+ public static int RELAY_REQUEST_MAX_HOPS = 4;
+
+ /** Minimum peer version to use relay */
+ public static String RELAY_MIN_PEER_VERSION = "3.4.0";
+
+
+ /** Private constructor: singleton, accessed via getInstance(). */
+ private RNSArbitraryDataFileListManager() {
+ }
+
+ /**
+  * Lazily-initialised singleton accessor.
+  *
+  * NOTE(review): not synchronized — concurrent first calls could create two instances;
+  * presumably only invoked from a single controller thread at startup — confirm.
+  */
+ public static RNSArbitraryDataFileListManager getInstance() {
+ if (instance == null)
+ instance = new RNSArbitraryDataFileListManager();
+
+ return instance;
+ }
+
+
+ /**
+  * Evict file-list request entries older than ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT,
+  * including entries whose timestamp component is null.
+  *
+  * @param now current time in milliseconds; no-op when null (e.g. NTP not yet synced)
+  */
+ public void cleanupRequestCache(Long now) {
+ if (now == null) {
+ return;
+ }
+ final long requestMinimumTimestamp = now - ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT;
+ arbitraryDataFileListRequests.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < requestMinimumTimestamp);
+ }
+
+
+ // Track file list lookups by signature
+
+ /**
+  * Decide whether a network-wide file list request may be broadcast for this signature,
+  * using an escalating back-off schedule driven by the broadcast attempt count and the
+  * time elapsed since the last attempt (quick retries first, then minutes, then hours).
+  *
+  * @param signature58 transaction signature in base58
+  * @return true when another broadcast attempt is currently allowed
+  */
+ private boolean shouldMakeFileListRequestForSignature(String signature58) {
+ Triple request = arbitraryDataSignatureRequests.get(signature58);
+
+ if (request == null) {
+ // Not attempted yet
+ return true;
+ }
+
+ // Extract the components
+ Integer networkBroadcastCount = request.getA();
+ // Integer directPeerRequestCount = request.getB();
+ Long lastAttemptTimestamp = request.getC();
+
+ if (lastAttemptTimestamp == null) {
+ // Not attempted yet
+ return true;
+ }
+
+ long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp;
+
+ // Allow a second attempt after 15 seconds, and another after 30 seconds
+ if (timeSinceLastAttempt > 15 * 1000L) {
+ // We haven't tried for at least 15 seconds
+
+ if (networkBroadcastCount < 3) {
+ // We've made less than 3 total attempts
+ return true;
+ }
+ }
+
+ // Then allow another 5 attempts, each 1 minute apart
+ if (timeSinceLastAttempt > 60 * 1000L) {
+ // We haven't tried for at least 1 minute
+
+ if (networkBroadcastCount < 8) {
+ // We've made less than 8 total attempts
+ return true;
+ }
+ }
+
+ // Then allow another 8 attempts, each 15 minutes apart
+ if (timeSinceLastAttempt > 15 * 60 * 1000L) {
+ // We haven't tried for at least 15 minutes
+
+ if (networkBroadcastCount < 16) {
+ // We've made less than 16 total attempts
+ return true;
+ }
+ }
+
+ // From then on, only try once every 6 hours, to reduce network spam
+ if (timeSinceLastAttempt > 6 * 60 * 60 * 1000L) {
+ // We haven't tried for at least 6 hours
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+  * Decide whether direct peer connections may be attempted for this signature,
+  * honouring the direct-data-retrieval setting and a back-off schedule based on the
+  * direct-peer attempt count and time since last attempt.
+  *
+  * @param signature58 transaction signature in base58
+  * @return true when another direct request is currently allowed
+  */
+ private boolean shouldMakeDirectFileRequestsForSignature(String signature58) {
+ if (!Settings.getInstance().isDirectDataRetrievalEnabled()) {
+ // Direct connections are disabled in the settings
+ return false;
+ }
+
+ Triple request = arbitraryDataSignatureRequests.get(signature58);
+
+ if (request == null) {
+ // Not attempted yet
+ return true;
+ }
+
+ // Extract the components
+ //Integer networkBroadcastCount = request.getA();
+ Integer directPeerRequestCount = request.getB();
+ Long lastAttemptTimestamp = request.getC();
+
+ if (lastAttemptTimestamp == null) {
+ // Not attempted yet
+ return true;
+ }
+
+ if (directPeerRequestCount == 0) {
+ // We haven't tried asking peers directly yet, so we should
+ return true;
+ }
+
+ long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp;
+ if (timeSinceLastAttempt > 10 * 1000L) {
+ // We haven't tried for at least 10 seconds
+ if (directPeerRequestCount < 5) {
+ // We've made less than 5 total attempts
+ return true;
+ }
+ }
+
+ if (timeSinceLastAttempt > 5 * 60 * 1000L) {
+ // We haven't tried for at least 5 minutes
+ if (directPeerRequestCount < 10) {
+ // We've made less than 10 total attempts
+ return true;
+ }
+ }
+
+ if (timeSinceLastAttempt > 60 * 60 * 1000L) {
+ // We haven't tried for at least 1 hour
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+  * Report whether lookups for this signature are fully rate-limited,
+  * i.e. neither a broadcast nor a direct peer request is currently allowed.
+  */
+ public boolean isSignatureRateLimited(byte[] signature) {
+ String signature58 = Base58.encode(signature);
+ // broadcast still permitted? then we are not rate-limited (short-circuits the direct check)
+ if (this.shouldMakeFileListRequestForSignature(signature58)) {
+ return false;
+ }
+ return !this.shouldMakeDirectFileRequestsForSignature(signature58);
+ }
+
+ /**
+  * Return the timestamp of the most recent lookup attempt for this signature,
+  * or 0 if no attempt has been recorded.
+  */
+ public long lastRequestForSignature(byte[] signature) {
+ String signature58 = Base58.encode(signature);
+ Triple request = arbitraryDataSignatureRequests.get(signature58);
+
+ // No entry, or entry without a timestamp, means no attempt yet
+ if (request == null || request.getC() == null) {
+ return 0;
+ }
+
+ return request.getC();
+ }
+
+ /**
+  * Record a lookup attempt for a signature: create a zeroed entry on first sight, otherwise
+  * bump the broadcast and/or direct-peer counters as requested and refresh the
+  * last-attempt timestamp.
+  *
+  * @param signature58 transaction signature in base58
+  * @param incrementNetworkRequests whether to count this as a network broadcast attempt
+  * @param incrementPeerRequests whether to count this as a direct peer attempt
+  */
+ public void addToSignatureRequests(String signature58, boolean incrementNetworkRequests, boolean incrementPeerRequests) {
+ Triple request = arbitraryDataSignatureRequests.get(signature58);
+ Long now = NTP.getTime();
+
+ if (request == null) {
+ // No entry yet
+ Triple newRequest = new Triple<>(0, 0, now);
+ arbitraryDataSignatureRequests.put(signature58, newRequest);
+ }
+ else {
+ // There is an existing entry
+ if (incrementNetworkRequests) {
+ request.setA(request.getA() + 1);
+ }
+ if (incrementPeerRequests) {
+ request.setB(request.getB() + 1);
+ }
+ request.setC(now);
+ arbitraryDataSignatureRequests.put(signature58, request);
+ }
+ }
+
+ /** Forget all attempt counters for this signature, allowing immediate re-requests. */
+ public void removeFromSignatureRequests(String signature58) {
+ arbitraryDataSignatureRequests.remove(signature58);
+ }
+
+
+ // Lookup file lists by signature (and optionally hashes)
+
+ /**
+  * Request the data file list for a transaction from the network.
+  *
+  * Applies rate limiting: when broadcasts are exhausted but direct requests are still allowed,
+  * falls back to direct peer connections. Otherwise broadcasts a GetArbitraryDataFileListMessage
+  * for any missing hashes and polls the request map for up to ARBITRARY_REQUEST_TIMEOUT.
+  *
+  * NOTE(review): returns true on timeout as well as on a received response — confirm callers expect this.
+  *
+  * @return false when NTP is unsynced, the request was rate-limited, or the request entry vanished
+  */
+ public boolean fetchArbitraryDataFileList(ArbitraryTransactionData arbitraryTransactionData) {
+ byte[] signature = arbitraryTransactionData.getSignature();
+ String signature58 = Base58.encode(signature);
+
+ // Require an NTP sync
+ Long now = NTP.getTime();
+ if (now == null) {
+ return false;
+ }
+
+ // If we've already tried too many times in a short space of time, make sure to give up
+ if (!this.shouldMakeFileListRequestForSignature(signature58)) {
+ // Check if we should make direct connections to peers
+ if (this.shouldMakeDirectFileRequestsForSignature(signature58)) {
+ return RNSArbitraryDataFileManager.getInstance().fetchDataFilesFromPeersForSignature(signature);
+ }
+
+ LOGGER.trace("Skipping file list request for signature {} due to rate limit", signature58);
+ return false;
+ }
+ this.addToSignatureRequests(signature58, true, false);
+
+ //List handshakedPeers = Network.getInstance().getImmutableHandshakedPeers();
+ List handshakedPeers = RNSNetwork.getInstance().getLinkedPeers();
+ List missingHashes = null;
+
+ // Find hashes that we are missing
+ try {
+ ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
+ missingHashes = arbitraryDataFile.missingHashes();
+ } catch (DataException e) {
+ // Leave missingHashes as null, so that all hashes are requested
+ }
+ int hashCount = missingHashes != null ? missingHashes.size() : 0;
+
+ LOGGER.debug(String.format("Sending data file list request for signature %s with %d hashes to %d peers...", signature58, hashCount, handshakedPeers.size()));
+
+ //// Send our address as requestingPeer, to allow for potential direct connections with seeds/peers
+ //String requestingPeer = Network.getInstance().getOurExternalIpAddressAndPort();
+ String requestingPeer = null;
+
+ // Build request
+ Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, missingHashes, now, 0, requestingPeer);
+
+ // Save our request into requests map
+ Triple requestEntry = new Triple<>(signature58, null, NTP.getTime());
+
+ // Assign random ID to this message
+ int id;
+ do {
+ id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1;
+
+ // Put queue into map (keyed by message ID) so we can poll for a response
+ // If putIfAbsent() doesn't return null, then this ID is already taken
+ } while (arbitraryDataFileListRequests.put(id, requestEntry) != null);
+ getArbitraryDataFileListMessage.setId(id);
+
+ // Broadcast request
+ RNSNetwork.getInstance().broadcast(peer -> getArbitraryDataFileListMessage);
+
+ // Poll to see if data has arrived
+ final long singleWait = 100;
+ long totalWait = 0;
+ while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) {
+ try {
+ Thread.sleep(singleWait);
+ } catch (InterruptedException e) {
+ break;
+ }
+
+ requestEntry = arbitraryDataFileListRequests.get(id);
+ if (requestEntry == null)
+ return false;
+
+ // Signature component cleared means the response has been processed
+ if (requestEntry.getA() == null)
+ break;
+
+ totalWait += singleWait;
+ }
+ return true;
+ }
+
+ /**
+  * Request the data file list for a signature from one specific peer (no broadcast, no rate limit).
+  * Sends the request with a past timestamp so the recipient does not relay it, then polls the
+  * request map for up to ARBITRARY_REQUEST_TIMEOUT.
+  *
+  * NOTE(review): returns true on timeout as well as on a received response — confirm callers expect this.
+  *
+  * @return false when NTP is unsynced or the request entry vanished
+  */
+ public boolean fetchArbitraryDataFileList(RNSPeer peer, byte[] signature) {
+ String signature58 = Base58.encode(signature);
+
+ // Require an NTP sync
+ Long now = NTP.getTime();
+ if (now == null) {
+ return false;
+ }
+
+ int hashCount = 0;
+ LOGGER.debug(String.format("Sending data file list request for signature %s with %d hashes to peer %s...", signature58, hashCount, peer));
+
+ // Build request
+ // Use a time in the past, so that the recipient peer doesn't try and relay it
+ // Also, set hashes to null since it's easier to request all hashes than it is to determine which ones we need
+ // This could be optimized in the future
+ long timestamp = now - 60000L;
+ List hashes = null;
+ Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, timestamp, 0, null);
+
+ // Save our request into requests map
+ Triple requestEntry = new Triple<>(signature58, null, NTP.getTime());
+
+ // Assign random ID to this message
+ int id;
+ do {
+ id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1;
+
+ // Put queue into map (keyed by message ID) so we can poll for a response
+ // If putIfAbsent() doesn't return null, then this ID is already taken
+ } while (arbitraryDataFileListRequests.put(id, requestEntry) != null);
+ getArbitraryDataFileListMessage.setId(id);
+
+ // Send the request
+ peer.sendMessage(getArbitraryDataFileListMessage);
+
+ // Poll to see if data has arrived
+ final long singleWait = 100;
+ long totalWait = 0;
+ while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) {
+ try {
+ Thread.sleep(singleWait);
+ } catch (InterruptedException e) {
+ break;
+ }
+
+ requestEntry = arbitraryDataFileListRequests.get(id);
+ if (requestEntry == null)
+ return false;
+
+ // Signature component cleared means the response has been processed
+ if (requestEntry.getA() == null)
+ break;
+
+ totalWait += singleWait;
+ }
+ return true;
+ }
+
+ /**
+  * Mark all pending file list requests for this signature as complete: matching entries keep
+  * their timestamp but have the signature and peer components cleared, which flags that all
+  * chunks have been received.
+  */
+ public void deleteFileListRequestsForSignature(byte[] signature) {
+ String signature58 = Base58.encode(signature);
+ for (Iterator<Map.Entry<Integer, Triple<String, RNSPeer, Long>>> it = arbitraryDataFileListRequests.entrySet().iterator(); it.hasNext();) {
+ Map.Entry<Integer, Triple<String, RNSPeer, Long>> entry = it.next();
+ // Skip unusable entries. (Fixed: this previously tested getValue() != null, which
+ // skipped every populated entry — making the method a no-op — and would have
+ // thrown NPE below on any entry with a null value.)
+ if (entry == null || entry.getKey() == null || entry.getValue() == null) {
+ continue;
+ }
+ if (Objects.equals(entry.getValue().getA(), signature58)) {
+ // Update requests map to reflect that we've received all chunks
+ Triple<String, RNSPeer, Long> newEntry = new Triple<>(null, null, entry.getValue().getC());
+ arbitraryDataFileListRequests.put(entry.getKey(), newEntry);
+ }
+ }
+ }
+
+ // Network handlers
+
+ /**
+  * Handle an incoming ArbitraryDataFileListMessage.
+  *
+  * If it answers one of our pending requests for the expected signature, record which hashes
+  * the peer claims to hold (and its address, for potential direct connections). If we are
+  * relaying on behalf of another peer and relay mode is enabled, also forward the list on to
+  * the original requester, bumping the hop count.
+  */
+ public void onNetworkArbitraryDataFileListMessage(RNSPeer peer, Message message) {
+ // Don't process if QDN is disabled
+ if (!Settings.getInstance().isQdnEnabled()) {
+ return;
+ }
+
+ ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
+ LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());
+
+ if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) {
+ long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime();
+ LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}",
+ totalRequestTime, arbitraryDataFileListMessage.getRequestHops(),
+ arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
+ }
+
+ // Do we have a pending request for this data?
+ Triple request = arbitraryDataFileListRequests.get(message.getId());
+ if (request == null || request.getA() == null) {
+ return;
+ }
+ // A non-null peer component means we are relaying for that peer rather than requesting for ourselves
+ boolean isRelayRequest = (request.getB() != null);
+
+ // Does this message's signature match what we're expecting?
+ byte[] signature = arbitraryDataFileListMessage.getSignature();
+ String signature58 = Base58.encode(signature);
+ if (!request.getA().equals(signature58)) {
+ return;
+ }
+
+ List hashes = arbitraryDataFileListMessage.getHashes();
+ if (hashes == null || hashes.isEmpty()) {
+ return;
+ }
+
+ ArbitraryTransactionData arbitraryTransactionData = null;
+
+ // Check transaction exists and hashes are correct
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
+ if (!(transactionData instanceof ArbitraryTransactionData))
+ return;
+
+ arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
+
+// // Load data file(s)
+// ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
+//
+// // Check all hashes exist
+// for (byte[] hash : hashes) {
+// //LOGGER.debug("Received hash {}", Base58.encode(hash));
+// if (!arbitraryDataFile.containsChunk(hash)) {
+// // Check the hash against the complete file
+// if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) {
+// LOGGER.info("Received non-matching chunk hash {} for signature {}. This could happen if we haven't obtained the metadata file yet.", Base58.encode(hash), signature58);
+// return;
+// }
+// }
+// }
+
+ if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
+ Long now = NTP.getTime();
+
+ if (RNSArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.size() < MAX_FILE_HASH_RESPONSES) {
+ // Keep track of the hashes this peer reports to have access to
+ for (byte[] hash : hashes) {
+ String hash58 = Base58.encode(hash);
+
+ // Treat null request hops as 100, so that they are able to be sorted (and put to the end of the list)
+ int requestHops = arbitraryDataFileListMessage.getRequestHops() != null ? arbitraryDataFileListMessage.getRequestHops() : 100;
+
+ RNSArbitraryFileListResponseInfo responseInfo = new RNSArbitraryFileListResponseInfo(hash58, signature58,
+ peer, now, arbitraryDataFileListMessage.getRequestTime(), requestHops);
+
+ RNSArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.add(responseInfo);
+ }
+ }
+
+ // Keep track of the source peer, for direct connections
+ if (arbitraryDataFileListMessage.getPeerAddress() != null) {
+ RNSArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique(
+ new RNSArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now));
+ }
+ }
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list for peer %s", peer), e);
+ }
+
+ // Forwarding
+ if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
+ // Never forward lists for blocked names (or for transactions we could not look up)
+ boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
+ if (!isBlocked) {
+ RNSPeer requestingPeer = request.getB();
+ if (requestingPeer != null) {
+ Long requestTime = arbitraryDataFileListMessage.getRequestTime();
+ Integer requestHops = arbitraryDataFileListMessage.getRequestHops();
+
+ // Add each hash to our local mapping so we know who to ask later
+ Long now = NTP.getTime();
+ for (byte[] hash : hashes) {
+ String hash58 = Base58.encode(hash);
+ RNSArbitraryRelayInfo relayInfo = new RNSArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops);
+ RNSArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo);
+ }
+
+ // Bump requestHops if it exists
+ if (requestHops != null) {
+ requestHops++;
+ }
+
+ ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;
+
+ //// TODO - rework for Reticulum
+ //// Remove optional parameters if the requesting peer doesn't support it yet
+ //// A message with less statistical data is better than no message at all
+ //if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
+ // forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
+ //} else {
+ // forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
+ // arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
+ //}
+ forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
+ arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
+ forwardArbitraryDataFileListMessage.setId(message.getId());
+
+ // Forward to requesting peer
+ LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
+ //if (!requestingPeer.sendMessage(forwardArbitraryDataFileListMessage)) {
+ // requestingPeer.disconnect("failed to forward arbitrary data file list");
+ //}
+ requestingPeer.sendMessage(forwardArbitraryDataFileListMessage);
+ }
+ }
+ }
+ }
+
+ public void onNetworkGetArbitraryDataFileListMessage(RNSPeer peer, Message message) {
+ // Don't respond if QDN is disabled
+ if (!Settings.getInstance().isQdnEnabled()) {
+ return;
+ }
+
+ Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet();
+
+ GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
+ byte[] signature = getArbitraryDataFileListMessage.getSignature();
+ String signature58 = Base58.encode(signature);
+ Long now = NTP.getTime();
+ Triple<String, RNSPeer, Long> newEntry = new Triple<>(signature58, peer, now);
+
+ // If we've seen this request recently, then ignore
+ if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
+ LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58);
+ return;
+ }
+
+ List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
+ int hashCount = requestedHashes != null ? requestedHashes.size() : 0;
+ String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer();
+
+ if (requestingPeer != null) {
+ LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58);
+ }
+ else {
+ LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58);
+ }
+
+ List<byte[]> hashes = new ArrayList<>();
+ ArbitraryTransactionData transactionData = null;
+ boolean allChunksExist = false;
+ boolean hasMetadata = false;
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // Firstly we need to lookup this file on chain to get a list of its hashes
+ transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature);
+ if (transactionData instanceof ArbitraryTransactionData) {
+
+ // Check if we're even allowed to serve data for this transaction
+ if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
+
+ // Load file(s) and add any that exist to the list of hashes
+ ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
+
+ // If the peer didn't supply a hash list, we need to return all hashes for this transaction
+ if (requestedHashes == null || requestedHashes.isEmpty()) {
+ requestedHashes = new ArrayList<>();
+
+ // Add the metadata file
+ if (arbitraryDataFile.getMetadataHash() != null) {
+ requestedHashes.add(arbitraryDataFile.getMetadataHash());
+ hasMetadata = true;
+ }
+
+ // Add the chunk hashes
+ if (!arbitraryDataFile.getChunkHashes().isEmpty()) {
+ requestedHashes.addAll(arbitraryDataFile.getChunkHashes());
+ }
+ // Add complete file if there are no hashes
+ else {
+ requestedHashes.add(arbitraryDataFile.getHash());
+ }
+ }
+
+ // Assume all chunks exist, unless one can't be found below
+ allChunksExist = true;
+
+ for (byte[] requestedHash : requestedHashes) {
+ ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature);
+ if (chunk.exists()) {
+ hashes.add(chunk.getHash());
+ //LOGGER.trace("Added hash {}", chunk.getHash58());
+ } else {
+ LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
+ allChunksExist = false;
+ }
+ }
+ }
+ }
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer %s", peer), e);
+ }
+
+ // If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that,
+ // or can use the separate metadata protocol to fetch it. This should greatly reduce network spam.
+ if (hasMetadata && hashes.size() == 1) {
+ hashes.clear();
+ }
+
+ // We should only respond if we have at least one hash
+ if (!hashes.isEmpty()) {
+
+ // Firstly we should keep track of the requesting peer, to allow for potential direct connections later
+ RNSArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);
+
+ // We have all the chunks, so update requests map to reflect that we've sent it
+ // There is no need to keep track of the request, as we can serve all the chunks
+ if (allChunksExist) {
+ newEntry = new Triple<>(null, null, now);
+ arbitraryDataFileListRequests.put(message.getId(), newEntry);
+ }
+
+ //String ourAddress = RNSNetwork.getInstance().getOurExternalIpAddressAndPort();
+ String ourAddress = RNSNetwork.getInstance().getBaseDestination().getHexHash();
+ ArbitraryDataFileListMessage arbitraryDataFileListMessage;
+
+ // TODO: rework for Reticulum
+ // Remove optional parameters if the requesting peer doesn't support it yet
+ // A message with less statistical data is better than no message at all
+ //if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
+ // arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
+ //} else {
+ // arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
+ // hashes, NTP.getTime(), 0, ourAddress, true);
+ //}
+ arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
+
+ arbitraryDataFileListMessage.setId(message.getId());
+
+ //if (!peer.sendMessage(arbitraryDataFileListMessage)) {
+ // LOGGER.debug("Couldn't send list of hashes");
+ // peer.disconnect("failed to send list of hashes");
+ // return;
+ //}
+ peer.sendMessage(arbitraryDataFileListMessage);
+ LOGGER.debug("Sent list of hashes (count: {})", hashes.size());
+
+ if (allChunksExist) {
+ // Nothing left to do, so return to prevent any unnecessary forwarding from occurring
+ LOGGER.debug("No need for any forwarding because file list request is fully served");
+ return;
+ }
+
+ }
+
+ // We may need to forward this request on
+ boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
+ if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
+ // In relay mode - so ask our other peers if they have it
+
+ long requestTime = getArbitraryDataFileListMessage.getRequestTime();
+ int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
+ long totalRequestTime = now - requestTime;
+
+ if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
+ // Relay request hasn't timed out yet, so can potentially be rebroadcast
+ if (requestHops < RELAY_REQUEST_MAX_HOPS) {
+ // Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
+
+ Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
+ relayGetArbitraryDataFileListMessage.setId(message.getId());
+
+ LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
+ //Network.getInstance().broadcast(
+ // broadcastPeer ->
+ // !broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
+ // broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
+ //);
+ RNSNetwork.getInstance().broadcast(broadcastPeer -> relayGetArbitraryDataFileListMessage);
+
+ }
+ else {
+ // This relay request has reached the maximum number of allowed hops
+ }
+ }
+ else {
+ // This relay request has timed out
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileManager.java b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileManager.java
new file mode 100644
index 00000000..fc68fdec
--- /dev/null
+++ b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileManager.java
@@ -0,0 +1,639 @@
+package org.qortal.controller.arbitrary;
+
+import com.google.common.net.InetAddresses;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.qortal.arbitrary.ArbitraryDataFile;
+import org.qortal.controller.Controller;
+import org.qortal.data.arbitrary.RNSArbitraryDirectConnectionInfo;
+import org.qortal.data.arbitrary.RNSArbitraryFileListResponseInfo;
+import org.qortal.data.arbitrary.RNSArbitraryRelayInfo;
+import org.qortal.data.network.PeerData;
+import org.qortal.data.transaction.ArbitraryTransactionData;
+import org.qortal.network.RNSNetwork;
+import org.qortal.network.RNSPeer;
+import org.qortal.network.message.*;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.settings.Settings;
+import org.qortal.utils.ArbitraryTransactionUtils;
+import org.qortal.utils.Base58;
+import org.qortal.utils.NTP;
+
+import java.security.SecureRandom;
+import java.util.*;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.stream.Collectors;
+
+public class RNSArbitraryDataFileManager extends Thread {
+
+ private static final Logger LOGGER = LogManager.getLogger(RNSArbitraryDataFileManager.class);
+
+ private static RNSArbitraryDataFileManager instance;
+ private volatile boolean isStopping = false;
+
+
+ /**
+ * Map to keep track of our in progress (outgoing) arbitrary data file requests
+ */
+ public Map<String, Long> arbitraryDataFileRequests = Collections.synchronizedMap(new HashMap<>());
+
+ /**
+ * Map to keep track of hashes that we might need to relay
+ */
+ public final List<RNSArbitraryRelayInfo> arbitraryRelayMap = Collections.synchronizedList(new ArrayList<>());
+
+ /**
+ * List to keep track of any arbitrary data file hash responses
+ */
+ public final List<RNSArbitraryFileListResponseInfo> arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>());
+
+ /**
+ * List to keep track of peers potentially available for direct connections, based on recent requests
+ */
+ private final List<RNSArbitraryDirectConnectionInfo> directConnectionInfo = Collections.synchronizedList(new ArrayList<>());
+
+ /**
+ * Map to keep track of peers requesting QDN data that we hold.
+ * Key = peer address string, value = time of last request.
+ * This allows for additional "burst" connections beyond existing limits.
+ */
+ private Map<String, Long> recentDataRequests = Collections.synchronizedMap(new HashMap<>());
+
+
+ public static int MAX_FILE_HASH_RESPONSES = 1000;
+
+
+ private RNSArbitraryDataFileManager() {
+ }
+
+ public static RNSArbitraryDataFileManager getInstance() {
+ if (instance == null)
+ instance = new RNSArbitraryDataFileManager();
+
+ return instance;
+ }
+
+ @Override
+ public void run() {
+ Thread.currentThread().setName("Arbitrary Data File Manager");
+
+ try {
+ // Use a fixed thread pool to execute the arbitrary data file requests
+ int threadCount = 5;
+ ExecutorService arbitraryDataFileRequestExecutor = Executors.newFixedThreadPool(threadCount);
+ for (int i = 0; i < threadCount; i++) {
+ arbitraryDataFileRequestExecutor.execute(new ArbitraryDataFileRequestThread());
+ }
+
+ while (!isStopping) {
+ // Nothing to do yet
+ Thread.sleep(1000);
+ }
+ } catch (InterruptedException e) {
+ // Fall-through to exit thread...
+ }
+ }
+
+ public void shutdown() {
+ isStopping = true;
+ this.interrupt();
+ }
+
+
+ public void cleanupRequestCache(Long now) {
+ if (now == null) {
+ return;
+ }
+ final long requestMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_REQUEST_TIMEOUT;
+ arbitraryDataFileRequests.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < requestMinimumTimestamp);
+
+ final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT;
+ arbitraryRelayMap.removeIf(entry -> entry == null || entry.getTimestamp() == null || entry.getTimestamp() < relayMinimumTimestamp);
+ arbitraryDataFileHashResponses.removeIf(entry -> entry.getTimestamp() < relayMinimumTimestamp);
+
+ final long directConnectionInfoMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT;
+ directConnectionInfo.removeIf(entry -> entry.getTimestamp() < directConnectionInfoMinimumTimestamp);
+
+ final long recentDataRequestMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RECENT_DATA_REQUESTS_TIMEOUT;
+ recentDataRequests.entrySet().removeIf(entry -> entry.getValue() < recentDataRequestMinimumTimestamp);
+ }
+
+
+
+ // Fetch data files by hash
+
+ public boolean fetchArbitraryDataFiles(Repository repository,
+ RNSPeer peer,
+ byte[] signature,
+ ArbitraryTransactionData arbitraryTransactionData,
+ List<byte[]> hashes) throws DataException {
+
+ // Load data file(s)
+ ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
+ boolean receivedAtLeastOneFile = false;
+
+ // Now fetch actual data from this peer
+ for (byte[] hash : hashes) {
+ if (isStopping) {
+ return false;
+ }
+ String hash58 = Base58.encode(hash);
+ if (!arbitraryDataFile.chunkExists(hash)) {
+ // Only request the file if we aren't already requesting it from someone else
+ if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
+ LOGGER.debug("Requesting data file {} from peer {}", hash58, peer);
+ Long startTime = NTP.getTime();
+ ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, null, arbitraryTransactionData, signature, hash, null);
+ Long endTime = NTP.getTime();
+ if (receivedArbitraryDataFile != null) {
+ LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFile.getHash58(), peer, (endTime-startTime));
+ receivedAtLeastOneFile = true;
+
+ // Remove this hash from arbitraryDataFileHashResponses now that we have received it
+ arbitraryDataFileHashResponses.remove(hash58);
+ }
+ else {
+ LOGGER.debug("Peer {} didn't respond with data file {} for signature {}. Time taken: {} ms", peer, Base58.encode(hash), Base58.encode(signature), (endTime-startTime));
+
+ // Remove this hash from arbitraryDataFileHashResponses now that we have failed to receive it
+ arbitraryDataFileHashResponses.remove(hash58);
+
+ // Stop asking for files from this peer
+ break;
+ }
+ }
+ else {
+ LOGGER.trace("Already requesting data file {} for signature {} from peer {}", arbitraryDataFile, Base58.encode(signature), peer);
+ }
+ }
+ else {
+ // Remove this hash from arbitraryDataFileHashResponses because we have a local copy
+ arbitraryDataFileHashResponses.remove(hash58);
+ }
+ }
+
+ if (receivedAtLeastOneFile) {
+ // Invalidate the hosted transactions cache as we are now hosting something new
+ ArbitraryDataStorageManager.getInstance().invalidateHostedTransactionsCache();
+
+ // Check if we have all the files we need for this transaction
+ if (arbitraryDataFile.allFilesExist()) {
+
+ // We have all the chunks for this transaction, so we should invalidate the transaction's name's
+ // data cache so that it is rebuilt the next time we serve it
+ ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData);
+ }
+ }
+
+ return receivedAtLeastOneFile;
+ }
+
+ private ArbitraryDataFile fetchArbitraryDataFile(RNSPeer peer, RNSPeer requestingPeer, ArbitraryTransactionData arbitraryTransactionData, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
+ ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
+ boolean fileAlreadyExists = existingFile.exists();
+ String hash58 = Base58.encode(hash);
+ ArbitraryDataFile arbitraryDataFile;
+
+ // Fetch the file if it doesn't exist locally
+ if (!fileAlreadyExists) {
+ LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
+ arbitraryDataFileRequests.put(hash58, NTP.getTime());
+ Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
+
+ Message response = null;
+ //// TODO - revisit (doesn't work with Reticulum)
+ //try {
+ // response = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
+ //} catch (InterruptedException e) {
+ // // Will return below due to null response
+ //}
+ arbitraryDataFileRequests.remove(hash58);
+ LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));
+
+ // We may need to remove the file list request, if we have all the files for this transaction
+ this.handleFileListRequests(signature);
+
+ if (response == null) {
+ LOGGER.debug("Received null response from peer {}", peer);
+ return null;
+ }
+ if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
+ LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
+ return null;
+ }
+
+ ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
+ arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
+ } else {
+ LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
+ arbitraryDataFile = existingFile;
+ }
+
+ if (arbitraryDataFile == null) {
+ // We don't have a file, so give up here
+ return null;
+ }
+
+ // We might want to forward the request to the peer that originally requested it
+ this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);
+
+ boolean isRelayRequest = (requestingPeer != null);
+ if (isRelayRequest) {
+ if (!fileAlreadyExists) {
+ // File didn't exist locally before the request, and it's a forwarding request, so delete it if it exists.
+ // It shouldn't exist on the filesystem yet, but leaving this here just in case.
+ arbitraryDataFile.delete(10);
+ }
+ }
+ else {
+ arbitraryDataFile.save();
+ }
+
+ // If this is a metadata file then we need to update the cache
+ if (arbitraryTransactionData != null && arbitraryTransactionData.getMetadataHash() != null) {
+ if (Arrays.equals(arbitraryTransactionData.getMetadataHash(), hash)) {
+ ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData);
+ }
+ }
+
+ return arbitraryDataFile;
+ }
+
+ private void handleFileListRequests(byte[] signature) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // Fetch the transaction data
+ ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
+ if (arbitraryTransactionData == null) {
+ return;
+ }
+
+ boolean allChunksExist = ArbitraryTransactionUtils.allChunksExist(arbitraryTransactionData);
+
+ if (allChunksExist) {
+ // Update requests map to reflect that we've received all chunks
+ RNSArbitraryDataFileListManager.getInstance().deleteFileListRequestsForSignature(signature);
+ }
+
+ } catch (DataException e) {
+ LOGGER.debug("Unable to handle file list requests: {}", e.getMessage());
+ }
+ }
+
+ public void handleArbitraryDataFileForwarding(RNSPeer requestingPeer, Message message, Message originalMessage) {
+ // Return if there is no originally requesting peer to forward to
+ if (requestingPeer == null) {
+ return;
+ }
+
+ // Return if we're not in relay mode or if this request doesn't need forwarding
+ if (!Settings.getInstance().isRelayModeEnabled()) {
+ return;
+ }
+
+ LOGGER.debug("Received arbitrary data file - forwarding is needed");
+
+ // The ID needs to match that of the original request
+ message.setId(originalMessage.getId());
+
+ //if (!requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
+ // LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer);
+ // requestingPeer.disconnect("failed to forward arbitrary data file");
+ //}
+ //else {
+ // LOGGER.debug("Forwarded arbitrary data file to peer {}", requestingPeer);
+ //}
+ requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
+ }
+
+
+ // Fetch data directly from peers
+
+ private List<RNSArbitraryDirectConnectionInfo> getDirectConnectionInfoForSignature(byte[] signature) {
+ synchronized (directConnectionInfo) {
+ return directConnectionInfo.stream().filter(i -> Arrays.equals(i.getSignature(), signature)).collect(Collectors.toList());
+ }
+ }
+
+ /**
+ * Add an ArbitraryDirectConnectionInfo item, but only if one with this peer-signature combination
+ * doesn't already exist.
+ * @param connectionInfo - the direct connection info to add
+ */
+ public void addDirectConnectionInfoIfUnique(RNSArbitraryDirectConnectionInfo connectionInfo) {
+ boolean peerAlreadyExists;
+ synchronized (directConnectionInfo) {
+ peerAlreadyExists = directConnectionInfo.stream()
+ .anyMatch(i -> Arrays.equals(i.getSignature(), connectionInfo.getSignature())
+ && Objects.equals(i.getPeerAddress(), connectionInfo.getPeerAddress()));
+ }
+ if (!peerAlreadyExists) {
+ directConnectionInfo.add(connectionInfo);
+ }
+ }
+
+ private void removeDirectConnectionInfo(RNSArbitraryDirectConnectionInfo connectionInfo) {
+ this.directConnectionInfo.remove(connectionInfo);
+ }
+
+ public boolean fetchDataFilesFromPeersForSignature(byte[] signature) {
+ String signature58 = Base58.encode(signature);
+
+ boolean success = false;
+
+ try {
+ while (!success) {
+ if (isStopping) {
+ return false;
+ }
+ Thread.sleep(500L);
+
+ // Firstly fetch peers that claim to be hosting files for this signature
+ List<RNSArbitraryDirectConnectionInfo> connectionInfoList = getDirectConnectionInfoForSignature(signature);
+ if (connectionInfoList == null || connectionInfoList.isEmpty()) {
+ LOGGER.debug("No remaining direct connection peers found for signature {}", signature58);
+ return false;
+ }
+
+ LOGGER.debug("Attempting a direct peer connection for signature {}...", signature58);
+
+ // Peers found, so pick one with the highest number of chunks
+ Comparator<RNSArbitraryDirectConnectionInfo> highestChunkCountFirstComparator =
+ Comparator.comparingInt(RNSArbitraryDirectConnectionInfo::getHashCount).reversed();
+ RNSArbitraryDirectConnectionInfo directConnectionInfo = connectionInfoList.stream()
+ .sorted(highestChunkCountFirstComparator).findFirst().orElse(null);
+
+ if (directConnectionInfo == null) {
+ return false;
+ }
+
+ // Remove from the list so that a different peer is tried next time
+ removeDirectConnectionInfo(directConnectionInfo);
+
+ //// TODO - rework this section (RNS network address?)
+ //String peerAddressString = directConnectionInfo.getPeerAddress();
+ //
+ //// Parse the peer address to find the host and port
+ //String host = null;
+ //int port = -1;
+ //String[] parts = peerAddressString.split(":");
+ //if (parts.length > 1) {
+ // host = parts[0];
+ // port = Integer.parseInt(parts[1]);
+ //} else {
+ // // Assume no port included
+ // host = peerAddressString;
+ // // Use default listen port
+ // port = Settings.getInstance().getDefaultListenPort();
+ //}
+ //
+ //String peerAddressStringWithPort = String.format("%s:%d", host, port);
+ //success = Network.getInstance().requestDataFromPeer(peerAddressStringWithPort, signature);
+ //
+ //int defaultPort = Settings.getInstance().getDefaultListenPort();
+ //
+ //// If unsuccessful, and using a non-standard port, try a second connection with the default listen port,
+ //// since almost all nodes use that. This is a workaround to account for any ephemeral ports that may
+ //// have made it into the dataset.
+ //if (!success) {
+ // if (host != null && port > 0) {
+ // if (port != defaultPort) {
+ // String newPeerAddressString = String.format("%s:%d", host, defaultPort);
+ // success = Network.getInstance().requestDataFromPeer(newPeerAddressString, signature);
+ // }
+ // }
+ //}
+ //
+ //// If _still_ unsuccessful, try matching the peer's IP address with some known peers, and then connect
+ //// to each of those in turn until one succeeds.
+ //if (!success) {
+ // if (host != null) {
+ // final String finalHost = host;
+ // List knownPeers = Network.getInstance().getAllKnownPeers().stream()
+ // .filter(knownPeerData -> knownPeerData.getAddress().getHost().equals(finalHost))
+ // .collect(Collectors.toList());
+ // // Loop through each match and attempt a connection
+ // for (PeerData matchingPeer : knownPeers) {
+ // String matchingPeerAddress = matchingPeer.getAddress().toString();
+ // int matchingPeerPort = matchingPeer.getAddress().getPort();
+ // // Make sure that it's not a port we've already tried
+ // if (matchingPeerPort != port && matchingPeerPort != defaultPort) {
+ // success = Network.getInstance().requestDataFromPeer(matchingPeerAddress, signature);
+ // if (success) {
+ // // Successfully connected, so stop making connections
+ // break;
+ // }
+ // }
+ // }
+ // }
+ //}
+
+ if (success) {
+ // We were able to connect with a peer, so track the request
+ RNSArbitraryDataFileListManager.getInstance().addToSignatureRequests(signature58, false, true);
+ }
+
+ }
+ } catch (InterruptedException e) {
+ // Do nothing
+ }
+
+ return success;
+ }
+
+
+ // Relays
+
+ private List<RNSArbitraryRelayInfo> getRelayInfoListForHash(String hash58) {
+ synchronized (arbitraryRelayMap) {
+ return arbitraryRelayMap.stream()
+ .filter(relayInfo -> Objects.equals(relayInfo.getHash58(), hash58))
+ .collect(Collectors.toList());
+ }
+ }
+
+ private RNSArbitraryRelayInfo getOptimalRelayInfoEntryForHash(String hash58) {
+ LOGGER.trace("Fetching relay info for hash: {}", hash58);
+ List<RNSArbitraryRelayInfo> relayInfoList = this.getRelayInfoListForHash(hash58);
+ if (relayInfoList != null && !relayInfoList.isEmpty()) {
+
+ // Remove any with null requestHops
+ relayInfoList.removeIf(r -> r.getRequestHops() == null);
+
+ // If list is now empty, then just return one at random
+ if (relayInfoList.isEmpty()) {
+ return this.getRandomRelayInfoEntryForHash(hash58);
+ }
+
+ // Sort by number of hops (lowest first)
+ relayInfoList.sort(Comparator.comparingInt(RNSArbitraryRelayInfo::getRequestHops));
+
+ // FUTURE: secondary sort by requestTime?
+
+ RNSArbitraryRelayInfo relayInfo = relayInfoList.get(0);
+
+ LOGGER.trace("Returning optimal relay info for hash: {} (requestHops {})", hash58, relayInfo.getRequestHops());
+ return relayInfo;
+ }
+ LOGGER.trace("No relay info exists for hash: {}", hash58);
+ return null;
+ }
+
+ private RNSArbitraryRelayInfo getRandomRelayInfoEntryForHash(String hash58) {
+ LOGGER.trace("Fetching random relay info for hash: {}", hash58);
+ List<RNSArbitraryRelayInfo> relayInfoList = this.getRelayInfoListForHash(hash58);
+ if (relayInfoList != null && !relayInfoList.isEmpty()) {
+
+ // Pick random item
+ int index = new SecureRandom().nextInt(relayInfoList.size());
+ LOGGER.trace("Returning random relay info for hash: {} (index {})", hash58, index);
+ return relayInfoList.get(index);
+ }
+ LOGGER.trace("No relay info exists for hash: {}", hash58);
+ return null;
+ }
+
+ public void addToRelayMap(RNSArbitraryRelayInfo newEntry) {
+ if (newEntry == null || !newEntry.isValid()) {
+ return;
+ }
+
+ // Remove existing entry for this peer if it exists, to renew the timestamp
+ this.removeFromRelayMap(newEntry);
+
+ // Re-add
+ arbitraryRelayMap.add(newEntry);
+ LOGGER.debug("Added entry to relay map: {}", newEntry);
+ }
+
+ private void removeFromRelayMap(RNSArbitraryRelayInfo entry) {
+ arbitraryRelayMap.removeIf(relayInfo -> relayInfo.equals(entry));
+ }
+
+
+ // Peers requesting QDN data from us
+
+ /**
+ * Add an address string of a peer that is trying to request data from us.
+ * @param peerAddress
+ */
+ public void addRecentDataRequest(String peerAddress) {
+ if (peerAddress == null) {
+ return;
+ }
+
+ Long now = NTP.getTime();
+ if (now == null) {
+ return;
+ }
+
+ // Make sure to remove the port, since it isn't guaranteed to match next time
+ String[] parts = peerAddress.split(":");
+ if (parts.length == 0) {
+ return;
+ }
+ String host = parts[0];
+ if (!InetAddresses.isInetAddress(host)) {
+ // Invalid host
+ return;
+ }
+
+ this.recentDataRequests.put(host, now);
+ }
+
+ public boolean isPeerRequestingData(String peerAddressWithoutPort) {
+ return this.recentDataRequests.containsKey(peerAddressWithoutPort);
+ }
+
+ public boolean hasPendingDataRequest() {
+ return !this.recentDataRequests.isEmpty();
+ }
+
+
+ // Network handlers
+
+ public void onNetworkGetArbitraryDataFileMessage(RNSPeer peer, Message message) {
+ // Don't respond if QDN is disabled
+ if (!Settings.getInstance().isQdnEnabled()) {
+ return;
+ }
+
+ GetArbitraryDataFileMessage getArbitraryDataFileMessage = (GetArbitraryDataFileMessage) message;
+ byte[] hash = getArbitraryDataFileMessage.getHash();
+ String hash58 = Base58.encode(hash);
+ byte[] signature = getArbitraryDataFileMessage.getSignature();
+ Controller.getInstance().stats.getArbitraryDataFileMessageStats.requests.incrementAndGet();
+
+ LOGGER.debug("Received GetArbitraryDataFileMessage from peer {} for hash {}", peer, Base58.encode(hash));
+
+ try {
+ ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
+ RNSArbitraryRelayInfo relayInfo = this.getOptimalRelayInfoEntryForHash(hash58);
+
+ if (arbitraryDataFile.exists()) {
+ LOGGER.trace("Hash {} exists", hash58);
+
+ // We can serve the file directly as we already have it
+ LOGGER.debug("Sending file {}...", arbitraryDataFile);
+ ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile);
+ arbitraryDataFileMessage.setId(message.getId());
+ //if (!peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
+ // LOGGER.debug("Couldn't send file {}", arbitraryDataFile);
+ // peer.disconnect("failed to send file");
+ //}
+ //else {
+ // LOGGER.debug("Sent file {}", arbitraryDataFile);
+ //}
+ peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
+ }
+ //// TODO: rework (doesn't work with Reticulum)
+ //else if (relayInfo != null) {
+ // LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
+ // // We need to ask this peer for the file
+ // Peer peerToAsk = relayInfo.getPeer();
+ // if (peerToAsk != null) {
+ //
+ // // Forward the message to this peer
+ // LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58);
+ // // No need to pass arbitraryTransactionData below because this is only used for metadata caching,
+ // // and metadata isn't retained when relaying.
+ // this.fetchArbitraryDataFile(peerToAsk, peer, null, signature, hash, message);
+ // }
+ // else {
+ // LOGGER.debug("Peer {} not found in relay info", peer);
+ // }
+ //}
+ else {
+ LOGGER.debug("Hash {} doesn't exist and we don't have relay info", hash58);
+
+ // We don't have this file
+ Controller.getInstance().stats.getArbitraryDataFileMessageStats.unknownFiles.getAndIncrement();
+
+ // Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout
+ LOGGER.debug(String.format("Sending 'file unknown' response to peer %s for GET_FILE request for unknown file %s", peer, arbitraryDataFile));
+
+ //// Send generic 'unknown' message as it's very short
+ //Message fileUnknownMessage = peer.getPeersVersion() >= GenericUnknownMessage.MINIMUM_PEER_VERSION
+ // ? new GenericUnknownMessage()
+ // : new BlockSummariesMessage(Collections.emptyList());
+ //fileUnknownMessage.setId(message.getId());
+ //if (!peer.sendMessage(fileUnknownMessage)) {
+ // LOGGER.debug("Couldn't send file-unknown response");
+ // peer.disconnect("failed to send file-unknown response");
+ //}
+ //else {
+ // LOGGER.debug("Sent file-unknown response for file {}", arbitraryDataFile);
+ //}
+ Message fileUnknownMessage = new GenericUnknownMessage();
+ peer.sendMessage(fileUnknownMessage);
+ }
+ }
+ catch (DataException e) {
+ LOGGER.debug("Unable to handle request for arbitrary data file: {}", hash58);
+ }
+ }
+
+}
diff --git a/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryMetadataManager.java b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryMetadataManager.java
new file mode 100644
index 00000000..45e674b7
--- /dev/null
+++ b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryMetadataManager.java
@@ -0,0 +1,481 @@
+package org.qortal.controller.arbitrary;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.qortal.arbitrary.ArbitraryDataFile;
+import org.qortal.arbitrary.ArbitraryDataResource;
+import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
+import org.qortal.controller.Controller;
+import org.qortal.data.transaction.ArbitraryTransactionData;
+import org.qortal.data.transaction.TransactionData;
+import org.qortal.network.RNSNetwork;
+import org.qortal.network.RNSPeer;
+import org.qortal.network.message.ArbitraryMetadataMessage;
+import org.qortal.network.message.GetArbitraryMetadataMessage;
+import org.qortal.network.message.Message;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.settings.Settings;
+import org.qortal.utils.Base58;
+import org.qortal.utils.ListUtils;
+import org.qortal.utils.NTP;
+import org.qortal.utils.Triple;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.*;
+
+public class RNSArbitraryMetadataManager {
+
+ private static final Logger LOGGER = LogManager.getLogger(ArbitraryMetadataManager.class);
+
+ private static RNSArbitraryMetadataManager instance;
+
+ /**
+ * Map of recent incoming requests for ARBITRARY transaction metadata.
+ *
+ * Key is original request's message ID
+ * Value is Triple<transaction signature in base58, first requesting peer, first request's timestamp>
+ *
+ * If peer is null then either:
+ *
+ * - we are the original requesting peer
+ * - we have already sent data payload to original requesting peer.
+ *
+ * If signature is null then we have already received the file list and either:
+ *
+ * - we are the original requesting peer and have processed it
+ * - we have forwarded the metadata
+ *
+ */
+ public Map> arbitraryMetadataRequests = Collections.synchronizedMap(new HashMap<>());
+
+ /**
+ * Map to keep track of in progress arbitrary metadata requests
+ * Key: string - the signature encoded in base58
+ * Value: Triple
+ */
+ private Map> arbitraryMetadataSignatureRequests = Collections.synchronizedMap(new HashMap<>());
+
+
+ private RNSArbitraryMetadataManager() {
+ }
+
+ public static RNSArbitraryMetadataManager getInstance() {
+ if (instance == null)
+ instance = new RNSArbitraryMetadataManager();
+
+ return instance;
+ }
+
+ public void cleanupRequestCache(Long now) {
+ if (now == null) {
+ return;
+ }
+ final long requestMinimumTimestamp = now - ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT;
+ arbitraryMetadataRequests.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < requestMinimumTimestamp);
+ }
+
+
+ public ArbitraryDataTransactionMetadata fetchMetadata(ArbitraryDataResource arbitraryDataResource, boolean useRateLimiter) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ // Find latest transaction
+ ArbitraryTransactionData latestTransaction = repository.getArbitraryRepository()
+ .getLatestTransaction(arbitraryDataResource.getResourceId(), arbitraryDataResource.getService(),
+ null, arbitraryDataResource.getIdentifier());
+
+ if (latestTransaction != null) {
+ byte[] signature = latestTransaction.getSignature();
+ byte[] metadataHash = latestTransaction.getMetadataHash();
+ if (metadataHash == null) {
+ // This resource doesn't have metadata
+ throw new IllegalArgumentException("This resource doesn't have metadata");
+ }
+
+ ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature);
+ if (!metadataFile.exists()) {
+ // Request from network
+ this.fetchArbitraryMetadata(latestTransaction, useRateLimiter);
+ }
+
+ // Now check again as it may have been downloaded above
+ if (metadataFile.exists()) {
+ // Use local copy
+ ArbitraryDataTransactionMetadata transactionMetadata = new ArbitraryDataTransactionMetadata(metadataFile.getFilePath());
+ try {
+ transactionMetadata.read();
+ } catch (DataException e) {
+ // Invalid file, so delete it
+ LOGGER.info("Deleting invalid metadata file due to exception: {}", e.getMessage());
+ transactionMetadata.delete();
+ return null;
+ }
+ return transactionMetadata;
+ }
+ }
+
+ } catch (DataException | IOException e) {
+ LOGGER.error("Repository issue when fetching arbitrary transaction metadata", e);
+ }
+
+ return null;
+ }
+
+
+ // Request metadata from network
+
+ public byte[] fetchArbitraryMetadata(ArbitraryTransactionData arbitraryTransactionData, boolean useRateLimiter) {
+ byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
+ if (metadataHash == null) {
+ return null;
+ }
+
+ byte[] signature = arbitraryTransactionData.getSignature();
+ String signature58 = Base58.encode(signature);
+
+ // Require an NTP sync
+ Long now = NTP.getTime();
+ if (now == null) {
+ return null;
+ }
+
+ // If we've already tried too many times in a short space of time, make sure to give up
+ if (useRateLimiter && !this.shouldMakeMetadataRequestForSignature(signature58)) {
+ LOGGER.trace("Skipping metadata request for signature {} due to rate limit", signature58);
+ return null;
+ }
+ this.addToSignatureRequests(signature58, true, false);
+
+ //List handshakedPeers = Network.getInstance().getImmutableHandshakedPeers();
+ List handshakedPeers = RNSNetwork.getInstance().getLinkedPeers();
+ LOGGER.debug(String.format("Sending metadata request for signature %s to %d peers...", signature58, handshakedPeers.size()));
+
+ // Build request
+ Message getArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, now, 0);
+
+ // Save our request into requests map
+ Triple requestEntry = new Triple<>(signature58, null, NTP.getTime());
+
+ // Assign random ID to this message
+ int id;
+ do {
+ id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1;
+
+ // Put queue into map (keyed by message ID) so we can poll for a response
+ // If putIfAbsent() doesn't return null, then this ID is already taken
+ } while (arbitraryMetadataRequests.put(id, requestEntry) != null);
+ getArbitraryMetadataMessage.setId(id);
+
+ // Broadcast request
+ RNSNetwork.getInstance().broadcast(peer -> getArbitraryMetadataMessage);
+
+ // Poll to see if data has arrived
+ final long singleWait = 100;
+ long totalWait = 0;
+ while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) {
+ try {
+ Thread.sleep(singleWait);
+ } catch (InterruptedException e) {
+ break;
+ }
+
+ requestEntry = arbitraryMetadataRequests.get(id);
+ if (requestEntry == null)
+ return null;
+
+ if (requestEntry.getA() == null)
+ break;
+
+ totalWait += singleWait;
+ }
+
+ try {
+ ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature);
+ if (metadataFile.exists()) {
+ return metadataFile.getBytes();
+ }
+ } catch (DataException e) {
+ // Do nothing
+ }
+
+ return null;
+ }
+
+
+ // Track metadata lookups by signature
+
+ private boolean shouldMakeMetadataRequestForSignature(String signature58) {
+ Triple request = arbitraryMetadataSignatureRequests.get(signature58);
+
+ if (request == null) {
+ // Not attempted yet
+ return true;
+ }
+
+ // Extract the components
+ Integer networkBroadcastCount = request.getA();
+ // Integer directPeerRequestCount = request.getB();
+ Long lastAttemptTimestamp = request.getC();
+
+ if (lastAttemptTimestamp == null) {
+ // Not attempted yet
+ return true;
+ }
+
+ long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp;
+
+ // Allow a second attempt after 60 seconds
+ if (timeSinceLastAttempt > 60 * 1000L) {
+ // We haven't tried for at least 60 seconds
+
+ if (networkBroadcastCount < 2) {
+ // We've made less than 2 total attempts
+ return true;
+ }
+ }
+
+ // Then allow another attempt after 60 minutes
+ if (timeSinceLastAttempt > 60 * 60 * 1000L) {
+ // We haven't tried for at least 60 minutes
+
+ if (networkBroadcastCount < 3) {
+ // We've made less than 3 total attempts
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ public boolean isSignatureRateLimited(byte[] signature) {
+ String signature58 = Base58.encode(signature);
+ return !this.shouldMakeMetadataRequestForSignature(signature58);
+ }
+
+ public long lastRequestForSignature(byte[] signature) {
+ String signature58 = Base58.encode(signature);
+ Triple request = arbitraryMetadataSignatureRequests.get(signature58);
+
+ if (request == null) {
+ // Not attempted yet
+ return 0;
+ }
+
+ // Extract the components
+ Long lastAttemptTimestamp = request.getC();
+ if (lastAttemptTimestamp != null) {
+ return lastAttemptTimestamp;
+ }
+ return 0;
+ }
+
+ public void addToSignatureRequests(String signature58, boolean incrementNetworkRequests, boolean incrementPeerRequests) {
+ Triple request = arbitraryMetadataSignatureRequests.get(signature58);
+ Long now = NTP.getTime();
+
+ if (request == null) {
+ // No entry yet
+ Triple newRequest = new Triple<>(0, 0, now);
+ arbitraryMetadataSignatureRequests.put(signature58, newRequest);
+ }
+ else {
+ // There is an existing entry
+ if (incrementNetworkRequests) {
+ request.setA(request.getA() + 1);
+ }
+ if (incrementPeerRequests) {
+ request.setB(request.getB() + 1);
+ }
+ request.setC(now);
+ arbitraryMetadataSignatureRequests.put(signature58, request);
+ }
+ }
+
+ public void removeFromSignatureRequests(String signature58) {
+ arbitraryMetadataSignatureRequests.remove(signature58);
+ }
+
+
+ // Network handlers
+
+ public void onNetworkArbitraryMetadataMessage(RNSPeer peer, Message message) {
+ // Don't process if QDN is disabled
+ if (!Settings.getInstance().isQdnEnabled()) {
+ return;
+ }
+
+ ArbitraryMetadataMessage arbitraryMetadataMessage = (ArbitraryMetadataMessage) message;
+ LOGGER.debug("Received metadata from peer {}", peer);
+
+ // Do we have a pending request for this data?
+ Triple request = arbitraryMetadataRequests.get(message.getId());
+ if (request == null || request.getA() == null) {
+ return;
+ }
+ boolean isRelayRequest = (request.getB() != null);
+
+ // Does this message's signature match what we're expecting?
+ byte[] signature = arbitraryMetadataMessage.getSignature();
+ String signature58 = Base58.encode(signature);
+ if (!request.getA().equals(signature58)) {
+ return;
+ }
+
+ // Update requests map to reflect that we've received this metadata
+ Triple newEntry = new Triple<>(null, null, request.getC());
+ arbitraryMetadataRequests.put(message.getId(), newEntry);
+
+ // Get transaction info
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
+ if (!(transactionData instanceof ArbitraryTransactionData)) {
+ return;
+ }
+ ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
+
+ // Check if the name is blocked
+ boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
+
+ // Save if not blocked
+ ArbitraryDataFile arbitraryMetadataFile = arbitraryMetadataMessage.getArbitraryMetadataFile();
+ if (!isBlocked && arbitraryMetadataFile != null) {
+ arbitraryMetadataFile.save();
+ }
+
+ // Forwarding
+ if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
+ if (!isBlocked) {
+ RNSPeer requestingPeer = request.getB();
+ if (requestingPeer != null) {
+
+ ArbitraryMetadataMessage forwardArbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, arbitraryMetadataMessage.getArbitraryMetadataFile());
+ forwardArbitraryMetadataMessage.setId(arbitraryMetadataMessage.getId());
+
+ // Forward to requesting peer
+ LOGGER.debug("Forwarding metadata to requesting peer: {}", requestingPeer);
+ //if (!requestingPeer.sendMessage(forwardArbitraryMetadataMessage)) {
+ // requestingPeer.disconnect("failed to forward arbitrary metadata");
+ //}
+ requestingPeer.sendMessage(forwardArbitraryMetadataMessage);
+ }
+ }
+ }
+
+ // Add to resource queue to update arbitrary resource caches
+ if (arbitraryTransactionData != null) {
+ ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData);
+ }
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while saving arbitrary transaction metadata from peer %s", peer), e);
+ }
+ }
+
+ public void onNetworkGetArbitraryMetadataMessage(RNSPeer peer, Message message) {
+ // Don't respond if QDN is disabled
+ if (!Settings.getInstance().isQdnEnabled()) {
+ return;
+ }
+
+ Controller.getInstance().stats.getArbitraryMetadataMessageStats.requests.incrementAndGet();
+
+ GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) message;
+ byte[] signature = getArbitraryMetadataMessage.getSignature();
+ String signature58 = Base58.encode(signature);
+ Long now = NTP.getTime();
+ Triple newEntry = new Triple<>(signature58, peer, now);
+
+ // If we've seen this request recently, then ignore
+ if (arbitraryMetadataRequests.putIfAbsent(message.getId(), newEntry) != null) {
+ LOGGER.debug("Ignoring metadata request from peer {} for signature {}", peer, signature58);
+ return;
+ }
+
+ LOGGER.debug("Received metadata request from peer {} for signature {}", peer, signature58);
+
+ ArbitraryTransactionData transactionData = null;
+ ArbitraryDataFile metadataFile = null;
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // Firstly we need to lookup this file on chain to get its metadata hash
+ transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature);
+ if (transactionData instanceof ArbitraryTransactionData) {
+
+ // Check if we're even allowed to serve metadata for this transaction
+ if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
+
+ byte[] metadataHash = transactionData.getMetadataHash();
+ if (metadataHash != null) {
+
+ // Load metadata file
+ metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature);
+ }
+ }
+ }
+
+ } catch (DataException e) {
+ LOGGER.error(String.format("Repository issue while fetching arbitrary metadata for peer %s", peer), e);
+ }
+
+ // We should only respond if we have the metadata file
+ if (metadataFile != null && metadataFile.exists()) {
+
+ // We have the metadata file, so update requests map to reflect that we've sent it
+ newEntry = new Triple<>(null, null, now);
+ arbitraryMetadataRequests.put(message.getId(), newEntry);
+
+ ArbitraryMetadataMessage arbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, metadataFile);
+ arbitraryMetadataMessage.setId(message.getId());
+ //if (!peer.sendMessage(arbitraryMetadataMessage)) {
+ // LOGGER.debug("Couldn't send metadata");
+ // peer.disconnect("failed to send metadata");
+ // return;
+ //}
+ peer.sendMessage(arbitraryMetadataMessage);
+ LOGGER.debug("Sent metadata");
+
+ // Nothing left to do, so return to prevent any unnecessary forwarding from occurring
+ LOGGER.debug("No need for any forwarding because metadata request is fully served");
+ return;
+
+ }
+
+ // We may need to forward this request on
+ boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
+ if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
+ // In relay mode - so ask our other peers if they have it
+
+ long requestTime = getArbitraryMetadataMessage.getRequestTime();
+ int requestHops = getArbitraryMetadataMessage.getRequestHops() + 1;
+ long totalRequestTime = now - requestTime;
+
+ if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
+ // Relay request hasn't timed out yet, so can potentially be rebroadcast
+ if (requestHops < RELAY_REQUEST_MAX_HOPS) {
+ // Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
+
+ Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops);
+ relayGetArbitraryMetadataMessage.setId(message.getId());
+
+ LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
+ //Network.getInstance().broadcast(
+ // broadcastPeer ->
+ // !broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
+ // broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage);
+ RNSNetwork.getInstance().broadcast(broadcastPeer -> relayGetArbitraryMetadataMessage);
+
+ }
+ else {
+ // This relay request has reached the maximum number of allowed hops
+ }
+ }
+ else {
+ // This relay request has timed out
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/org/qortal/controller/arbitrary/RebuildArbitraryResourceCacheTask.java b/src/main/java/org/qortal/controller/arbitrary/RebuildArbitraryResourceCacheTask.java
new file mode 100644
index 00000000..d7472325
--- /dev/null
+++ b/src/main/java/org/qortal/controller/arbitrary/RebuildArbitraryResourceCacheTask.java
@@ -0,0 +1,33 @@
+package org.qortal.controller.arbitrary;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
+
+import java.util.TimerTask;
+
+public class RebuildArbitraryResourceCacheTask extends TimerTask {
+
+ private static final Logger LOGGER = LogManager.getLogger(RebuildArbitraryResourceCacheTask.class);
+
+ public static final long MILLIS_IN_HOUR = 60 * 60 * 1000;
+
+ public static final long MILLIS_IN_MINUTE = 60 * 1000;
+
+ private static final String REBUILD_ARBITRARY_RESOURCE_CACHE_TASK = "Rebuild Arbitrary Resource Cache Task";
+
+ @Override
+ public void run() {
+
+ Thread.currentThread().setName(REBUILD_ARBITRARY_RESOURCE_CACHE_TASK);
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, true);
+ }
+ catch( DataException e ) {
+ LOGGER.error(e.getMessage(), e);
+ }
+ }
+}
diff --git a/src/main/java/org/qortal/controller/hsqldb/HSQLDBBalanceRecorder.java b/src/main/java/org/qortal/controller/hsqldb/HSQLDBBalanceRecorder.java
new file mode 100644
index 00000000..43e7c542
--- /dev/null
+++ b/src/main/java/org/qortal/controller/hsqldb/HSQLDBBalanceRecorder.java
@@ -0,0 +1,139 @@
+package org.qortal.controller.hsqldb;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.util.PropertySource;
+import org.qortal.data.account.AccountBalanceData;
+import org.qortal.data.account.BlockHeightRange;
+import org.qortal.data.account.BlockHeightRangeAddressAmounts;
+import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
+import org.qortal.settings.Settings;
+import org.qortal.utils.BalanceRecorderUtils;
+
+import java.util.Comparator;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.stream.Collectors;
+
+public class HSQLDBBalanceRecorder extends Thread{
+
+ private static final Logger LOGGER = LogManager.getLogger(HSQLDBBalanceRecorder.class);
+
+ private static HSQLDBBalanceRecorder SINGLETON = null;
+
+ private ConcurrentHashMap> balancesByHeight = new ConcurrentHashMap<>();
+
+ private ConcurrentHashMap> balancesByAddress = new ConcurrentHashMap<>();
+
+ private CopyOnWriteArrayList balanceDynamics = new CopyOnWriteArrayList<>();
+
+ private int priorityRequested;
+ private int frequency;
+ private int capacity;
+
+ private HSQLDBBalanceRecorder( int priorityRequested, int frequency, int capacity) {
+
+ super("Balance Recorder");
+
+ this.priorityRequested = priorityRequested;
+ this.frequency = frequency;
+ this.capacity = capacity;
+ }
+
+ public static Optional getInstance() {
+
+ if( SINGLETON == null ) {
+
+ SINGLETON
+ = new HSQLDBBalanceRecorder(
+ Settings.getInstance().getBalanceRecorderPriority(),
+ Settings.getInstance().getBalanceRecorderFrequency(),
+ Settings.getInstance().getBalanceRecorderCapacity()
+ );
+
+ }
+ else if( SINGLETON == null ) {
+
+ return Optional.empty();
+ }
+
+ return Optional.of(SINGLETON);
+ }
+
+ @Override
+ public void run() {
+
+ Thread.currentThread().setName("Balance Recorder");
+
+ HSQLDBCacheUtils.startRecordingBalances(this.balancesByHeight, this.balanceDynamics, this.priorityRequested, this.frequency, this.capacity);
+ }
+
+ public List getLatestDynamics(int limit, long offset) {
+
+ List latest = this.balanceDynamics.stream()
+ .sorted(BalanceRecorderUtils.BLOCK_HEIGHT_RANGE_ADDRESS_AMOUNTS_COMPARATOR.reversed())
+ .skip(offset)
+ .limit(limit)
+ .collect(Collectors.toList());
+
+ return latest;
+ }
+
+ public List getRanges(Integer offset, Integer limit, Boolean reverse) {
+
+ if( reverse ) {
+ return this.balanceDynamics.stream()
+ .map(BlockHeightRangeAddressAmounts::getRange)
+ .sorted(BalanceRecorderUtils.BLOCK_HEIGHT_RANGE_COMPARATOR.reversed())
+ .skip(offset)
+ .limit(limit)
+ .collect(Collectors.toList());
+ }
+ else {
+ return this.balanceDynamics.stream()
+ .map(BlockHeightRangeAddressAmounts::getRange)
+ .sorted(BalanceRecorderUtils.BLOCK_HEIGHT_RANGE_COMPARATOR)
+ .skip(offset)
+ .limit(limit)
+ .collect(Collectors.toList());
+ }
+ }
+
+ public Optional getAddressAmounts(BlockHeightRange range) {
+
+ return this.balanceDynamics.stream()
+ .filter( dynamic -> dynamic.getRange().equals(range))
+ .findAny();
+ }
+
+ public Optional getRange( int height ) {
+ return this.balanceDynamics.stream()
+ .map(BlockHeightRangeAddressAmounts::getRange)
+ .filter( range -> range.getBegin() < height && range.getEnd() >= height )
+ .findAny();
+ }
+
+ private Optional getLastHeight() {
+ return this.balancesByHeight.keySet().stream().sorted(Comparator.reverseOrder()).findFirst();
+ }
+
+ public List getBlocksRecorded() {
+
+ return this.balancesByHeight.keySet().stream().collect(Collectors.toList());
+ }
+
+ public List getAccountBalanceRecordings(String address) {
+ return this.balancesByAddress.get(address);
+ }
+
+ @Override
+ public String toString() {
+ return "HSQLDBBalanceRecorder{" +
+ "priorityRequested=" + priorityRequested +
+ ", frequency=" + frequency +
+ ", capacity=" + capacity +
+ '}';
+ }
+}
diff --git a/src/main/java/org/qortal/controller/hsqldb/HSQLDBDataCacheManager.java b/src/main/java/org/qortal/controller/hsqldb/HSQLDBDataCacheManager.java
new file mode 100644
index 00000000..434a67f1
--- /dev/null
+++ b/src/main/java/org/qortal/controller/hsqldb/HSQLDBDataCacheManager.java
@@ -0,0 +1,22 @@
+package org.qortal.controller.hsqldb;
+
+import org.qortal.data.arbitrary.ArbitraryResourceCache;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
+import org.qortal.repository.hsqldb.HSQLDBRepository;
+import org.qortal.settings.Settings;
+
+public class HSQLDBDataCacheManager extends Thread{
+
+ public HSQLDBDataCacheManager() {}
+
+ @Override
+ public void run() {
+ Thread.currentThread().setName("HSQLDB Data Cache Manager");
+
+ HSQLDBCacheUtils.startCaching(
+ Settings.getInstance().getDbCacheThreadPriority(),
+ Settings.getInstance().getDbCacheFrequency()
+ );
+ }
+}
diff --git a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
index f06efdb8..3bc3db99 100644
--- a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
+++ b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
@@ -11,6 +11,8 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
+import static java.lang.Thread.MIN_PRIORITY;
+
public class AtStatesPruner implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(AtStatesPruner.class);
@@ -37,82 +39,97 @@ public class AtStatesPruner implements Runnable {
}
}
+ int pruneStartHeight;
+ int maxLatestAtStatesHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
- int pruneStartHeight = repository.getATRepository().getAtPruneHeight();
- int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
+ pruneStartHeight = repository.getATRepository().getAtPruneHeight();
+ maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.discardChanges();
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
+ } catch (Exception e) {
+ LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
- while (!Controller.isStopping()) {
- repository.discardChanges();
+ while (!Controller.isStopping()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
- Thread.sleep(Settings.getInstance().getAtStatesPruneInterval());
+ try {
+ repository.discardChanges();
- BlockData chainTip = Controller.getInstance().getChainTip();
- if (chainTip == null || NTP.getTime() == null)
- continue;
+ Thread.sleep(Settings.getInstance().getAtStatesPruneInterval());
- // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Synchronizer.getInstance().isSynchronizing())
- continue;
+ BlockData chainTip = Controller.getInstance().getChainTip();
+ if (chainTip == null || NTP.getTime() == null)
+ continue;
- // Prune AT states for all blocks up until our latest minus pruneBlockLimit
- final int ourLatestHeight = chainTip.getHeight();
- int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
+ // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
+ if (Synchronizer.getInstance().isSynchronizing())
+ continue;
- // In archive mode we are only allowed to trim blocks that have already been archived
- if (archiveMode) {
- upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
+ // Prune AT states for all blocks up until our latest minus pruneBlockLimit
+ final int ourLatestHeight = chainTip.getHeight();
+ int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
- // TODO: validate that the actual archived data exists before pruning it?
- }
+ // In archive mode we are only allowed to trim blocks that have already been archived
+ if (archiveMode) {
+ upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
- int upperBatchHeight = pruneStartHeight + Settings.getInstance().getAtStatesPruneBatchSize();
- int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
+ // TODO: validate that the actual archived data exists before pruning it?
+ }
- if (pruneStartHeight >= upperPruneHeight)
- continue;
+ int upperBatchHeight = pruneStartHeight + Settings.getInstance().getAtStatesPruneBatchSize();
+ int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
- LOGGER.debug(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
+ if (pruneStartHeight >= upperPruneHeight)
+ continue;
- int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight);
- repository.saveChanges();
- int numAtStateDataRowsTrimmed = repository.getATRepository().trimAtStates(
- pruneStartHeight, upperPruneHeight, Settings.getInstance().getAtStatesTrimLimit());
- repository.saveChanges();
+ LOGGER.info(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
- if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) {
- final int finalPruneStartHeight = pruneStartHeight;
- LOGGER.debug(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
- numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""),
- finalPruneStartHeight, upperPruneHeight));
- } else {
- // Can we move onto next batch?
- if (upperPrunableHeight > upperBatchHeight) {
- pruneStartHeight = upperBatchHeight;
- repository.getATRepository().setAtPruneHeight(pruneStartHeight);
- maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
- repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
- repository.saveChanges();
+ int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight);
+ repository.saveChanges();
+ int numAtStateDataRowsTrimmed = repository.getATRepository().trimAtStates(
+ pruneStartHeight, upperPruneHeight, Settings.getInstance().getAtStatesTrimLimit());
+ repository.saveChanges();
+ if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) {
final int finalPruneStartHeight = pruneStartHeight;
- LOGGER.debug(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
+ LOGGER.info(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
+ numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""),
+ finalPruneStartHeight, upperPruneHeight));
+ } else {
+ // Can we move onto next batch?
+ if (upperPrunableHeight > upperBatchHeight) {
+ pruneStartHeight = upperBatchHeight;
+ repository.getATRepository().setAtPruneHeight(pruneStartHeight);
+ maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
+ repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
+ repository.saveChanges();
+
+ final int finalPruneStartHeight = pruneStartHeight;
+ LOGGER.info(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
+ } else {
+ // We've pruned up to the upper prunable height
+ // Back off for a while to save CPU for syncing
+ repository.discardChanges();
+ Thread.sleep(5 * 60 * 1000L);
+ }
}
- else {
- // We've pruned up to the upper prunable height
- // Back off for a while to save CPU for syncing
- repository.discardChanges();
- Thread.sleep(5*60*1000L);
+ } catch (InterruptedException e) {
+ if (Controller.isStopping()) {
+ LOGGER.info("AT States Pruning Shutting Down");
+ } else {
+ LOGGER.warn("AT States Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.warn("AT States Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch(Exception e){
+ LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (DataException e) {
- LOGGER.warn(String.format("Repository issue trying to prune AT states: %s", e.getMessage()));
- } catch (InterruptedException e) {
- // Time to exit
}
}
-
}
diff --git a/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
index 125628f1..d188f81a 100644
--- a/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
+++ b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
@@ -11,6 +11,8 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
+import static java.lang.Thread.MIN_PRIORITY;
+
public class AtStatesTrimmer implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(AtStatesTrimmer.class);
@@ -24,66 +26,83 @@ public class AtStatesTrimmer implements Runnable {
return;
}
+ int trimStartHeight;
+ int maxLatestAtStatesHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
- int trimStartHeight = repository.getATRepository().getAtTrimHeight();
- int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
+ trimStartHeight = repository.getATRepository().getAtTrimHeight();
+ maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.discardChanges();
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
+ } catch (Exception e) {
+ LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
- while (!Controller.isStopping()) {
- repository.discardChanges();
+ while (!Controller.isStopping()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ try {
+ repository.discardChanges();
- Thread.sleep(Settings.getInstance().getAtStatesTrimInterval());
+ Thread.sleep(Settings.getInstance().getAtStatesTrimInterval());
- BlockData chainTip = Controller.getInstance().getChainTip();
- if (chainTip == null || NTP.getTime() == null)
- continue;
+ BlockData chainTip = Controller.getInstance().getChainTip();
+ if (chainTip == null || NTP.getTime() == null)
+ continue;
- // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Synchronizer.getInstance().isSynchronizing())
- continue;
+ // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
+ if (Synchronizer.getInstance().isSynchronizing())
+ continue;
- long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
- // We want to keep AT states near the tip of our copy of blockchain so we can process/orphan nearby blocks
- long chainTrimmableTimestamp = chainTip.getTimestamp() - Settings.getInstance().getAtStatesMaxLifetime();
+ long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
+ // We want to keep AT states near the tip of our copy of blockchain so we can process/orphan nearby blocks
+ long chainTrimmableTimestamp = chainTip.getTimestamp() - Settings.getInstance().getAtStatesMaxLifetime();
- long upperTrimmableTimestamp = Math.min(currentTrimmableTimestamp, chainTrimmableTimestamp);
- int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
+ long upperTrimmableTimestamp = Math.min(currentTrimmableTimestamp, chainTrimmableTimestamp);
+ int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
- int upperBatchHeight = trimStartHeight + Settings.getInstance().getAtStatesTrimBatchSize();
- int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
+ int upperBatchHeight = trimStartHeight + Settings.getInstance().getAtStatesTrimBatchSize();
+ int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
- if (trimStartHeight >= upperTrimHeight)
- continue;
+ if (trimStartHeight >= upperTrimHeight)
+ continue;
- int numAtStatesTrimmed = repository.getATRepository().trimAtStates(trimStartHeight, upperTrimHeight, Settings.getInstance().getAtStatesTrimLimit());
- repository.saveChanges();
-
- if (numAtStatesTrimmed > 0) {
- final int finalTrimStartHeight = trimStartHeight;
- LOGGER.debug(() -> String.format("Trimmed %d AT state%s between blocks %d and %d",
- numAtStatesTrimmed, (numAtStatesTrimmed != 1 ? "s" : ""),
- finalTrimStartHeight, upperTrimHeight));
- } else {
- // Can we move onto next batch?
- if (upperTrimmableHeight > upperBatchHeight) {
- trimStartHeight = upperBatchHeight;
- repository.getATRepository().setAtTrimHeight(trimStartHeight);
- maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
- repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
- repository.saveChanges();
+ int numAtStatesTrimmed = repository.getATRepository().trimAtStates(trimStartHeight, upperTrimHeight, Settings.getInstance().getAtStatesTrimLimit());
+ repository.saveChanges();
+ if (numAtStatesTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
- LOGGER.debug(() -> String.format("Bumping AT state base trim height to %d", finalTrimStartHeight));
+ LOGGER.info(() -> String.format("Trimmed %d AT state%s between blocks %d and %d",
+ numAtStatesTrimmed, (numAtStatesTrimmed != 1 ? "s" : ""),
+ finalTrimStartHeight, upperTrimHeight));
+ } else {
+ // Can we move onto next batch?
+ if (upperTrimmableHeight > upperBatchHeight) {
+ trimStartHeight = upperBatchHeight;
+ repository.getATRepository().setAtTrimHeight(trimStartHeight);
+ maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
+ repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
+ repository.saveChanges();
+
+ final int finalTrimStartHeight = trimStartHeight;
+ LOGGER.info(() -> String.format("Bumping AT state base trim height to %d", finalTrimStartHeight));
+ }
}
+ } catch (InterruptedException e) {
+ if(Controller.isStopping()) {
+ LOGGER.info("AT States Trimming Shutting Down");
+ }
+ else {
+ LOGGER.warn("AT States Trimming interrupted. Trying again. Report this error immediately to the developers.", e);
+ }
+ } catch (Exception e) {
+ LOGGER.warn("AT States Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (DataException e) {
- LOGGER.warn(String.format("Repository issue trying to trim AT states: %s", e.getMessage()));
- } catch (InterruptedException e) {
- // Time to exit
}
}
diff --git a/src/main/java/org/qortal/controller/repository/BlockArchiver.java b/src/main/java/org/qortal/controller/repository/BlockArchiver.java
index a643d9b9..01cf40ed 100644
--- a/src/main/java/org/qortal/controller/repository/BlockArchiver.java
+++ b/src/main/java/org/qortal/controller/repository/BlockArchiver.java
@@ -15,11 +15,13 @@ import org.qortal.utils.NTP;
import java.io.IOException;
+import static java.lang.Thread.NORM_PRIORITY;
+
public class BlockArchiver implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(BlockArchiver.class);
- private static final long INITIAL_SLEEP_PERIOD = 5 * 60 * 1000L + 1234L; // ms
+ private static final long INITIAL_SLEEP_PERIOD = 15 * 60 * 1000L; // ms
public void run() {
Thread.currentThread().setName("Block archiver");
@@ -28,11 +30,13 @@ public class BlockArchiver implements Runnable {
return;
}
+ int startHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
// Don't even start building until initial rush has ended
Thread.sleep(INITIAL_SLEEP_PERIOD);
- int startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
+ startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
// Don't attempt to archive if we have no ATStatesHeightIndex, as it will be too slow
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
@@ -41,77 +45,87 @@ public class BlockArchiver implements Runnable {
repository.discardChanges();
return;
}
-
- LOGGER.info("Starting block archiver from height {}...", startHeight);
-
- while (!Controller.isStopping()) {
- repository.discardChanges();
-
- Thread.sleep(Settings.getInstance().getArchiveInterval());
-
- BlockData chainTip = Controller.getInstance().getChainTip();
- if (chainTip == null || NTP.getTime() == null) {
- continue;
- }
-
- // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Synchronizer.getInstance().isSynchronizing()) {
- continue;
- }
-
- // Don't attempt to archive if we're not synced yet
- final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
- if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
- continue;
- }
-
-
- // Build cache of blocks
- try {
- final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
- BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
- BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
- switch (result) {
- case OK:
- // Increment block archive height
- startHeight += writer.getWrittenCount();
- repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
- repository.saveChanges();
- break;
-
- case STOPPING:
- return;
-
- // We've reached the limit of the blocks we can archive
- // Sleep for a while to allow more to become available
- case NOT_ENOUGH_BLOCKS:
- // We didn't reach our file size target, so that must mean that we don't have enough blocks
- // yet or something went wrong. Sleep for a while and then try again.
- repository.discardChanges();
- Thread.sleep(60 * 60 * 1000L); // 1 hour
- break;
-
- case BLOCK_NOT_FOUND:
- // We tried to archive a block that didn't exist. This is a major failure and likely means
- // that a bootstrap or re-sync is needed. Try again every minute until then.
- LOGGER.info("Error: block not found when building archive. If this error persists, " +
- "a bootstrap or re-sync may be needed.");
- repository.discardChanges();
- Thread.sleep( 60 * 1000L); // 1 minute
- break;
- }
-
- } catch (IOException | TransformationException e) {
- LOGGER.info("Caught exception when creating block cache", e);
- }
-
- }
- } catch (DataException e) {
- LOGGER.info("Caught exception when creating block cache", e);
- } catch (InterruptedException e) {
- // Do nothing
+ } catch (Exception e) {
+ LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
}
- }
+ LOGGER.info("Starting block archiver from height {}...", startHeight);
+ while (!Controller.isStopping()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ try {
+ repository.discardChanges();
+
+ Thread.sleep(Settings.getInstance().getArchiveInterval());
+
+ BlockData chainTip = Controller.getInstance().getChainTip();
+ if (chainTip == null || NTP.getTime() == null) {
+ continue;
+ }
+
+ // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
+ if (Synchronizer.getInstance().isSynchronizing()) {
+ continue;
+ }
+
+ // Don't attempt to archive if we're not synced yet
+ final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
+ if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
+ continue;
+ }
+
+ // Build cache of blocks
+ try {
+ final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
+ BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
+ BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
+ switch (result) {
+ case OK:
+ // Increment block archive height
+ startHeight += writer.getWrittenCount();
+ repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
+ repository.saveChanges();
+ break;
+
+ case STOPPING:
+ return;
+
+ // We've reached the limit of the blocks we can archive
+ // Sleep for a while to allow more to become available
+ case NOT_ENOUGH_BLOCKS:
+ // We didn't reach our file size target, so that must mean that we don't have enough blocks
+ // yet or something went wrong. Sleep for a while and then try again.
+ repository.discardChanges();
+ Thread.sleep(2 * 60 * 60 * 1000L); // 2 hours
+ break;
+
+ case BLOCK_NOT_FOUND:
+ // We tried to archive a block that didn't exist. This is a major failure and likely means
+ // that a bootstrap or re-sync is needed. Try again every minute until then.
+ LOGGER.info("Error: block not found when building archive. If this error persists, " +
+ "a bootstrap or re-sync may be needed.");
+ repository.discardChanges();
+ Thread.sleep(60 * 1000L); // 1 minute
+ break;
+ }
+
+ } catch (IOException | TransformationException e) {
+ LOGGER.info("Caught exception when creating block cache", e);
+ }
+ } catch (InterruptedException e) {
+ if (Controller.isStopping()) {
+ LOGGER.info("Block Archiving Shutting Down");
+ } else {
+ LOGGER.warn("Block Archiving interrupted. Trying again. Report this error immediately to the developers.", e);
+ }
+ } catch (Exception e) {
+ LOGGER.warn("Block Archiving stopped working. Trying again. Report this error immediately to the developers.", e);
+ }
+ } catch(Exception e){
+ LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ }
+ }
+ }
}
diff --git a/src/main/java/org/qortal/controller/repository/BlockPruner.java b/src/main/java/org/qortal/controller/repository/BlockPruner.java
index 23e3a45a..7801f284 100644
--- a/src/main/java/org/qortal/controller/repository/BlockPruner.java
+++ b/src/main/java/org/qortal/controller/repository/BlockPruner.java
@@ -11,6 +11,8 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
+import static java.lang.Thread.NORM_PRIORITY;
+
public class BlockPruner implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(BlockPruner.class);
@@ -37,8 +39,10 @@ public class BlockPruner implements Runnable {
}
}
+ int pruneStartHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
- int pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
+ pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
// Don't attempt to prune if we have no ATStatesHeightIndex, as it will be too slow
boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
@@ -46,75 +50,90 @@ public class BlockPruner implements Runnable {
LOGGER.info("Unable to start block pruner due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
return;
}
+ } catch (Exception e) {
+ LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
- while (!Controller.isStopping()) {
- repository.discardChanges();
+ while (!Controller.isStopping()) {
- Thread.sleep(Settings.getInstance().getBlockPruneInterval());
+ try (final Repository repository = RepositoryManager.getRepository()) {
- BlockData chainTip = Controller.getInstance().getChainTip();
- if (chainTip == null || NTP.getTime() == null)
- continue;
+ try {
+ repository.discardChanges();
- // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Synchronizer.getInstance().isSynchronizing()) {
- continue;
- }
+ Thread.sleep(Settings.getInstance().getBlockPruneInterval());
- // Don't attempt to prune if we're not synced yet
- final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
- if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
- continue;
- }
+ BlockData chainTip = Controller.getInstance().getChainTip();
+ if (chainTip == null || NTP.getTime() == null)
+ continue;
- // Prune all blocks up until our latest minus pruneBlockLimit
- final int ourLatestHeight = chainTip.getHeight();
- int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
+ // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
+ if (Synchronizer.getInstance().isSynchronizing()) {
+ continue;
+ }
- // In archive mode we are only allowed to trim blocks that have already been archived
- if (archiveMode) {
- upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
- }
+ // Don't attempt to prune if we're not synced yet
+ final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
+ if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
+ continue;
+ }
- int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
- int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
+ // Prune all blocks up until our latest minus pruneBlockLimit
+ final int ourLatestHeight = chainTip.getHeight();
+ int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
- if (pruneStartHeight >= upperPruneHeight) {
- continue;
- }
+ // In archive mode we are only allowed to trim blocks that have already been archived
+ if (archiveMode) {
+ upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
+ }
- LOGGER.debug(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
+ int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
+ int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
- int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
- repository.saveChanges();
+ if (pruneStartHeight >= upperPruneHeight) {
+ continue;
+ }
- if (numBlocksPruned > 0) {
- LOGGER.debug(String.format("Pruned %d block%s between %d and %d",
- numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
- pruneStartHeight, upperPruneHeight));
- } else {
- final int nextPruneHeight = upperPruneHeight + 1;
- repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
+ LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
+
+ int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
- LOGGER.debug(String.format("Bumping block base prune height to %d", pruneStartHeight));
- // Can we move onto next batch?
- if (upperPrunableHeight > nextPruneHeight) {
- pruneStartHeight = nextPruneHeight;
+ if (numBlocksPruned > 0) {
+ LOGGER.info(String.format("Pruned %d block%s between %d and %d",
+ numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
+ pruneStartHeight, upperPruneHeight));
+ } else {
+ final int nextPruneHeight = upperPruneHeight + 1;
+ repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
+ repository.saveChanges();
+ LOGGER.info(String.format("Bumping block base prune height to %d", pruneStartHeight));
+
+ // Can we move onto next batch?
+ if (upperPrunableHeight > nextPruneHeight) {
+ pruneStartHeight = nextPruneHeight;
+ }
+ else {
+ // We've pruned up to the upper prunable height
+ // Back off for a while to save CPU for syncing
+ repository.discardChanges();
+ Thread.sleep(10*60*1000L);
+ }
+ }
+ } catch (InterruptedException e) {
+ if(Controller.isStopping()) {
+ LOGGER.info("Block Pruning Shutting Down");
}
else {
- // We've pruned up to the upper prunable height
- // Back off for a while to save CPU for syncing
- repository.discardChanges();
- Thread.sleep(10*60*1000L);
+ LOGGER.warn("Block Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.warn("Block Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch(Exception e){
+ LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (DataException e) {
- LOGGER.warn(String.format("Repository issue trying to prune blocks: %s", e.getMessage()));
- } catch (InterruptedException e) {
- // Time to exit
}
}
-
}
diff --git a/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java b/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
index d74df4b5..c2d37e14 100644
--- a/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
+++ b/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
@@ -12,6 +12,8 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.NTP;
+import static java.lang.Thread.NORM_PRIORITY;
+
public class OnlineAccountsSignaturesTrimmer implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(OnlineAccountsSignaturesTrimmer.class);
@@ -26,61 +28,77 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
return;
}
+ int trimStartHeight;
+
try (final Repository repository = RepositoryManager.getRepository()) {
// Don't even start trimming until initial rush has ended
Thread.sleep(INITIAL_SLEEP_PERIOD);
- int trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
+ trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
+ } catch (Exception e) {
+ LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+ return;
+ }
- while (!Controller.isStopping()) {
- repository.discardChanges();
+ while (!Controller.isStopping()) {
+ try (final Repository repository = RepositoryManager.getRepository()) {
- Thread.sleep(Settings.getInstance().getOnlineSignaturesTrimInterval());
+ try {
+ repository.discardChanges();
- BlockData chainTip = Controller.getInstance().getChainTip();
- if (chainTip == null || NTP.getTime() == null)
- continue;
+ Thread.sleep(Settings.getInstance().getOnlineSignaturesTrimInterval());
- // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Synchronizer.getInstance().isSynchronizing())
- continue;
+ BlockData chainTip = Controller.getInstance().getChainTip();
+ if (chainTip == null || NTP.getTime() == null)
+ continue;
- // Trim blockchain by removing 'old' online accounts signatures
- long upperTrimmableTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
- int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
+ // Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
+ if (Synchronizer.getInstance().isSynchronizing())
+ continue;
- int upperBatchHeight = trimStartHeight + Settings.getInstance().getOnlineSignaturesTrimBatchSize();
- int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
+ // Trim blockchain by removing 'old' online accounts signatures
+ long upperTrimmableTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
+ int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
- if (trimStartHeight >= upperTrimHeight)
- continue;
+ int upperBatchHeight = trimStartHeight + Settings.getInstance().getOnlineSignaturesTrimBatchSize();
+ int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
- int numSigsTrimmed = repository.getBlockRepository().trimOldOnlineAccountsSignatures(trimStartHeight, upperTrimHeight);
- repository.saveChanges();
+ if (trimStartHeight >= upperTrimHeight)
+ continue;
- if (numSigsTrimmed > 0) {
- final int finalTrimStartHeight = trimStartHeight;
- LOGGER.debug(() -> String.format("Trimmed %d online accounts signature%s between blocks %d and %d",
- numSigsTrimmed, (numSigsTrimmed != 1 ? "s" : ""),
- finalTrimStartHeight, upperTrimHeight));
- } else {
- // Can we move onto next batch?
- if (upperTrimmableHeight > upperBatchHeight) {
- trimStartHeight = upperBatchHeight;
-
- repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(trimStartHeight);
- repository.saveChanges();
+ int numSigsTrimmed = repository.getBlockRepository().trimOldOnlineAccountsSignatures(trimStartHeight, upperTrimHeight);
+ repository.saveChanges();
+ if (numSigsTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
- LOGGER.debug(() -> String.format("Bumping online accounts signatures base trim height to %d", finalTrimStartHeight));
+ LOGGER.info(() -> String.format("Trimmed %d online accounts signature%s between blocks %d and %d",
+ numSigsTrimmed, (numSigsTrimmed != 1 ? "s" : ""),
+ finalTrimStartHeight, upperTrimHeight));
+ } else {
+ // Can we move onto next batch?
+ if (upperTrimmableHeight > upperBatchHeight) {
+ trimStartHeight = upperBatchHeight;
+
+ repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(trimStartHeight);
+ repository.saveChanges();
+
+ final int finalTrimStartHeight = trimStartHeight;
+ LOGGER.info(() -> String.format("Bumping online accounts signatures base trim height to %d", finalTrimStartHeight));
+ }
}
+ } catch (InterruptedException e) {
+ if(Controller.isStopping()) {
+ LOGGER.info("Online Accounts Signatures Trimming Shutting Down");
+ }
+ else {
+ LOGGER.warn("Online Accounts Signatures Trimming interrupted. Trying again. Report this error immediately to the developers.", e);
+ }
+ } catch (Exception e) {
+ LOGGER.warn("Online Accounts Signatures Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
}
+ } catch (Exception e) {
+ LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
- } catch (DataException e) {
- LOGGER.warn(String.format("Repository issue trying to trim online accounts signatures: %s", e.getMessage()));
- } catch (InterruptedException e) {
- // Time to exit
}
}
-
}
diff --git a/src/main/java/org/qortal/controller/repository/PruneManager.java b/src/main/java/org/qortal/controller/repository/PruneManager.java
index d48f85f7..8865668b 100644
--- a/src/main/java/org/qortal/controller/repository/PruneManager.java
+++ b/src/main/java/org/qortal/controller/repository/PruneManager.java
@@ -40,7 +40,7 @@ public class PruneManager {
}
public void start() {
- this.executorService = Executors.newCachedThreadPool(new DaemonThreadFactory());
+ this.executorService = Executors.newCachedThreadPool(new DaemonThreadFactory(Settings.getInstance().getPruningThreadPriority()));
if (Settings.getInstance().isTopOnly()) {
// Top-only-sync
diff --git a/src/main/java/org/qortal/controller/tradebot/BitcoinACCTv1TradeBot.java b/src/main/java/org/qortal/controller/tradebot/BitcoinACCTv1TradeBot.java
index 259a16b8..e7cb0fb8 100644
--- a/src/main/java/org/qortal/controller/tradebot/BitcoinACCTv1TradeBot.java
+++ b/src/main/java/org/qortal/controller/tradebot/BitcoinACCTv1TradeBot.java
@@ -7,6 +7,7 @@ import org.bitcoinj.script.Script.ScriptType;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.account.PublicKeyAccount;
import org.qortal.api.model.crosschain.TradeBotCreateRequest;
+import org.qortal.api.resource.CrossChainUtils;
import org.qortal.asset.Asset;
import org.qortal.crosschain.*;
import org.qortal.crypto.Crypto;
@@ -527,7 +528,7 @@ public class BitcoinACCTv1TradeBot implements AcctTradeBot {
// P2SH-A funding confirmed
// Attempt to send MESSAGE to Bob's Qortal trade address
- byte[] messageData = BitcoinACCTv1.buildOfferMessage(tradeBotData.getTradeForeignPublicKeyHash(), tradeBotData.getHashOfSecret(), tradeBotData.getLockTimeA());
+ byte[] messageData = CrossChainUtils.buildOfferMessage(tradeBotData.getTradeForeignPublicKeyHash(), tradeBotData.getHashOfSecret(), tradeBotData.getLockTimeA());
String messageRecipient = crossChainTradeData.qortalCreatorTradeAddress;
boolean isMessageAlreadySent = repository.getMessageRepository().exists(tradeBotData.getTradeNativePublicKey(), messageRecipient, messageData);
@@ -893,7 +894,7 @@ public class BitcoinACCTv1TradeBot implements AcctTradeBot {
// Redeem P2SH-B using secret-B
Coin redeemAmount = Coin.valueOf(P2SH_B_OUTPUT_AMOUNT); // An actual amount to avoid dust filter, remaining used as fees. The real funds are in P2SH-A.
ECKey redeemKey = ECKey.fromPrivate(tradeBotData.getTradePrivateKey());
- List fundingOutputs = bitcoin.getUnspentOutputs(p2shAddressB);
+ List fundingOutputs = bitcoin.getUnspentOutputs(p2shAddressB, false);
byte[] receivingAccountInfo = tradeBotData.getReceivingAccountInfo();
Transaction p2shRedeemTransaction = BitcoinyHTLC.buildRedeemTransaction(bitcoin.getNetworkParameters(), redeemAmount, redeemKey,
@@ -1063,7 +1064,7 @@ public class BitcoinACCTv1TradeBot implements AcctTradeBot {
case FUNDED: {
Coin redeemAmount = Coin.valueOf(crossChainTradeData.expectedForeignAmount - P2SH_B_OUTPUT_AMOUNT);
ECKey redeemKey = ECKey.fromPrivate(tradeBotData.getTradePrivateKey());
- List fundingOutputs = bitcoin.getUnspentOutputs(p2shAddressA);
+ List fundingOutputs = bitcoin.getUnspentOutputs(p2shAddressA, false);
Transaction p2shRedeemTransaction = BitcoinyHTLC.buildRedeemTransaction(bitcoin.getNetworkParameters(), redeemAmount, redeemKey,
fundingOutputs, redeemScriptA, secretA, receivingAccountInfo);
@@ -1135,7 +1136,7 @@ public class BitcoinACCTv1TradeBot implements AcctTradeBot {
case FUNDED:{
Coin refundAmount = Coin.valueOf(P2SH_B_OUTPUT_AMOUNT); // An actual amount to avoid dust filter, remaining used as fees.
ECKey refundKey = ECKey.fromPrivate(tradeBotData.getTradePrivateKey());
- List