From 52b6b79b083ae92074692333a6071759ac3841d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=BCrg=20Schulthess?= Date: Sun, 16 Mar 2025 14:04:06 +0100 Subject: [PATCH] RNS mostly implemented. Todo - removing bytest from buffer when reading (threading issue?) --- .../org/qortal/controller/Controller.java | 710 +++++++++++++++- .../controller/RNSTransactionImporter.java | 460 +++++++++++ .../RNSArbitraryDataFileListManager.java | 731 ++++++++++++++++ .../RNSArbitraryDataFileManager.java | 639 ++++++++++++++ .../RNSArbitraryMetadataManager.java | 481 +++++++++++ .../controller/tradebot/RNSTradeBot.java | 778 ++++++++++++++++++ .../RNSArbitraryDirectConnectionInfo.java | 59 ++ .../RNSArbitraryFileListResponseInfo.java | 11 + .../data/arbitrary/RNSArbitraryRelayInfo.java | 73 ++ .../org/qortal/data/network/RNSPeerData.java | 117 +++ .../java/org/qortal/network/RNSNetwork.java | 124 ++- src/main/java/org/qortal/network/RNSPeer.java | 124 ++- .../qortal/network/task/RNSBroadcastTask.java | 19 + .../qortal/network/task/RNSMessageTask.java | 29 + .../java/org/qortal/settings/Settings.java | 21 + 15 files changed, 4328 insertions(+), 48 deletions(-) create mode 100644 src/main/java/org/qortal/controller/RNSTransactionImporter.java create mode 100644 src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileListManager.java create mode 100644 src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileManager.java create mode 100644 src/main/java/org/qortal/controller/arbitrary/RNSArbitraryMetadataManager.java create mode 100644 src/main/java/org/qortal/controller/tradebot/RNSTradeBot.java create mode 100644 src/main/java/org/qortal/data/arbitrary/RNSArbitraryDirectConnectionInfo.java create mode 100644 src/main/java/org/qortal/data/arbitrary/RNSArbitraryFileListResponseInfo.java create mode 100644 src/main/java/org/qortal/data/arbitrary/RNSArbitraryRelayInfo.java create mode 100644 src/main/java/org/qortal/data/network/RNSPeerData.java create mode 100644 src/main/java/org/qortal/network/task/RNSBroadcastTask.java create mode 100644 src/main/java/org/qortal/network/task/RNSMessageTask.java diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index b3ca7f8d..5697f1ae 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -18,6 +18,7 @@ import org.qortal.controller.hsqldb.HSQLDBDataCacheManager; import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.controller.repository.PruneManager; import org.qortal.controller.tradebot.TradeBot; +import org.qortal.controller.tradebot.RNSTradeBot; import org.qortal.data.account.AccountBalanceData; import org.qortal.data.account.AccountData; import org.qortal.data.block.BlockData; @@ -34,6 +35,7 @@ import org.qortal.gui.Gui; import org.qortal.gui.SysTray; import org.qortal.network.Network; import org.qortal.network.RNSNetwork; +import org.qortal.network.RNSPeer; import org.qortal.network.Peer; import org.qortal.network.PeerAddress; import org.qortal.network.message.*; @@ -1245,20 +1247,20 @@ public class Controller extends Thread { } public void doRNSNetworkBroadcast() { - //if (Settings.getInstance().isLite()) { - // // Lite nodes have nothing to broadcast - // return; - //} - //RNSNetwork network = RNSNetwork.getInstance(); + if (Settings.getInstance().isLite()) { + // Lite nodes have nothing to broadcast + return; + } + RNSNetwork network = RNSNetwork.getInstance(); - //// Send our 
current height - //network.broadcastOurChain(); + // Send our current height + network.broadcastOurChain(); - //// Request unconfirmed transaction signatures, but only if we're up-to-date. - //// if we're not up-to-dat then priority is synchronizing first - //if (isUpToDate()) { - // network.broadcast(network::buildGetUnconfirmedTransactionsMessage); - //} + // Request unconfirmed transaction signatures, but only if we're up-to-date. + // if we're not up-to-dat then priority is synchronizing first + if (isUpToDateRNS()) { + network.broadcast(network::buildGetUnconfirmedTransactionsMessage); + } } @@ -2193,4 +2195,688 @@ public class Controller extends Thread { public StatsSnapshot getStatsSnapshot() { return this.stats; } + + public void onRNSNetworkMessage(RNSPeer peer, Message message) { + LOGGER.trace(() -> String.format("Processing %s message from %s", message.getType().name(), peer)); + + // Ordered by message type value + switch (message.getType()) { + case GET_BLOCK: + onRNSNetworkGetBlockMessage(peer, message); + break; + + case GET_BLOCK_SUMMARIES: + onRNSNetworkGetBlockSummariesMessage(peer, message); + break; + + case GET_SIGNATURES_V2: + onRNSNetworkGetSignaturesV2Message(peer, message); + break; + + case HEIGHT_V2: + onRNSNetworkHeightV2Message(peer, message); + break; + + case BLOCK_SUMMARIES_V2: + onRNSNetworkBlockSummariesV2Message(peer, message); + break; + + case GET_TRANSACTION: + RNSTransactionImporter.getInstance().onNetworkGetTransactionMessage(peer, message); + break; + + case TRANSACTION: + RNSTransactionImporter.getInstance().onNetworkTransactionMessage(peer, message); + break; + + case GET_UNCONFIRMED_TRANSACTIONS: + RNSTransactionImporter.getInstance().onNetworkGetUnconfirmedTransactionsMessage(peer, message); + break; + + case TRANSACTION_SIGNATURES: + RNSTransactionImporter.getInstance().onNetworkTransactionSignaturesMessage(peer, message); + break; + + //case GET_ONLINE_ACCOUNTS_V3: + // OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsV3Message(peer, message); + // break; + // + //case ONLINE_ACCOUNTS_V3: + // OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV3Message(peer, message); + // break; + + //// TODO: Compiles but much of the Manager details need to be rethought for Reticulum + //case GET_ARBITRARY_DATA: + // // Not currently supported + // break; + //// + //case ARBITRARY_DATA_FILE_LIST: + // RNSArbitraryDataFileListManager.getInstance().onNetworkArbitraryDataFileListMessage(peer, message); + // break; + // + //case GET_ARBITRARY_DATA_FILE: + // RNSArbitraryDataFileManager.getInstance().onNetworkGetArbitraryDataFileMessage(peer, message); + // break; + // + //case GET_ARBITRARY_DATA_FILE_LIST: + // RNSArbitraryDataFileListManager.getInstance().onNetworkGetArbitraryDataFileListMessage(peer, message); + // break; + // + case ARBITRARY_SIGNATURES: + // Not currently supported + break; + + case GET_ARBITRARY_METADATA: + RNSArbitraryMetadataManager.getInstance().onNetworkGetArbitraryMetadataMessage(peer, message); + break; + + case ARBITRARY_METADATA: + RNSArbitraryMetadataManager.getInstance().onNetworkArbitraryMetadataMessage(peer, message); + break; + + case GET_TRADE_PRESENCES: + RNSTradeBot.getInstance().onGetTradePresencesMessage(peer, message); + break; + + case TRADE_PRESENCES: + RNSTradeBot.getInstance().onTradePresencesMessage(peer, message); + break; + + case GET_ACCOUNT: + onRNSNetworkGetAccountMessage(peer, message); + break; + + case GET_ACCOUNT_BALANCE: + onRNSNetworkGetAccountBalanceMessage(peer, message); + break; 
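+            // The remaining cases handle remote account/name lookups, mirroring the corresponding TCP Controller handlers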
+ + case GET_ACCOUNT_TRANSACTIONS: + onRNSNetworkGetAccountTransactionsMessage(peer, message); + break; + + case GET_ACCOUNT_NAMES: + onRNSNetworkGetAccountNamesMessage(peer, message); + break; + + case GET_NAME: + onRNSNetworkGetNameMessage(peer, message); + break; + + default: + LOGGER.debug(() -> String.format("Unhandled %s message [ID %d] from peer %s", message.getType().name(), message.getId(), peer)); + break; + } + } + + private void onRNSNetworkGetBlockMessage(RNSPeer peer, Message message) { + GetBlockMessage getBlockMessage = (GetBlockMessage) message; + byte[] signature = getBlockMessage.getSignature(); + this.stats.getBlockMessageStats.requests.incrementAndGet(); + + ByteArray signatureAsByteArray = ByteArray.wrap(signature); + + CachedBlockMessage cachedBlockMessage = this.blockMessageCache.get(signatureAsByteArray); + int blockCacheSize = Settings.getInstance().getBlockCacheSize(); + + // Check cached latest block message + if (cachedBlockMessage != null) { + this.stats.getBlockMessageStats.cacheHits.incrementAndGet(); + + // We need to duplicate it to prevent multiple threads setting ID on the same message + CachedBlockMessage clonedBlockMessage = Message.cloneWithNewId(cachedBlockMessage, message.getId()); + + //if (!peer.sendMessage(clonedBlockMessage)) + // peer.disconnect("failed to send block"); + peer.sendMessage(clonedBlockMessage); + + return; + } + + try (final Repository repository = RepositoryManager.getRepository()) { + BlockData blockData = repository.getBlockRepository().fromSignature(signature); + + if (blockData != null) { + if (PruneManager.getInstance().isBlockPruned(blockData.getHeight())) { + // If this is a pruned block, we likely only have partial data, so best not to sent it + blockData = null; + } + } + + // If we have no block data, we should check the archive in case it's there + if (blockData == null) { + if (Settings.getInstance().isArchiveEnabled()) { + Triple serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository); + if (serializedBlock != null) { + byte[] bytes = serializedBlock.getA(); + Integer serializationVersion = serializedBlock.getB(); + + Message blockMessage; + switch (serializationVersion) { + case 1: + blockMessage = new CachedBlockMessage(bytes); + break; + + case 2: + blockMessage = new CachedBlockV2Message(bytes); + break; + + default: + return; + } + blockMessage.setId(message.getId()); + + // This call also causes the other needed data to be pulled in from repository + //if (!peer.sendMessage(blockMessage)) { + // peer.disconnect("failed to send block"); + // // Don't fall-through to caching because failure to send might be from failure to build message + // return; + //} + peer.sendMessage(blockMessage); + + // Sent successfully from archive, so nothing more to do + return; + } + } + } + + if (blockData == null) { + // We don't have this block + this.stats.getBlockMessageStats.unknownBlocks.getAndIncrement(); + + // Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout + LOGGER.debug(() -> String.format("Sending 'block unknown' response to peer %s for GET_BLOCK request for unknown block %s", peer, Base58.encode(signature))); + + // Send generic 'unknown' message as it's very short + //Message blockUnknownMessage = peer.getPeersVersion() >= GenericUnknownMessage.MINIMUM_PEER_VERSION + // ? 
new GenericUnknownMessage() + // : new BlockSummariesMessage(Collections.emptyList()); + Message blockUnknownMessage = new GenericUnknownMessage(); + blockUnknownMessage.setId(message.getId()); + //if (!peer.sendMessage(blockUnknownMessage)) + // peer.disconnect("failed to send block-unknown response"); + peer.sendMessage(blockUnknownMessage); + return; + } + + Block block = new Block(repository, blockData); + + //// V2 support + //if (peer.getPeersVersion() >= BlockV2Message.MIN_PEER_VERSION) { + // Message blockMessage = new BlockV2Message(block); + // blockMessage.setId(message.getId()); + // if (!peer.sendMessage(blockMessage)) { + // peer.disconnect("failed to send block"); + // // Don't fall-through to caching because failure to send might be from failure to build message + // return; + // } + // return; + //} + + CachedBlockMessage blockMessage = new CachedBlockMessage(block); + blockMessage.setId(message.getId()); + + //if (!peer.sendMessage(blockMessage)) { + // peer.disconnect("failed to send block"); + // // Don't fall-through to caching because failure to send might be from failure to build message + // return; + //} + peer.sendMessage(blockMessage); + + // If request is for a recent block, cache it + if (getChainHeight() - blockData.getHeight() <= blockCacheSize) { + this.stats.getBlockMessageStats.cacheFills.incrementAndGet(); + + this.blockMessageCache.put(ByteArray.wrap(blockData.getSignature()), blockMessage); + } + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while sending block %s to peer %s", Base58.encode(signature), peer), e); + } catch (TransformationException e) { + LOGGER.error(String.format("Serialization issue while sending block %s to peer %s", Base58.encode(signature), peer), e); + } + } + + private void onRNSNetworkGetBlockSummariesMessage(RNSPeer peer, Message message) { + GetBlockSummariesMessage getBlockSummariesMessage = (GetBlockSummariesMessage) message; + final byte[] parentSignature = getBlockSummariesMessage.getParentSignature(); + this.stats.getBlockSummariesStats.requests.incrementAndGet(); + + // If peer's parent signature matches our latest block signature + // then we have no blocks after that and can short-circuit with an empty response + BlockData chainTip = getChainTip(); + if (chainTip != null && Arrays.equals(parentSignature, chainTip.getSignature())) { + //Message blockSummariesMessage = peer.getPeersVersion() >= BlockSummariesV2Message.MINIMUM_PEER_VERSION + // ? 
new BlockSummariesV2Message(Collections.emptyList()) + // : new BlockSummariesMessage(Collections.emptyList()); + Message blockSummariesMessage = new BlockSummariesV2Message(Collections.emptyList()); + + blockSummariesMessage.setId(message.getId()); + + //if (!peer.sendMessage(blockSummariesMessage)) + // peer.disconnect("failed to send block summaries"); + peer.sendMessage(blockSummariesMessage); + + return; + } + + List blockSummaries = new ArrayList<>(); + + // Attempt to serve from our cache of latest blocks + synchronized (this.latestBlocks) { + blockSummaries = this.latestBlocks.stream() + .dropWhile(cachedBlockData -> !Arrays.equals(cachedBlockData.getReference(), parentSignature)) + .map(BlockSummaryData::new) + .collect(Collectors.toList()); + } + + if (blockSummaries.isEmpty()) { + try (final Repository repository = RepositoryManager.getRepository()) { + int numberRequested = Math.min(Network.MAX_BLOCK_SUMMARIES_PER_REPLY, getBlockSummariesMessage.getNumberRequested()); + + BlockData blockData = repository.getBlockRepository().fromReference(parentSignature); + if (blockData == null) { + // Try the archive + blockData = repository.getBlockArchiveRepository().fromReference(parentSignature); + } + + if (blockData != null) { + if (PruneManager.getInstance().isBlockPruned(blockData.getHeight())) { + // If this request contains a pruned block, we likely only have partial data, so best not to sent anything + // We always prune from the oldest first, so it's fine to just check the first block requested + blockData = null; + } + } + + while (blockData != null && blockSummaries.size() < numberRequested) { + BlockSummaryData blockSummary = new BlockSummaryData(blockData); + blockSummaries.add(blockSummary); + + byte[] previousSignature = blockData.getSignature(); + blockData = repository.getBlockRepository().fromReference(previousSignature); + if (blockData == null) { + // Try the archive + blockData = repository.getBlockArchiveRepository().fromReference(previousSignature); + } + } + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while sending block summaries after %s to peer %s", Base58.encode(parentSignature), peer), e); + } + } else { + this.stats.getBlockSummariesStats.cacheHits.incrementAndGet(); + + if (blockSummaries.size() >= getBlockSummariesMessage.getNumberRequested()) + this.stats.getBlockSummariesStats.fullyFromCache.incrementAndGet(); + } + + //Message blockSummariesMessage = peer.getPeersVersion() >= BlockSummariesV2Message.MINIMUM_PEER_VERSION + // ? 
new BlockSummariesV2Message(blockSummaries) + // : new BlockSummariesMessage(blockSummaries); + Message blockSummariesMessage = new BlockSummariesV2Message(blockSummaries); + blockSummariesMessage.setId(message.getId()); + //if (!peer.sendMessage(blockSummariesMessage)) + // peer.disconnect("failed to send block summaries"); + peer.sendMessage(blockSummariesMessage); + } + + private void onRNSNetworkGetSignaturesV2Message(RNSPeer peer, Message message) { + GetSignaturesV2Message getSignaturesMessage = (GetSignaturesV2Message) message; + final byte[] parentSignature = getSignaturesMessage.getParentSignature(); + this.stats.getBlockSignaturesV2Stats.requests.incrementAndGet(); + + // If peer's parent signature matches our latest block signature + // then we can short-circuit with an empty response + BlockData chainTip = getChainTip(); + if (chainTip != null && Arrays.equals(parentSignature, chainTip.getSignature())) { + Message signaturesMessage = new SignaturesMessage(Collections.emptyList()); + signaturesMessage.setId(message.getId()); + //if (!peer.sendMessage(signaturesMessage)) + // peer.disconnect("failed to send signatures (v2)"); + peer.sendMessage(signaturesMessage); + + return; + } + + List signatures = new ArrayList<>(); + + // Attempt to serve from our cache of latest blocks + synchronized (this.latestBlocks) { + signatures = this.latestBlocks.stream() + .dropWhile(cachedBlockData -> !Arrays.equals(cachedBlockData.getReference(), parentSignature)) + .map(BlockData::getSignature) + .collect(Collectors.toList()); + } + + if (signatures.isEmpty()) { + try (final Repository repository = RepositoryManager.getRepository()) { + int numberRequested = getSignaturesMessage.getNumberRequested(); + BlockData blockData = repository.getBlockRepository().fromReference(parentSignature); + if (blockData == null) { + // Try the archive + blockData = repository.getBlockArchiveRepository().fromReference(parentSignature); + } + + while (blockData != null && signatures.size() < numberRequested) { + signatures.add(blockData.getSignature()); + + byte[] previousSignature = blockData.getSignature(); + blockData = repository.getBlockRepository().fromReference(previousSignature); + if (blockData == null) { + // Try the archive + blockData = repository.getBlockArchiveRepository().fromReference(previousSignature); + } + } + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while sending V2 signatures after %s to peer %s", Base58.encode(parentSignature), peer), e); + } + } else { + this.stats.getBlockSignaturesV2Stats.cacheHits.incrementAndGet(); + + if (signatures.size() >= getSignaturesMessage.getNumberRequested()) + this.stats.getBlockSignaturesV2Stats.fullyFromCache.incrementAndGet(); + } + + Message signaturesMessage = new SignaturesMessage(signatures); + signaturesMessage.setId(message.getId()); + //if (!peer.sendMessage(signaturesMessage)) + // peer.disconnect("failed to send signatures (v2)"); + peer.sendMessage(signaturesMessage); + } + + private void onRNSNetworkHeightV2Message(RNSPeer peer, Message message) { + HeightV2Message heightV2Message = (HeightV2Message) message; + + if (!Settings.getInstance().isLite()) { + // If peer is inbound and we've not updated their height + // then this is probably their initial HEIGHT_V2 message + // so they need a corresponding HEIGHT_V2 message from us + if (!peer.getIsInitiator() && peer.getChainTipData() == null) { + Message responseMessage = RNSNetwork.getInstance().buildHeightOrChainTipInfo(peer); + peer.sendMessage(responseMessage); 
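+                // Note: unlike the TCP handlers, a failed send is not met with an immediate disconnect here;
+                // failures are expected to surface via the Reticulum link/timeout callbacks instead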
+ } + } + + // Update peer chain tip data + BlockSummaryData newChainTipData = new BlockSummaryData(heightV2Message.getHeight(), heightV2Message.getSignature(), heightV2Message.getMinterPublicKey(), heightV2Message.getTimestamp()); + peer.setChainTipData(newChainTipData); + + // Potentially synchronize + Synchronizer.getInstance().requestSync(); + } + + private void onRNSNetworkBlockSummariesV2Message(RNSPeer peer, Message message) { + BlockSummariesV2Message blockSummariesV2Message = (BlockSummariesV2Message) message; + + if (!Settings.getInstance().isLite()) { + //// If peer is inbound and we've not updated their height + //// then this is probably their initial BLOCK_SUMMARIES_V2 message + //// so they need a corresponding BLOCK_SUMMARIES_V2 message from us + if (!peer.getIsInitiator() && peer.getChainTipData() == null) { + Message responseMessage = RNSNetwork.getInstance().buildHeightOrChainTipInfo(peer); + peer.sendMessage(responseMessage); + } + } + + if (message.hasId()) { + /* + * Experimental proof-of-concept: discard messages with ID + * These are 'late' reply messages received after timeout has expired, + * having been passed upwards from Peer to Network to Controller. + * Hence, these are NOT simple "here's my chain tip" broadcasts from other peers. + */ + LOGGER.debug("Discarding late {} message with ID {} from {}", message.getType().name(), message.getId(), peer); + return; + } + + // Update peer chain tip data + peer.setChainTipSummaries(blockSummariesV2Message.getBlockSummaries()); + + // Potentially synchronize + Synchronizer.getInstance().requestSync(); + } + + // ************ + + private void onRNSNetworkGetAccountMessage(RNSPeer peer, Message message) { + GetAccountMessage getAccountMessage = (GetAccountMessage) message; + String address = getAccountMessage.getAddress(); + this.stats.getAccountMessageStats.requests.incrementAndGet(); + + try (final Repository repository = RepositoryManager.getRepository()) { + AccountData accountData = repository.getAccountRepository().getAccount(address); + + if (accountData == null) { + // We don't have this account + this.stats.getAccountMessageStats.unknownAccounts.getAndIncrement(); + + // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout + LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT request for unknown account %s", peer, address)); + + // Send generic 'unknown' message as it's very short + Message accountUnknownMessage = new GenericUnknownMessage(); + accountUnknownMessage.setId(message.getId()); + peer.sendMessage(accountUnknownMessage); + return; + } + + AccountMessage accountMessage = new AccountMessage(accountData); + accountMessage.setId(message.getId()); + + // handle in timeout callback instead + //if (!peer.sendMessage(accountMessage)) { + // peer.disconnect("failed to send account"); + //} + peer.sendMessage(accountMessage); + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while send account %s to peer %s", address, peer), e); + } + } + + private void onRNSNetworkGetAccountBalanceMessage(RNSPeer peer, Message message) { + GetAccountBalanceMessage getAccountBalanceMessage = (GetAccountBalanceMessage) message; + String address = getAccountBalanceMessage.getAddress(); + long assetId = getAccountBalanceMessage.getAssetId(); + this.stats.getAccountBalanceMessageStats.requests.incrementAndGet(); + + try (final Repository repository = RepositoryManager.getRepository()) { + AccountBalanceData 
accountBalanceData = repository.getAccountRepository().getBalance(address, assetId); + + if (accountBalanceData == null) { + // We don't have this account + this.stats.getAccountBalanceMessageStats.unknownAccounts.getAndIncrement(); + + // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout + LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT_BALANCE request for unknown account %s and asset ID %d", peer, address, assetId)); + + // Send generic 'unknown' message as it's very short + Message accountUnknownMessage = new GenericUnknownMessage(); + accountUnknownMessage.setId(message.getId()); + peer.sendMessage(accountUnknownMessage); + return; + } + + AccountBalanceMessage accountMessage = new AccountBalanceMessage(accountBalanceData); + accountMessage.setId(message.getId()); + + // handle in timeout callback instead + //if (!peer.sendMessage(accountMessage)) { + // peer.disconnect("failed to send account balance"); + //} + peer.sendMessage(accountMessage); + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while send balance for account %s and asset ID %d to peer %s", address, assetId, peer), e); + } + } + + private void onRNSNetworkGetAccountTransactionsMessage(RNSPeer peer, Message message) { + GetAccountTransactionsMessage getAccountTransactionsMessage = (GetAccountTransactionsMessage) message; + String address = getAccountTransactionsMessage.getAddress(); + int limit = Math.min(getAccountTransactionsMessage.getLimit(), 100); + int offset = getAccountTransactionsMessage.getOffset(); + this.stats.getAccountTransactionsMessageStats.requests.incrementAndGet(); + + try (final Repository repository = RepositoryManager.getRepository()) { + List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, + null, null, null, address, TransactionsResource.ConfirmationStatus.CONFIRMED, limit, offset, false); + + // Expand signatures to transactions + List transactions = new ArrayList<>(signatures.size()); + for (byte[] signature : signatures) { + transactions.add(repository.getTransactionRepository().fromSignature(signature)); + } + + if (transactions == null) { + // We don't have this account + this.stats.getAccountTransactionsMessageStats.unknownAccounts.getAndIncrement(); + + // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout + LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT_TRANSACTIONS request for unknown account %s", peer, address)); + + // Send generic 'unknown' message as it's very short + Message accountUnknownMessage = new GenericUnknownMessage(); + accountUnknownMessage.setId(message.getId()); + peer.sendMessage(accountUnknownMessage); + return; + } + + TransactionsMessage transactionsMessage = new TransactionsMessage(transactions); + transactionsMessage.setId(message.getId()); + + // handle in timeout callback instead + //if (!peer.sendMessage(transactionsMessage)) { + // peer.disconnect("failed to send account transactions"); + //} + peer.sendMessage(transactionsMessage); + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while sending transactions for account %s %d to peer %s", address, peer), e); + } catch (MessageException e) { + LOGGER.error(String.format("Message serialization issue while sending transactions for account %s %d to peer %s", address, peer), e); + } + } + + private void 
onRNSNetworkGetAccountNamesMessage(RNSPeer peer, Message message) { + GetAccountNamesMessage getAccountNamesMessage = (GetAccountNamesMessage) message; + String address = getAccountNamesMessage.getAddress(); + this.stats.getAccountNamesMessageStats.requests.incrementAndGet(); + + try (final Repository repository = RepositoryManager.getRepository()) { + List namesDataList = repository.getNameRepository().getNamesByOwner(address); + + if (namesDataList == null) { + // We don't have this account + this.stats.getAccountNamesMessageStats.unknownAccounts.getAndIncrement(); + + // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout + LOGGER.debug(() -> String.format("Sending 'account unknown' response to peer %s for GET_ACCOUNT_NAMES request for unknown account %s", peer, address)); + + // Send generic 'unknown' message as it's very short + Message accountUnknownMessage = new GenericUnknownMessage(); + accountUnknownMessage.setId(message.getId()); + peer.sendMessage(accountUnknownMessage); + return; + } + + NamesMessage namesMessage = new NamesMessage(namesDataList); + namesMessage.setId(message.getId()); + + // handle in timeout callback instead + //if (!peer.sendMessage(namesMessage)) { + // peer.disconnect("failed to send account names"); + //} + peer.sendMessage(namesMessage); + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while send names for account %s to peer %s", address, peer), e); + } + } + + private void onRNSNetworkGetNameMessage(RNSPeer peer, Message message) { + GetNameMessage getNameMessage = (GetNameMessage) message; + String name = getNameMessage.getName(); + this.stats.getNameMessageStats.requests.incrementAndGet(); + + try (final Repository repository = RepositoryManager.getRepository()) { + NameData nameData = repository.getNameRepository().fromName(name); + + if (nameData == null) { + // We don't have this account + this.stats.getNameMessageStats.unknownAccounts.getAndIncrement(); + + // Send valid, yet unexpected message type in response, so peer doesn't have to wait for timeout + LOGGER.debug(() -> String.format("Sending 'name unknown' response to peer %s for GET_NAME request for unknown name %s", peer, name)); + + // Send generic 'unknown' message as it's very short + Message nameUnknownMessage = new GenericUnknownMessage(); + nameUnknownMessage.setId(message.getId()); + if (!peer.sendMessage(nameUnknownMessage)) + peer.sendMessage(nameUnknownMessage); + return; + } + + NamesMessage namesMessage = new NamesMessage(Arrays.asList(nameData)); + namesMessage.setId(message.getId()); + + // handle in timeout callback instead + //if (!peer.sendMessage(namesMessage)) { + // peer.disconnect("failed to send name data"); + //} + peer.sendMessage(namesMessage); + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while send name %s to peer %s", name, peer), e); + } + } + + /** + * Returns whether we think our node has up-to-date blockchain based on our info about other peers. + * @param minLatestBlockTimestamp - the minimum block timestamp to be considered recent + * @return boolean - whether our node's blockchain is up to date or not + */ + public boolean isUpToDateRNS(Long minLatestBlockTimestamp) { + if (Settings.getInstance().isLite()) { + // Lite nodes are always "up to date" + return true; + } + + // Do we even have a vaguely recent block? 
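+        // (i.e. a chain tip minted at or after minLatestBlockTimestamp)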
+ if (minLatestBlockTimestamp == null) + return false; + + final BlockData latestBlockData = getChainTip(); + if (latestBlockData == null || latestBlockData.getTimestamp() < minLatestBlockTimestamp) + return false; + + if (Settings.getInstance().isSingleNodeTestnet()) + // Single node testnets won't have peers, so we can assume up to date from this point + return true; + + // Needs a mutable copy of the unmodifiableList + List peers = new ArrayList<>(RNSNetwork.getInstance().getImmutableLinkedPeers()); + if (peers == null) + return false; + + //// Disregard peers that have "misbehaved" recently + //peers.removeIf(hasMisbehaved); + // + //// Disregard peers that don't have a recent block + //peers.removeIf(hasNoRecentBlock); + + // Check we have enough peers to potentially synchronize/mint + if (peers.size() < Settings.getInstance().getReticulumMinDesiredPeers()) + return false; + + // If we don't have any peers left then can't synchronize, therefore consider ourself not up to date + return !peers.isEmpty(); + } + + /** + * Returns whether we think our node has up-to-date blockchain based on our info about other peers. + * Uses the default minLatestBlockTimestamp value. + * @return boolean - whether our node's blockchain is up to date or not + */ + public boolean isUpToDateRNS() { + final Long minLatestBlockTimestamp = getMinimumLatestBlockTimestamp(); + return this.isUpToDate(minLatestBlockTimestamp); + } } diff --git a/src/main/java/org/qortal/controller/RNSTransactionImporter.java b/src/main/java/org/qortal/controller/RNSTransactionImporter.java new file mode 100644 index 00000000..40d89ada --- /dev/null +++ b/src/main/java/org/qortal/controller/RNSTransactionImporter.java @@ -0,0 +1,460 @@ +package org.qortal.controller; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.network.RNSNetwork; +import org.qortal.network.RNSPeer; +import org.qortal.network.message.GetTransactionMessage; +import org.qortal.network.message.Message; +import org.qortal.network.message.TransactionMessage; +import org.qortal.network.message.TransactionSignaturesMessage; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.transaction.Transaction; +import org.qortal.transform.TransformationException; +import org.qortal.utils.Base58; +import org.qortal.utils.NTP; + +import java.util.*; +import java.util.concurrent.locks.ReentrantLock; +import java.util.stream.Collectors; + +public class RNSTransactionImporter extends Thread { + + private static final Logger LOGGER = LogManager.getLogger(RNSTransactionImporter.class); + + private static RNSTransactionImporter instance; + private volatile boolean isStopping = false; + + private static final int MAX_INCOMING_TRANSACTIONS = 5000; + + /** Minimum time before considering an invalid unconfirmed transaction as "stale" */ + public static final long INVALID_TRANSACTION_STALE_TIMEOUT = 30 * 60 * 1000L; // ms + /** Minimum frequency to re-request stale unconfirmed transactions from peers, to recheck validity */ + public static final long INVALID_TRANSACTION_RECHECK_INTERVAL = 60 * 60 * 1000L; // ms\ + /** Minimum frequency to re-request expired unconfirmed transactions from peers, to recheck validity + * This mainly exists to stop expired transactions from bloating the list */ + public static 
final long EXPIRED_TRANSACTION_RECHECK_INTERVAL = 10 * 60 * 1000L; // ms + + + /** Map of incoming transaction that are in the import queue. Key is transaction data, value is whether signature has been validated. */ + private final Map incomingTransactions = Collections.synchronizedMap(new HashMap<>()); + + /** Map of recent invalid unconfirmed transactions. Key is base58 transaction signature, value is do-not-request expiry timestamp. */ + private final Map invalidUnconfirmedTransactions = Collections.synchronizedMap(new HashMap<>()); + + /** Cached list of unconfirmed transactions, used when counting per creator. This is replaced regularly */ + public static List unconfirmedTransactionsCache = null; + + + public static synchronized RNSTransactionImporter getInstance() { + if (instance == null) { + instance = new RNSTransactionImporter(); + } + + return instance; + } + + @Override + public void run() { + Thread.currentThread().setName("Transaction Importer"); + + try { + while (!Controller.isStopping()) { + Thread.sleep(500L); + + // Process incoming transactions queue + validateTransactionsInQueue(); + importTransactionsInQueue(); + + // Clean up invalid incoming transactions list + cleanupInvalidTransactionsList(NTP.getTime()); + } + } catch (InterruptedException e) { + // Fall through to exit thread + } + } + + public void shutdown() { + isStopping = true; + this.interrupt(); + } + + + // Incoming transactions queue + + private boolean incomingTransactionQueueContains(byte[] signature) { + synchronized (incomingTransactions) { + return incomingTransactions.keySet().stream().anyMatch(t -> Arrays.equals(t.getSignature(), signature)); + } + } + + private void removeIncomingTransaction(byte[] signature) { + incomingTransactions.keySet().removeIf(t -> Arrays.equals(t.getSignature(), signature)); + } + + /** + * Retrieve all pending unconfirmed transactions that have had their signatures validated. + * @return a list of TransactionData objects, with valid signatures. + */ + private List getCachedSigValidTransactions() { + synchronized (this.incomingTransactions) { + return this.incomingTransactions.entrySet().stream() + .filter(t -> Boolean.TRUE.equals(t.getValue())) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + } + } + + /** + * Validate the signatures of any transactions pending import, then update their + * entries in the queue to mark them as valid/invalid. + * + * No database lock is required. + */ + private void validateTransactionsInQueue() { + if (this.incomingTransactions.isEmpty()) { + // Nothing to do? 
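+            // (queue is empty, so skip opening a repository session)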
+ return; + } + + try (final Repository repository = RepositoryManager.getRepository()) { + // Take a snapshot of incomingTransactions, so we don't need to lock it while processing + Map incomingTransactionsCopy = Map.copyOf(this.incomingTransactions); + + int unvalidatedCount = Collections.frequency(incomingTransactionsCopy.values(), Boolean.FALSE); + int validatedCount = 0; + + if (unvalidatedCount > 0) { + LOGGER.debug("Validating signatures in incoming transactions queue (size {})...", unvalidatedCount); + } + + // A list of all currently pending transactions that have valid signatures + List sigValidTransactions = new ArrayList<>(); + + // A list of signatures that became valid in this round + List newlyValidSignatures = new ArrayList<>(); + + boolean isLiteNode = Settings.getInstance().isLite(); + + // We need the latest block in order to check for expired transactions + BlockData latestBlock = Controller.getInstance().getChainTip(); + + // Signature validation round - does not require blockchain lock + for (Map.Entry transactionEntry : incomingTransactionsCopy.entrySet()) { + // Quick exit? + if (isStopping) { + return; + } + + TransactionData transactionData = transactionEntry.getKey(); + Transaction transaction = Transaction.fromData(repository, transactionData); + String signature58 = Base58.encode(transactionData.getSignature()); + + Long now = NTP.getTime(); + if (now == null) { + return; + } + + // Drop expired transactions before they are considered "sig valid" + if (latestBlock != null && transaction.getDeadline() <= latestBlock.getTimestamp()) { + LOGGER.debug("Removing expired {} transaction {} from import queue", transactionData.getType().name(), signature58); + removeIncomingTransaction(transactionData.getSignature()); + invalidUnconfirmedTransactions.put(signature58, (now + EXPIRED_TRANSACTION_RECHECK_INTERVAL)); + continue; + } + + // Only validate signature if we haven't already done so + Boolean isSigValid = transactionEntry.getValue(); + if (!Boolean.TRUE.equals(isSigValid)) { + if (isLiteNode) { + // Lite nodes can't easily validate transactions, so for now we will have to assume that everything is valid + sigValidTransactions.add(transaction); + newlyValidSignatures.add(transactionData.getSignature()); + // Add mark signature as valid if transaction still exists in import queue + incomingTransactions.computeIfPresent(transactionData, (k, v) -> Boolean.TRUE); + continue; + } + + if (!transaction.isSignatureValid()) { + LOGGER.debug("Ignoring {} transaction {} with invalid signature", transactionData.getType().name(), signature58); + removeIncomingTransaction(transactionData.getSignature()); + + // Also add to invalidIncomingTransactions map + now = NTP.getTime(); + if (now != null) { + Long expiry = now + INVALID_TRANSACTION_RECHECK_INTERVAL; + LOGGER.trace("Adding invalid transaction {} to invalidUnconfirmedTransactions...", signature58); + // Add to invalidUnconfirmedTransactions so that we don't keep requesting it + invalidUnconfirmedTransactions.put(signature58, expiry); + } + + // We're done with this transaction + continue; + } + + // Count the number that were validated in this round, for logging purposes + validatedCount++; + + // Add mark signature as valid if transaction still exists in import queue + incomingTransactions.computeIfPresent(transactionData, (k, v) -> Boolean.TRUE); + + // Signature validated in this round + newlyValidSignatures.add(transactionData.getSignature()); + + } else { + LOGGER.trace(() -> String.format("Transaction %s known to have 
valid signature", Base58.encode(transactionData.getSignature()))); + } + + // Signature valid - add to shortlist + sigValidTransactions.add(transaction); + } + + if (unvalidatedCount > 0) { + LOGGER.debug("Finished validating signatures in incoming transactions queue (valid this round: {}, total pending import: {})...", validatedCount, sigValidTransactions.size()); + } + + } catch (DataException e) { + LOGGER.error("Repository issue while processing incoming transactions", e); + } + } + + /** + * Import any transactions in the queue that have valid signatures. + * + * A database lock is required. + */ + private void importTransactionsInQueue() { + List sigValidTransactions = this.getCachedSigValidTransactions(); + if (sigValidTransactions.isEmpty()) { + // Don't bother locking if there are no new transactions to process + return; + } + + if (Synchronizer.getInstance().isSyncRequested() || Synchronizer.getInstance().isSynchronizing()) { + // Prioritize syncing, and don't attempt to lock + return; + } + + ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); + if (!blockchainLock.tryLock()) { + LOGGER.debug("Too busy to import incoming transactions queue"); + return; + } + + LOGGER.debug("Importing incoming transactions queue (size {})...", sigValidTransactions.size()); + + int processedCount = 0; + try (final Repository repository = RepositoryManager.getRepository()) { + + // Use a single copy of the unconfirmed transactions list for each cycle, to speed up constant lookups + // when counting unconfirmed transactions by creator. + List unconfirmedTransactions = repository.getTransactionRepository().getUnconfirmedTransactions(); + unconfirmedTransactions.removeIf(t -> t.getType() == Transaction.TransactionType.CHAT); + unconfirmedTransactionsCache = unconfirmedTransactions; + + // A list of signatures were imported in this round + List newlyImportedSignatures = new ArrayList<>(); + + // Import transactions with valid signatures + try { + for (int i = 0; i < sigValidTransactions.size(); ++i) { + if (isStopping) { + return; + } + + if (Synchronizer.getInstance().isSyncRequestPending()) { + LOGGER.debug("Breaking out of transaction importing with {} remaining, because a sync request is pending", sigValidTransactions.size() - i); + return; + } + + TransactionData transactionData = sigValidTransactions.get(i); + Transaction transaction = Transaction.fromData(repository, transactionData); + + Transaction.ValidationResult validationResult = transaction.importAsUnconfirmed(); + processedCount++; + + switch (validationResult) { + case TRANSACTION_ALREADY_EXISTS: { + LOGGER.trace(() -> String.format("Ignoring existing transaction %s", Base58.encode(transactionData.getSignature()))); + break; + } + + case NO_BLOCKCHAIN_LOCK: { + // Is this even possible considering we acquired blockchain lock above? 
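+                            // (the blockchain lock is a ReentrantLock already held by this thread, so this result is not expected here)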
+ LOGGER.trace(() -> String.format("Couldn't lock blockchain to import unconfirmed transaction %s", Base58.encode(transactionData.getSignature()))); + break; + } + + case OK: { + LOGGER.debug(() -> String.format("Imported %s transaction %s", transactionData.getType().name(), Base58.encode(transactionData.getSignature()))); + + // Add to the unconfirmed transactions cache + if (transactionData.getType() != Transaction.TransactionType.CHAT && unconfirmedTransactionsCache != null) { + unconfirmedTransactionsCache.add(transactionData); + } + + // Signature imported in this round + newlyImportedSignatures.add(transactionData.getSignature()); + + break; + } + + // All other invalid cases: + default: { + final String signature58 = Base58.encode(transactionData.getSignature()); + LOGGER.debug(() -> String.format("Ignoring invalid (%s) %s transaction %s", validationResult.name(), transactionData.getType().name(), signature58)); + + Long now = NTP.getTime(); + if (now != null && now - transactionData.getTimestamp() > INVALID_TRANSACTION_STALE_TIMEOUT) { + Long expiryLength = INVALID_TRANSACTION_RECHECK_INTERVAL; + + if (validationResult == Transaction.ValidationResult.TIMESTAMP_TOO_OLD) { + // Use shorter recheck interval for expired transactions + expiryLength = EXPIRED_TRANSACTION_RECHECK_INTERVAL; + } + + Long expiry = now + expiryLength; + LOGGER.trace("Adding stale invalid transaction {} to invalidUnconfirmedTransactions...", signature58); + // Invalid, unconfirmed transaction has become stale - add to invalidUnconfirmedTransactions so that we don't keep requesting it + invalidUnconfirmedTransactions.put(signature58, expiry); + } + } + } + + // Transaction has been processed, even if only to reject it + removeIncomingTransaction(transactionData.getSignature()); + } + + if (!newlyImportedSignatures.isEmpty()) { + LOGGER.debug("Broadcasting {} newly imported signatures", newlyImportedSignatures.size()); + Message newTransactionSignatureMessage = new TransactionSignaturesMessage(newlyImportedSignatures); + RNSNetwork.getInstance().broadcast(broadcastPeer -> newTransactionSignatureMessage); + } + } finally { + LOGGER.debug("Finished importing {} incoming transaction{}", processedCount, (processedCount == 1 ? 
"" : "s")); + blockchainLock.unlock(); + + // Clear the unconfirmed transaction cache so new data can be populated in the next cycle + unconfirmedTransactionsCache = null; + } + } catch (DataException e) { + LOGGER.error("Repository issue while importing incoming transactions", e); + } + } + + private void cleanupInvalidTransactionsList(Long now) { + if (now == null) { + return; + } + // Periodically remove invalid unconfirmed transactions from the list, so that they can be fetched again + invalidUnconfirmedTransactions.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < now); + } + + + // Network handlers + + public void onNetworkTransactionMessage(RNSPeer peer, Message message) { + TransactionMessage transactionMessage = (TransactionMessage) message; + TransactionData transactionData = transactionMessage.getTransactionData(); + + if (this.incomingTransactions.size() < MAX_INCOMING_TRANSACTIONS) { + synchronized (this.incomingTransactions) { + if (!incomingTransactionQueueContains(transactionData.getSignature())) { + this.incomingTransactions.put(transactionData, Boolean.FALSE); + } + } + } + } + + public void onNetworkGetTransactionMessage(RNSPeer peer, Message message) { + GetTransactionMessage getTransactionMessage = (GetTransactionMessage) message; + byte[] signature = getTransactionMessage.getSignature(); + + try (final Repository repository = RepositoryManager.getRepository()) { + // Firstly check the sig-valid transactions that are currently queued for import + TransactionData transactionData = this.getCachedSigValidTransactions().stream() + .filter(t -> Arrays.equals(signature, t.getSignature())) + .findFirst().orElse(null); + + if (transactionData == null) { + // Not found in import queue, so try the database + transactionData = repository.getTransactionRepository().fromSignature(signature); + } + + if (transactionData == null) { + // Still not found - so we don't have this transaction + LOGGER.debug(() -> String.format("Ignoring GET_TRANSACTION request from peer %s for unknown transaction %s", peer, Base58.encode(signature))); + // Send no response at all??? + return; + } + + Message transactionMessage = new TransactionMessage(transactionData); + transactionMessage.setId(message.getId()); + peer.sendMessage(transactionMessage); + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e); + } catch (TransformationException e) { + LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e); + } + } + + public void onNetworkGetUnconfirmedTransactionsMessage(RNSPeer peer, Message message) { + try (final Repository repository = RepositoryManager.getRepository()) { + List signatures = Collections.emptyList(); + + // If we're NOT up-to-date then don't send out unconfirmed transactions + // as it's possible they are already included in a later block that we don't have. 
+ if (Controller.getInstance().isUpToDate()) + signatures = repository.getTransactionRepository().getUnconfirmedTransactionSignatures(); + + Message transactionSignaturesMessage = new TransactionSignaturesMessage(signatures); + peer.sendMessage(transactionSignaturesMessage); + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while sending unconfirmed transaction signatures to peer %s", peer), e); + } + } + + public void onNetworkTransactionSignaturesMessage(RNSPeer peer, Message message) { + TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) message; + List signatures = transactionSignaturesMessage.getSignatures(); + + try (final Repository repository = RepositoryManager.getRepository()) { + for (byte[] signature : signatures) { + String signature58 = Base58.encode(signature); + if (invalidUnconfirmedTransactions.containsKey(signature58)) { + // Previously invalid transaction - don't keep requesting it + // It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks + continue; + } + + // Ignore if this transaction is in the queue + if (incomingTransactionQueueContains(signature)) { + LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peer)); + continue; + } + + // Do we have it already? (Before requesting transaction data itself) + if (repository.getTransactionRepository().exists(signature)) { + LOGGER.trace(() -> String.format("Ignoring existing transaction %s from peer %s", Base58.encode(signature), peer)); + continue; + } + + // Check isInterrupted() here and exit fast + if (Thread.currentThread().isInterrupted()) + return; + + // Fetch actual transaction data from peer + Message getTransactionMessage = new GetTransactionMessage(signature); + peer.sendMessage(getTransactionMessage); + } + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer %s", peer), e); + } + } + +} diff --git a/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileListManager.java b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileListManager.java new file mode 100644 index 00000000..93c3cd11 --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileListManager.java @@ -0,0 +1,731 @@ +package org.qortal.controller.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataFileChunk; +import org.qortal.controller.Controller; +import org.qortal.data.arbitrary.RNSArbitraryDirectConnectionInfo; +import org.qortal.data.arbitrary.RNSArbitraryFileListResponseInfo; +import org.qortal.data.arbitrary.RNSArbitraryRelayInfo; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.network.RNSNetwork; +import org.qortal.network.RNSPeer; +import org.qortal.network.message.ArbitraryDataFileListMessage; +import org.qortal.network.message.GetArbitraryDataFileListMessage; +import org.qortal.network.message.Message; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.utils.Base58; +import org.qortal.utils.ListUtils; +import org.qortal.utils.NTP; +import org.qortal.utils.Triple; + +import java.util.*; + 
+import static org.qortal.controller.arbitrary.RNSArbitraryDataFileManager.MAX_FILE_HASH_RESPONSES; + +public class RNSArbitraryDataFileListManager { + + private static final Logger LOGGER = LogManager.getLogger(RNSArbitraryDataFileListManager.class); + + private static RNSArbitraryDataFileListManager instance; + + private static String MIN_PEER_VERSION_FOR_FILE_LIST_STATS = "3.2.0"; + + /** + * Map of recent incoming requests for ARBITRARY transaction data file lists. + *

+     * Key is original request's message ID<br>
+     * Value is Triple<transaction signature in base58, first requesting peer, first request's timestamp>
+     * <p>
+     * If peer is null then either:
+     * <ul>
+     * <li>we are the original requesting peer</li>
+     * <li>we have already sent data payload to original requesting peer.</li>
+     * </ul>
+     * If signature is null then we have already received the file list and either:
+     * <ul>
+     * <li>we are the original requesting peer and have processed it</li>
+     * <li>we have forwarded the file list</li>
+     * </ul>
+ */ + public Map> arbitraryDataFileListRequests = Collections.synchronizedMap(new HashMap<>()); + + /** + * Map to keep track of in progress arbitrary data signature requests + * Key: string - the signature encoded in base58 + * Value: Triple + */ + private Map> arbitraryDataSignatureRequests = Collections.synchronizedMap(new HashMap<>()); + + + /** Maximum number of seconds that a file list relay request is able to exist on the network */ + public static long RELAY_REQUEST_MAX_DURATION = 5000L; + /** Maximum number of hops that a file list relay request is allowed to make */ + public static int RELAY_REQUEST_MAX_HOPS = 4; + + /** Minimum peer version to use relay */ + public static String RELAY_MIN_PEER_VERSION = "3.4.0"; + + + private RNSArbitraryDataFileListManager() { + } + + public static RNSArbitraryDataFileListManager getInstance() { + if (instance == null) + instance = new RNSArbitraryDataFileListManager(); + + return instance; + } + + + public void cleanupRequestCache(Long now) { + if (now == null) { + return; + } + final long requestMinimumTimestamp = now - ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT; + arbitraryDataFileListRequests.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < requestMinimumTimestamp); + } + + + // Track file list lookups by signature + + private boolean shouldMakeFileListRequestForSignature(String signature58) { + Triple request = arbitraryDataSignatureRequests.get(signature58); + + if (request == null) { + // Not attempted yet + return true; + } + + // Extract the components + Integer networkBroadcastCount = request.getA(); + // Integer directPeerRequestCount = request.getB(); + Long lastAttemptTimestamp = request.getC(); + + if (lastAttemptTimestamp == null) { + // Not attempted yet + return true; + } + + long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp; + + // Allow a second attempt after 15 seconds, and another after 30 seconds + if (timeSinceLastAttempt > 15 * 1000L) { + // We haven't tried for at least 15 seconds + + if (networkBroadcastCount < 3) { + // We've made less than 3 total attempts + return true; + } + } + + // Then allow another 5 attempts, each 1 minute apart + if (timeSinceLastAttempt > 60 * 1000L) { + // We haven't tried for at least 1 minute + + if (networkBroadcastCount < 8) { + // We've made less than 8 total attempts + return true; + } + } + + // Then allow another 8 attempts, each 15 minutes apart + if (timeSinceLastAttempt > 15 * 60 * 1000L) { + // We haven't tried for at least 15 minutes + + if (networkBroadcastCount < 16) { + // We've made less than 16 total attempts + return true; + } + } + + // From then on, only try once every 6 hours, to reduce network spam + if (timeSinceLastAttempt > 6 * 60 * 60 * 1000L) { + // We haven't tried for at least 6 hours + return true; + } + + return false; + } + + private boolean shouldMakeDirectFileRequestsForSignature(String signature58) { + if (!Settings.getInstance().isDirectDataRetrievalEnabled()) { + // Direct connections are disabled in the settings + return false; + } + + Triple request = arbitraryDataSignatureRequests.get(signature58); + + if (request == null) { + // Not attempted yet + return true; + } + + // Extract the components + //Integer networkBroadcastCount = request.getA(); + Integer directPeerRequestCount = request.getB(); + Long lastAttemptTimestamp = request.getC(); + + if (lastAttemptTimestamp == null) { + // Not attempted yet + return true; + } + + if (directPeerRequestCount == 0) { + // We haven't tried asking 
peers directly yet, so we should + return true; + } + + long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp; + if (timeSinceLastAttempt > 10 * 1000L) { + // We haven't tried for at least 10 seconds + if (directPeerRequestCount < 5) { + // We've made less than 5 total attempts + return true; + } + } + + if (timeSinceLastAttempt > 5 * 60 * 1000L) { + // We haven't tried for at least 5 minutes + if (directPeerRequestCount < 10) { + // We've made less than 10 total attempts + return true; + } + } + + if (timeSinceLastAttempt > 60 * 60 * 1000L) { + // We haven't tried for at least 1 hour + return true; + } + + return false; + } + + public boolean isSignatureRateLimited(byte[] signature) { + String signature58 = Base58.encode(signature); + return !this.shouldMakeFileListRequestForSignature(signature58) + && !this.shouldMakeDirectFileRequestsForSignature(signature58); + } + + public long lastRequestForSignature(byte[] signature) { + String signature58 = Base58.encode(signature); + Triple request = arbitraryDataSignatureRequests.get(signature58); + + if (request == null) { + // Not attempted yet + return 0; + } + + // Extract the components + Long lastAttemptTimestamp = request.getC(); + if (lastAttemptTimestamp != null) { + return lastAttemptTimestamp; + } + return 0; + } + + public void addToSignatureRequests(String signature58, boolean incrementNetworkRequests, boolean incrementPeerRequests) { + Triple request = arbitraryDataSignatureRequests.get(signature58); + Long now = NTP.getTime(); + + if (request == null) { + // No entry yet + Triple newRequest = new Triple<>(0, 0, now); + arbitraryDataSignatureRequests.put(signature58, newRequest); + } + else { + // There is an existing entry + if (incrementNetworkRequests) { + request.setA(request.getA() + 1); + } + if (incrementPeerRequests) { + request.setB(request.getB() + 1); + } + request.setC(now); + arbitraryDataSignatureRequests.put(signature58, request); + } + } + + public void removeFromSignatureRequests(String signature58) { + arbitraryDataSignatureRequests.remove(signature58); + } + + + // Lookup file lists by signature (and optionally hashes) + + public boolean fetchArbitraryDataFileList(ArbitraryTransactionData arbitraryTransactionData) { + byte[] signature = arbitraryTransactionData.getSignature(); + String signature58 = Base58.encode(signature); + + // Require an NTP sync + Long now = NTP.getTime(); + if (now == null) { + return false; + } + + // If we've already tried too many times in a short space of time, make sure to give up + if (!this.shouldMakeFileListRequestForSignature(signature58)) { + // Check if we should make direct connections to peers + if (this.shouldMakeDirectFileRequestsForSignature(signature58)) { + return RNSArbitraryDataFileManager.getInstance().fetchDataFilesFromPeersForSignature(signature); + } + + LOGGER.trace("Skipping file list request for signature {} due to rate limit", signature58); + return false; + } + this.addToSignatureRequests(signature58, true, false); + + //List handshakedPeers = Network.getInstance().getImmutableHandshakedPeers(); + List handshakedPeers = RNSNetwork.getInstance().getLinkedPeers(); + List missingHashes = null; + + // Find hashes that we are missing + try { + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData); + missingHashes = arbitraryDataFile.missingHashes(); + } catch (DataException e) { + // Leave missingHashes as null, so that all hashes are requested + } + int hashCount = missingHashes != null ? 
missingHashes.size() : 0; + + LOGGER.debug(String.format("Sending data file list request for signature %s with %d hashes to %d peers...", signature58, hashCount, handshakedPeers.size())); + + //// Send our address as requestingPeer, to allow for potential direct connections with seeds/peers + //String requestingPeer = Network.getInstance().getOurExternalIpAddressAndPort(); + String requestingPeer = null; + + // Build request + Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, missingHashes, now, 0, requestingPeer); + + // Save our request into requests map + Triple requestEntry = new Triple<>(signature58, null, NTP.getTime()); + + // Assign random ID to this message + int id; + do { + id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1; + + // Put queue into map (keyed by message ID) so we can poll for a response + // If putIfAbsent() doesn't return null, then this ID is already taken + } while (arbitraryDataFileListRequests.put(id, requestEntry) != null); + getArbitraryDataFileListMessage.setId(id); + + // Broadcast request + RNSNetwork.getInstance().broadcast(peer -> getArbitraryDataFileListMessage); + + // Poll to see if data has arrived + final long singleWait = 100; + long totalWait = 0; + while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) { + try { + Thread.sleep(singleWait); + } catch (InterruptedException e) { + break; + } + + requestEntry = arbitraryDataFileListRequests.get(id); + if (requestEntry == null) + return false; + + if (requestEntry.getA() == null) + break; + + totalWait += singleWait; + } + return true; + } + + public boolean fetchArbitraryDataFileList(RNSPeer peer, byte[] signature) { + String signature58 = Base58.encode(signature); + + // Require an NTP sync + Long now = NTP.getTime(); + if (now == null) { + return false; + } + + int hashCount = 0; + LOGGER.debug(String.format("Sending data file list request for signature %s with %d hashes to peer %s...", signature58, hashCount, peer)); + + // Build request + // Use a time in the past, so that the recipient peer doesn't try and relay it + // Also, set hashes to null since it's easier to request all hashes than it is to determine which ones we need + // This could be optimized in the future + long timestamp = now - 60000L; + List hashes = null; + Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, timestamp, 0, null); + + // Save our request into requests map + Triple requestEntry = new Triple<>(signature58, null, NTP.getTime()); + + // Assign random ID to this message + int id; + do { + id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1; + + // Put queue into map (keyed by message ID) so we can poll for a response + // If putIfAbsent() doesn't return null, then this ID is already taken + } while (arbitraryDataFileListRequests.put(id, requestEntry) != null); + getArbitraryDataFileListMessage.setId(id); + + // Send the request + peer.sendMessage(getArbitraryDataFileListMessage); + + // Poll to see if data has arrived + final long singleWait = 100; + long totalWait = 0; + while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) { + try { + Thread.sleep(singleWait); + } catch (InterruptedException e) { + break; + } + + requestEntry = arbitraryDataFileListRequests.get(id); + if (requestEntry == null) + return false; + + if (requestEntry.getA() == null) + break; + + totalWait += singleWait; + } + return true; + } + + public void deleteFileListRequestsForSignature(byte[] signature) { + String signature58 = 
Base58.encode(signature); + for (Iterator>> it = arbitraryDataFileListRequests.entrySet().iterator(); it.hasNext();) { + Map.Entry> entry = it.next(); + if (entry == null || entry.getKey() == null || entry.getValue() != null) { + continue; + } + if (Objects.equals(entry.getValue().getA(), signature58)) { + // Update requests map to reflect that we've received all chunks + Triple newEntry = new Triple<>(null, null, entry.getValue().getC()); + arbitraryDataFileListRequests.put(entry.getKey(), newEntry); + } + } + } + + // Network handlers + + public void onNetworkArbitraryDataFileListMessage(RNSPeer peer, Message message) { + // Don't process if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message; + LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size()); + + if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) { + long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime(); + LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}", + totalRequestTime, arbitraryDataFileListMessage.getRequestHops(), + arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible()); + } + + // Do we have a pending request for this data? + Triple request = arbitraryDataFileListRequests.get(message.getId()); + if (request == null || request.getA() == null) { + return; + } + boolean isRelayRequest = (request.getB() != null); + + // Does this message's signature match what we're expecting? + byte[] signature = arbitraryDataFileListMessage.getSignature(); + String signature58 = Base58.encode(signature); + if (!request.getA().equals(signature58)) { + return; + } + + List hashes = arbitraryDataFileListMessage.getHashes(); + if (hashes == null || hashes.isEmpty()) { + return; + } + + ArbitraryTransactionData arbitraryTransactionData = null; + + // Check transaction exists and hashes are correct + try (final Repository repository = RepositoryManager.getRepository()) { + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + if (!(transactionData instanceof ArbitraryTransactionData)) + return; + + arbitraryTransactionData = (ArbitraryTransactionData) transactionData; + +// // Load data file(s) +// ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData); +// +// // Check all hashes exist +// for (byte[] hash : hashes) { +// //LOGGER.debug("Received hash {}", Base58.encode(hash)); +// if (!arbitraryDataFile.containsChunk(hash)) { +// // Check the hash against the complete file +// if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) { +// LOGGER.info("Received non-matching chunk hash {} for signature {}. 
This could happen if we haven't obtained the metadata file yet.", Base58.encode(hash), signature58); +// return; +// } +// } +// } + + if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) { + Long now = NTP.getTime(); + + if (RNSArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.size() < MAX_FILE_HASH_RESPONSES) { + // Keep track of the hashes this peer reports to have access to + for (byte[] hash : hashes) { + String hash58 = Base58.encode(hash); + + // Treat null request hops as 100, so that they are able to be sorted (and put to the end of the list) + int requestHops = arbitraryDataFileListMessage.getRequestHops() != null ? arbitraryDataFileListMessage.getRequestHops() : 100; + + RNSArbitraryFileListResponseInfo responseInfo = new RNSArbitraryFileListResponseInfo(hash58, signature58, + peer, now, arbitraryDataFileListMessage.getRequestTime(), requestHops); + + RNSArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.add(responseInfo); + } + } + + // Keep track of the source peer, for direct connections + if (arbitraryDataFileListMessage.getPeerAddress() != null) { + RNSArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique( + new RNSArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now)); + } + } + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list for peer %s", peer), e); + } + + // Forwarding + if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) { + boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName())); + if (!isBlocked) { + RNSPeer requestingPeer = request.getB(); + if (requestingPeer != null) { + Long requestTime = arbitraryDataFileListMessage.getRequestTime(); + Integer requestHops = arbitraryDataFileListMessage.getRequestHops(); + + // Add each hash to our local mapping so we know who to ask later + Long now = NTP.getTime(); + for (byte[] hash : hashes) { + String hash58 = Base58.encode(hash); + RNSArbitraryRelayInfo relayInfo = new RNSArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops); + RNSArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo); + } + + // Bump requestHops if it exists + if (requestHops != null) { + requestHops++; + } + + ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage; + + //// TODO - rework for Reticulum + //// Remove optional parameters if the requesting peer doesn't support it yet + //// A message with less statistical data is better than no message at all + //if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) { + // forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes); + //} else { + // forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, + // arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible()); + //} + forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, + arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible()); + forwardArbitraryDataFileListMessage.setId(message.getId()); + + // Forward to requesting peer + LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer); + //if 
(!requestingPeer.sendMessage(forwardArbitraryDataFileListMessage)) { + // requestingPeer.disconnect("failed to forward arbitrary data file list"); + //} + requestingPeer.sendMessage(forwardArbitraryDataFileListMessage); + } + } + } + } + + public void onNetworkGetArbitraryDataFileListMessage(RNSPeer peer, Message message) { + // Don't respond if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet(); + + GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message; + byte[] signature = getArbitraryDataFileListMessage.getSignature(); + String signature58 = Base58.encode(signature); + Long now = NTP.getTime(); + Triple newEntry = new Triple<>(signature58, peer, now); + + // If we've seen this request recently, then ignore + if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) { + LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58); + return; + } + + List requestedHashes = getArbitraryDataFileListMessage.getHashes(); + int hashCount = requestedHashes != null ? requestedHashes.size() : 0; + String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer(); + + if (requestingPeer != null) { + LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58); + } + else { + LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58); + } + + List hashes = new ArrayList<>(); + ArbitraryTransactionData transactionData = null; + boolean allChunksExist = false; + boolean hasMetadata = false; + + try (final Repository repository = RepositoryManager.getRepository()) { + + // Firstly we need to lookup this file on chain to get a list of its hashes + transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature); + if (transactionData instanceof ArbitraryTransactionData) { + + // Check if we're even allowed to serve data for this transaction + if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) { + + // Load file(s) and add any that exist to the list of hashes + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData); + + // If the peer didn't supply a hash list, we need to return all hashes for this transaction + if (requestedHashes == null || requestedHashes.isEmpty()) { + requestedHashes = new ArrayList<>(); + + // Add the metadata file + if (arbitraryDataFile.getMetadataHash() != null) { + requestedHashes.add(arbitraryDataFile.getMetadataHash()); + hasMetadata = true; + } + + // Add the chunk hashes + if (!arbitraryDataFile.getChunkHashes().isEmpty()) { + requestedHashes.addAll(arbitraryDataFile.getChunkHashes()); + } + // Add complete file if there are no hashes + else { + requestedHashes.add(arbitraryDataFile.getHash()); + } + } + + // Assume all chunks exists, unless one can't be found below + allChunksExist = true; + + for (byte[] requestedHash : requestedHashes) { + ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature); + if (chunk.exists()) { + hashes.add(chunk.getHash()); + //LOGGER.trace("Added hash {}", chunk.getHash58()); + } else { + LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58()); + allChunksExist = false; + } + } + } + } + + } 
catch (DataException e) { + LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer %s", peer), e); + } + + // If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that, + // or can use the separate metadata protocol to fetch it. This should greatly reduce network spam. + if (hasMetadata && hashes.size() == 1) { + hashes.clear(); + } + + // We should only respond if we have at least one hash + if (!hashes.isEmpty()) { + + // Firstly we should keep track of the requesting peer, to allow for potential direct connections later + RNSArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer); + + // We have all the chunks, so update requests map to reflect that we've sent it + // There is no need to keep track of the request, as we can serve all the chunks + if (allChunksExist) { + newEntry = new Triple<>(null, null, now); + arbitraryDataFileListRequests.put(message.getId(), newEntry); + } + + //String ourAddress = RNSNetwork.getInstance().getOurExternalIpAddressAndPort(); + String ourAddress = RNSNetwork.getInstance().getBaseDestination().getHexHash(); + ArbitraryDataFileListMessage arbitraryDataFileListMessage; + + // TODO: rework for Reticulum + // Remove optional parameters if the requesting peer doesn't support it yet + // A message with less statistical data is better than no message at all + //if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) { + // arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes); + //} else { + // arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, + // hashes, NTP.getTime(), 0, ourAddress, true); + //} + arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes); + + arbitraryDataFileListMessage.setId(message.getId()); + + //if (!peer.sendMessage(arbitraryDataFileListMessage)) { + // LOGGER.debug("Couldn't send list of hashes"); + // peer.disconnect("failed to send list of hashes"); + // return; + //} + peer.sendMessage(arbitraryDataFileListMessage); + LOGGER.debug("Sent list of hashes (count: {})", hashes.size()); + + if (allChunksExist) { + // Nothing left to do, so return to prevent any unnecessary forwarding from occurring + LOGGER.debug("No need for any forwarding because file list request is fully served"); + return; + } + + } + + // We may need to forward this request on + boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName())); + if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) { + // In relay mode - so ask our other peers if they have it + + long requestTime = getArbitraryDataFileListMessage.getRequestTime(); + int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1; + long totalRequestTime = now - requestTime; + + if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) { + // Relay request hasn't timed out yet, so can potentially be rebroadcast + if (requestHops < RELAY_REQUEST_MAX_HOPS) { + // Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast + + Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer); + relayGetArbitraryDataFileListMessage.setId(message.getId()); + + LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... 
totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops); + //Network.getInstance().broadcast( + // broadcastPeer -> + // !broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null : + // broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage + //); + RNSNetwork.getInstance().broadcast(broadcastPeer -> relayGetArbitraryDataFileListMessage); + + } + else { + // This relay request has reached the maximum number of allowed hops + } + } + else { + // This relay request has timed out + } + } + } + +} diff --git a/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileManager.java b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileManager.java new file mode 100644 index 00000000..fc68fdec --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryDataFileManager.java @@ -0,0 +1,639 @@ +package org.qortal.controller.arbitrary; + +import com.google.common.net.InetAddresses; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.controller.Controller; +import org.qortal.data.arbitrary.RNSArbitraryDirectConnectionInfo; +import org.qortal.data.arbitrary.RNSArbitraryFileListResponseInfo; +import org.qortal.data.arbitrary.RNSArbitraryRelayInfo; +import org.qortal.data.network.PeerData; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.network.RNSNetwork; +import org.qortal.network.RNSPeer; +import org.qortal.network.message.*; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.utils.ArbitraryTransactionUtils; +import org.qortal.utils.Base58; +import org.qortal.utils.NTP; + +import java.security.SecureRandom; +import java.util.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.stream.Collectors; + +public class RNSArbitraryDataFileManager extends Thread { + + private static final Logger LOGGER = LogManager.getLogger(RNSArbitraryDataFileManager.class); + + private static RNSArbitraryDataFileManager instance; + private volatile boolean isStopping = false; + + + /** + * Map to keep track of our in progress (outgoing) arbitrary data file requests + */ + public Map arbitraryDataFileRequests = Collections.synchronizedMap(new HashMap<>()); + + /** + * Map to keep track of hashes that we might need to relay + */ + public final List arbitraryRelayMap = Collections.synchronizedList(new ArrayList<>()); + + /** + * List to keep track of any arbitrary data file hash responses + */ + public final List arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>()); + + /** + * List to keep track of peers potentially available for direct connections, based on recent requests + */ + private final List directConnectionInfo = Collections.synchronizedList(new ArrayList<>()); + + /** + * Map to keep track of peers requesting QDN data that we hold. + * Key = peer address string, value = time of last request. + * This allows for additional "burst" connections beyond existing limits. 
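+ * <p>
+ * Entries are added via addRecentDataRequest() below (which strips the port from the address, since it isn't
+ * guaranteed to match on subsequent requests) and are expired by cleanupRequestCache() once older than
+ * ARBITRARY_RECENT_DATA_REQUESTS_TIMEOUT.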
+ */ + private Map recentDataRequests = Collections.synchronizedMap(new HashMap<>()); + + + public static int MAX_FILE_HASH_RESPONSES = 1000; + + + private RNSArbitraryDataFileManager() { + } + + public static RNSArbitraryDataFileManager getInstance() { + if (instance == null) + instance = new RNSArbitraryDataFileManager(); + + return instance; + } + + @Override + public void run() { + Thread.currentThread().setName("Arbitrary Data File Manager"); + + try { + // Use a fixed thread pool to execute the arbitrary data file requests + int threadCount = 5; + ExecutorService arbitraryDataFileRequestExecutor = Executors.newFixedThreadPool(threadCount); + for (int i = 0; i < threadCount; i++) { + arbitraryDataFileRequestExecutor.execute(new ArbitraryDataFileRequestThread()); + } + + while (!isStopping) { + // Nothing to do yet + Thread.sleep(1000); + } + } catch (InterruptedException e) { + // Fall-through to exit thread... + } + } + + public void shutdown() { + isStopping = true; + this.interrupt(); + } + + + public void cleanupRequestCache(Long now) { + if (now == null) { + return; + } + final long requestMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_REQUEST_TIMEOUT; + arbitraryDataFileRequests.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < requestMinimumTimestamp); + + final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT; + arbitraryRelayMap.removeIf(entry -> entry == null || entry.getTimestamp() == null || entry.getTimestamp() < relayMinimumTimestamp); + arbitraryDataFileHashResponses.removeIf(entry -> entry.getTimestamp() < relayMinimumTimestamp); + + final long directConnectionInfoMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT; + directConnectionInfo.removeIf(entry -> entry.getTimestamp() < directConnectionInfoMinimumTimestamp); + + final long recentDataRequestMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RECENT_DATA_REQUESTS_TIMEOUT; + recentDataRequests.entrySet().removeIf(entry -> entry.getValue() < recentDataRequestMinimumTimestamp); + } + + + + // Fetch data files by hash + + public boolean fetchArbitraryDataFiles(Repository repository, + RNSPeer peer, + byte[] signature, + ArbitraryTransactionData arbitraryTransactionData, + List hashes) throws DataException { + + // Load data file(s) + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData); + boolean receivedAtLeastOneFile = false; + + // Now fetch actual data from this peer + for (byte[] hash : hashes) { + if (isStopping) { + return false; + } + String hash58 = Base58.encode(hash); + if (!arbitraryDataFile.chunkExists(hash)) { + // Only request the file if we aren't already requesting it from someone else + if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) { + LOGGER.debug("Requesting data file {} from peer {}", hash58, peer); + Long startTime = NTP.getTime(); + ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, null, arbitraryTransactionData, signature, hash, null); + Long endTime = NTP.getTime(); + if (receivedArbitraryDataFile != null) { + LOGGER.debug("Received data file {} from peer {}. 
Time taken: {} ms", receivedArbitraryDataFile.getHash58(), peer, (endTime-startTime)); + receivedAtLeastOneFile = true; + + // Remove this hash from arbitraryDataFileHashResponses now that we have received it + arbitraryDataFileHashResponses.remove(hash58); + } + else { + LOGGER.debug("Peer {} didn't respond with data file {} for signature {}. Time taken: {} ms", peer, Base58.encode(hash), Base58.encode(signature), (endTime-startTime)); + + // Remove this hash from arbitraryDataFileHashResponses now that we have failed to receive it + arbitraryDataFileHashResponses.remove(hash58); + + // Stop asking for files from this peer + break; + } + } + else { + LOGGER.trace("Already requesting data file {} for signature {} from peer {}", arbitraryDataFile, Base58.encode(signature), peer); + } + } + else { + // Remove this hash from arbitraryDataFileHashResponses because we have a local copy + arbitraryDataFileHashResponses.remove(hash58); + } + } + + if (receivedAtLeastOneFile) { + // Invalidate the hosted transactions cache as we are now hosting something new + ArbitraryDataStorageManager.getInstance().invalidateHostedTransactionsCache(); + + // Check if we have all the files we need for this transaction + if (arbitraryDataFile.allFilesExist()) { + + // We have all the chunks for this transaction, so we should invalidate the transaction's name's + // data cache so that it is rebuilt the next time we serve it + ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData); + } + } + + return receivedAtLeastOneFile; + } + + private ArbitraryDataFile fetchArbitraryDataFile(RNSPeer peer, RNSPeer requestingPeer, ArbitraryTransactionData arbitraryTransactionData, byte[] signature, byte[] hash, Message originalMessage) throws DataException { + ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature); + boolean fileAlreadyExists = existingFile.exists(); + String hash58 = Base58.encode(hash); + ArbitraryDataFile arbitraryDataFile; + + // Fetch the file if it doesn't exist locally + if (!fileAlreadyExists) { + LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer)); + arbitraryDataFileRequests.put(hash58, NTP.getTime()); + Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash); + + Message response = null; + //// TODO - revisit (doesn't work with Reticulum) + //try { + // response = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT); + //} catch (InterruptedException e) { + // // Will return below due to null response + //} + arbitraryDataFileRequests.remove(hash58); + LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58)); + + // We may need to remove the file list request, if we have all the files for this transaction + this.handleFileListRequests(signature); + + if (response == null) { + LOGGER.debug("Received null response from peer {}", peer); + return null; + } + if (response.getType() != MessageType.ARBITRARY_DATA_FILE) { + LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer); + return null; + } + + ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response; + arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile(); + } else { + LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58)); + arbitraryDataFile = existingFile; + } + + if (arbitraryDataFile == null) { + // We don't have a file, so 
give up here + return null; + } + + // We might want to forward the request to the peer that originally requested it + this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage); + + boolean isRelayRequest = (requestingPeer != null); + if (isRelayRequest) { + if (!fileAlreadyExists) { + // File didn't exist locally before the request, and it's a forwarding request, so delete it if it exists. + // It shouldn't exist on the filesystem yet, but leaving this here just in case. + arbitraryDataFile.delete(10); + } + } + else { + arbitraryDataFile.save(); + } + + // If this is a metadata file then we need to update the cache + if (arbitraryTransactionData != null && arbitraryTransactionData.getMetadataHash() != null) { + if (Arrays.equals(arbitraryTransactionData.getMetadataHash(), hash)) { + ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData); + } + } + + return arbitraryDataFile; + } + + private void handleFileListRequests(byte[] signature) { + try (final Repository repository = RepositoryManager.getRepository()) { + + // Fetch the transaction data + ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature); + if (arbitraryTransactionData == null) { + return; + } + + boolean allChunksExist = ArbitraryTransactionUtils.allChunksExist(arbitraryTransactionData); + + if (allChunksExist) { + // Update requests map to reflect that we've received all chunks + RNSArbitraryDataFileListManager.getInstance().deleteFileListRequestsForSignature(signature); + } + + } catch (DataException e) { + LOGGER.debug("Unable to handle file list requests: {}", e.getMessage()); + } + } + + public void handleArbitraryDataFileForwarding(RNSPeer requestingPeer, Message message, Message originalMessage) { + // Return if there is no originally requesting peer to forward to + if (requestingPeer == null) { + return; + } + + // Return if we're not in relay mode or if this request doesn't need forwarding + if (!Settings.getInstance().isRelayModeEnabled()) { + return; + } + + LOGGER.debug("Received arbitrary data file - forwarding is needed"); + + // The ID needs to match that of the original request + message.setId(originalMessage.getId()); + + //if (!requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) { + // LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer); + // requestingPeer.disconnect("failed to forward arbitrary data file"); + //} + //else { + // LOGGER.debug("Forwarded arbitrary data file to peer {}", requestingPeer); + //} + requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT); + } + + + // Fetch data directly from peers + + private List getDirectConnectionInfoForSignature(byte[] signature) { + synchronized (directConnectionInfo) { + return directConnectionInfo.stream().filter(i -> Arrays.equals(i.getSignature(), signature)).collect(Collectors.toList()); + } + } + + /** + * Add an ArbitraryDirectConnectionInfo item, but only if one with this peer-signature combination + * doesn't already exist. 
+ * @param connectionInfo - the direct connection info to add + */ + public void addDirectConnectionInfoIfUnique(RNSArbitraryDirectConnectionInfo connectionInfo) { + boolean peerAlreadyExists; + synchronized (directConnectionInfo) { + peerAlreadyExists = directConnectionInfo.stream() + .anyMatch(i -> Arrays.equals(i.getSignature(), connectionInfo.getSignature()) + && Objects.equals(i.getPeerAddress(), connectionInfo.getPeerAddress())); + } + if (!peerAlreadyExists) { + directConnectionInfo.add(connectionInfo); + } + } + + private void removeDirectConnectionInfo(RNSArbitraryDirectConnectionInfo connectionInfo) { + this.directConnectionInfo.remove(connectionInfo); + } + + public boolean fetchDataFilesFromPeersForSignature(byte[] signature) { + String signature58 = Base58.encode(signature); + + boolean success = false; + + try { + while (!success) { + if (isStopping) { + return false; + } + Thread.sleep(500L); + + // Firstly fetch peers that claim to be hosting files for this signature + List connectionInfoList = getDirectConnectionInfoForSignature(signature); + if (connectionInfoList == null || connectionInfoList.isEmpty()) { + LOGGER.debug("No remaining direct connection peers found for signature {}", signature58); + return false; + } + + LOGGER.debug("Attempting a direct peer connection for signature {}...", signature58); + + // Peers found, so pick one with the highest number of chunks + Comparator highestChunkCountFirstComparator = + Comparator.comparingInt(RNSArbitraryDirectConnectionInfo::getHashCount).reversed(); + RNSArbitraryDirectConnectionInfo directConnectionInfo = connectionInfoList.stream() + .sorted(highestChunkCountFirstComparator).findFirst().orElse(null); + + if (directConnectionInfo == null) { + return false; + } + + // Remove from the list so that a different peer is tried next time + removeDirectConnectionInfo(directConnectionInfo); + + //// TODO - rework this section (RNS network address?) + //String peerAddressString = directConnectionInfo.getPeerAddress(); + // + //// Parse the peer address to find the host and port + //String host = null; + //int port = -1; + //String[] parts = peerAddressString.split(":"); + //if (parts.length > 1) { + // host = parts[0]; + // port = Integer.parseInt(parts[1]); + //} else { + // // Assume no port included + // host = peerAddressString; + // // Use default listen port + // port = Settings.getInstance().getDefaultListenPort(); + //} + // + //String peerAddressStringWithPort = String.format("%s:%d", host, port); + //success = Network.getInstance().requestDataFromPeer(peerAddressStringWithPort, signature); + // + //int defaultPort = Settings.getInstance().getDefaultListenPort(); + // + //// If unsuccessful, and using a non-standard port, try a second connection with the default listen port, + //// since almost all nodes use that. This is a workaround to account for any ephemeral ports that may + //// have made it into the dataset. + //if (!success) { + // if (host != null && port > 0) { + // if (port != defaultPort) { + // String newPeerAddressString = String.format("%s:%d", host, defaultPort); + // success = Network.getInstance().requestDataFromPeer(newPeerAddressString, signature); + // } + // } + //} + // + //// If _still_ unsuccessful, try matching the peer's IP address with some known peers, and then connect + //// to each of those in turn until one succeeds. 
+ //if (!success) { + // if (host != null) { + // final String finalHost = host; + // List knownPeers = Network.getInstance().getAllKnownPeers().stream() + // .filter(knownPeerData -> knownPeerData.getAddress().getHost().equals(finalHost)) + // .collect(Collectors.toList()); + // // Loop through each match and attempt a connection + // for (PeerData matchingPeer : knownPeers) { + // String matchingPeerAddress = matchingPeer.getAddress().toString(); + // int matchingPeerPort = matchingPeer.getAddress().getPort(); + // // Make sure that it's not a port we've already tried + // if (matchingPeerPort != port && matchingPeerPort != defaultPort) { + // success = Network.getInstance().requestDataFromPeer(matchingPeerAddress, signature); + // if (success) { + // // Successfully connected, so stop making connections + // break; + // } + // } + // } + // } + //} + + if (success) { + // We were able to connect with a peer, so track the request + RNSArbitraryDataFileListManager.getInstance().addToSignatureRequests(signature58, false, true); + } + + } + } catch (InterruptedException e) { + // Do nothing + } + + return success; + } + + + // Relays + + private List getRelayInfoListForHash(String hash58) { + synchronized (arbitraryRelayMap) { + return arbitraryRelayMap.stream() + .filter(relayInfo -> Objects.equals(relayInfo.getHash58(), hash58)) + .collect(Collectors.toList()); + } + } + + private RNSArbitraryRelayInfo getOptimalRelayInfoEntryForHash(String hash58) { + LOGGER.trace("Fetching relay info for hash: {}", hash58); + List relayInfoList = this.getRelayInfoListForHash(hash58); + if (relayInfoList != null && !relayInfoList.isEmpty()) { + + // Remove any with null requestHops + relayInfoList.removeIf(r -> r.getRequestHops() == null); + + // If list is now empty, then just return one at random + if (relayInfoList.isEmpty()) { + return this.getRandomRelayInfoEntryForHash(hash58); + } + + // Sort by number of hops (lowest first) + relayInfoList.sort(Comparator.comparingInt(RNSArbitraryRelayInfo::getRequestHops)); + + // FUTURE: secondary sort by requestTime? + + RNSArbitraryRelayInfo relayInfo = relayInfoList.get(0); + + LOGGER.trace("Returning optimal relay info for hash: {} (requestHops {})", hash58, relayInfo.getRequestHops()); + return relayInfo; + } + LOGGER.trace("No relay info exists for hash: {}", hash58); + return null; + } + + private RNSArbitraryRelayInfo getRandomRelayInfoEntryForHash(String hash58) { + LOGGER.trace("Fetching random relay info for hash: {}", hash58); + List relayInfoList = this.getRelayInfoListForHash(hash58); + if (relayInfoList != null && !relayInfoList.isEmpty()) { + + // Pick random item + int index = new SecureRandom().nextInt(relayInfoList.size()); + LOGGER.trace("Returning random relay info for hash: {} (index {})", hash58, index); + return relayInfoList.get(index); + } + LOGGER.trace("No relay info exists for hash: {}", hash58); + return null; + } + + public void addToRelayMap(RNSArbitraryRelayInfo newEntry) { + if (newEntry == null || !newEntry.isValid()) { + return; + } + + // Remove existing entry for this peer if it exists, to renew the timestamp + this.removeFromRelayMap(newEntry); + + // Re-add + arbitraryRelayMap.add(newEntry); + LOGGER.debug("Added entry to relay map: {}", newEntry); + } + + private void removeFromRelayMap(RNSArbitraryRelayInfo entry) { + arbitraryRelayMap.removeIf(relayInfo -> relayInfo.equals(entry)); + } + + + // Peers requesting QDN data from us + + /** + * Add an address string of a peer that is trying to request data from us. 
+ * @param peerAddress + */ + public void addRecentDataRequest(String peerAddress) { + if (peerAddress == null) { + return; + } + + Long now = NTP.getTime(); + if (now == null) { + return; + } + + // Make sure to remove the port, since it isn't guaranteed to match next time + String[] parts = peerAddress.split(":"); + if (parts.length == 0) { + return; + } + String host = parts[0]; + if (!InetAddresses.isInetAddress(host)) { + // Invalid host + return; + } + + this.recentDataRequests.put(host, now); + } + + public boolean isPeerRequestingData(String peerAddressWithoutPort) { + return this.recentDataRequests.containsKey(peerAddressWithoutPort); + } + + public boolean hasPendingDataRequest() { + return !this.recentDataRequests.isEmpty(); + } + + + // Network handlers + + public void onNetworkGetArbitraryDataFileMessage(RNSPeer peer, Message message) { + // Don't respond if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + GetArbitraryDataFileMessage getArbitraryDataFileMessage = (GetArbitraryDataFileMessage) message; + byte[] hash = getArbitraryDataFileMessage.getHash(); + String hash58 = Base58.encode(hash); + byte[] signature = getArbitraryDataFileMessage.getSignature(); + Controller.getInstance().stats.getArbitraryDataFileMessageStats.requests.incrementAndGet(); + + LOGGER.debug("Received GetArbitraryDataFileMessage from peer {} for hash {}", peer, Base58.encode(hash)); + + try { + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature); + RNSArbitraryRelayInfo relayInfo = this.getOptimalRelayInfoEntryForHash(hash58); + + if (arbitraryDataFile.exists()) { + LOGGER.trace("Hash {} exists", hash58); + + // We can serve the file directly as we already have it + LOGGER.debug("Sending file {}...", arbitraryDataFile); + ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile); + arbitraryDataFileMessage.setId(message.getId()); + //if (!peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) { + // LOGGER.debug("Couldn't send file {}", arbitraryDataFile); + // peer.disconnect("failed to send file"); + //} + //else { + // LOGGER.debug("Sent file {}", arbitraryDataFile); + //} + peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT); + } + //// TODO: rework (doesn't work with Reticulum) + //else if (relayInfo != null) { + // LOGGER.debug("We have relay info for hash {}", Base58.encode(hash)); + // // We need to ask this peer for the file + // Peer peerToAsk = relayInfo.getPeer(); + // if (peerToAsk != null) { + // + // // Forward the message to this peer + // LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58); + // // No need to pass arbitraryTransactionData below because this is only used for metadata caching, + // // and metadata isn't retained when relaying. 
+ // this.fetchArbitraryDataFile(peerToAsk, peer, null, signature, hash, message); + // } + // else { + // LOGGER.debug("Peer {} not found in relay info", peer); + // } + //} + else { + LOGGER.debug("Hash {} doesn't exist and we don't have relay info", hash58); + + // We don't have this file + Controller.getInstance().stats.getArbitraryDataFileMessageStats.unknownFiles.getAndIncrement(); + + // Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout + LOGGER.debug(String.format("Sending 'file unknown' response to peer %s for GET_FILE request for unknown file %s", peer, arbitraryDataFile)); + + //// Send generic 'unknown' message as it's very short + //Message fileUnknownMessage = peer.getPeersVersion() >= GenericUnknownMessage.MINIMUM_PEER_VERSION + // ? new GenericUnknownMessage() + // : new BlockSummariesMessage(Collections.emptyList()); + //fileUnknownMessage.setId(message.getId()); + //if (!peer.sendMessage(fileUnknownMessage)) { + // LOGGER.debug("Couldn't sent file-unknown response"); + // peer.disconnect("failed to send file-unknown response"); + //} + //else { + // LOGGER.debug("Sent file-unknown response for file {}", arbitraryDataFile); + //} + Message fileUnknownMessage = new GenericUnknownMessage(); + peer.sendMessage(fileUnknownMessage); + } + } + catch (DataException e) { + LOGGER.debug("Unable to handle request for arbitrary data file: {}", hash58); + } + } + +} diff --git a/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryMetadataManager.java b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryMetadataManager.java new file mode 100644 index 00000000..45e674b7 --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/RNSArbitraryMetadataManager.java @@ -0,0 +1,481 @@ +package org.qortal.controller.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.arbitrary.ArbitraryDataResource; +import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata; +import org.qortal.controller.Controller; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.network.RNSNetwork; +import org.qortal.network.RNSPeer; +import org.qortal.network.message.ArbitraryMetadataMessage; +import org.qortal.network.message.GetArbitraryMetadataMessage; +import org.qortal.network.message.Message; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.utils.Base58; +import org.qortal.utils.ListUtils; +import org.qortal.utils.NTP; +import org.qortal.utils.Triple; + +import java.io.IOException; +import java.util.*; + +import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.*; + +public class RNSArbitraryMetadataManager { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryMetadataManager.class); + + private static RNSArbitraryMetadataManager instance; + + /** + * Map of recent incoming requests for ARBITRARY transaction metadata. + *

+ * Key is original request's message ID<br>
+ * Value is Triple&lt;signature in base58, first requesting peer, first request's timestamp&gt;
+ * <p>
+ * If peer is null then either:
+ * <ul>
+ * <li>we are the original requesting peer</li>
+ * <li>we have already sent data payload to original requesting peer.</li>
+ * </ul>
+ * If signature is null then we have already received the file list and either:
+ * <ul>
+ * <li>we are the original requesting peer and have processed it</li>
+ * <li>we have forwarded the metadata</li>
+ * </ul>
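+ * <p>
+ * Illustrative lifecycle, based on the code below: fetchArbitraryMetadata() stores (signature58, null, timestamp)
+ * under a random message ID and then polls this map; onNetworkArbitraryMetadataMessage() replaces the entry with
+ * (null, null, timestamp) once the metadata arrives, which ends the polling loop.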
+ */ + public Map> arbitraryMetadataRequests = Collections.synchronizedMap(new HashMap<>()); + + /** + * Map to keep track of in progress arbitrary metadata requests + * Key: string - the signature encoded in base58 + * Value: Triple + */ + private Map> arbitraryMetadataSignatureRequests = Collections.synchronizedMap(new HashMap<>()); + + + private RNSArbitraryMetadataManager() { + } + + public static RNSArbitraryMetadataManager getInstance() { + if (instance == null) + instance = new RNSArbitraryMetadataManager(); + + return instance; + } + + public void cleanupRequestCache(Long now) { + if (now == null) { + return; + } + final long requestMinimumTimestamp = now - ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT; + arbitraryMetadataRequests.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < requestMinimumTimestamp); + } + + + public ArbitraryDataTransactionMetadata fetchMetadata(ArbitraryDataResource arbitraryDataResource, boolean useRateLimiter) { + try (final Repository repository = RepositoryManager.getRepository()) { + // Find latest transaction + ArbitraryTransactionData latestTransaction = repository.getArbitraryRepository() + .getLatestTransaction(arbitraryDataResource.getResourceId(), arbitraryDataResource.getService(), + null, arbitraryDataResource.getIdentifier()); + + if (latestTransaction != null) { + byte[] signature = latestTransaction.getSignature(); + byte[] metadataHash = latestTransaction.getMetadataHash(); + if (metadataHash == null) { + // This resource doesn't have metadata + throw new IllegalArgumentException("This resource doesn't have metadata"); + } + + ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature); + if (!metadataFile.exists()) { + // Request from network + this.fetchArbitraryMetadata(latestTransaction, useRateLimiter); + } + + // Now check again as it may have been downloaded above + if (metadataFile.exists()) { + // Use local copy + ArbitraryDataTransactionMetadata transactionMetadata = new ArbitraryDataTransactionMetadata(metadataFile.getFilePath()); + try { + transactionMetadata.read(); + } catch (DataException e) { + // Invalid file, so delete it + LOGGER.info("Deleting invalid metadata file due to exception: {}", e.getMessage()); + transactionMetadata.delete(); + return null; + } + return transactionMetadata; + } + } + + } catch (DataException | IOException e) { + LOGGER.error("Repository issue when fetching arbitrary transaction metadata", e); + } + + return null; + } + + + // Request metadata from network + + public byte[] fetchArbitraryMetadata(ArbitraryTransactionData arbitraryTransactionData, boolean useRateLimiter) { + byte[] metadataHash = arbitraryTransactionData.getMetadataHash(); + if (metadataHash == null) { + return null; + } + + byte[] signature = arbitraryTransactionData.getSignature(); + String signature58 = Base58.encode(signature); + + // Require an NTP sync + Long now = NTP.getTime(); + if (now == null) { + return null; + } + + // If we've already tried too many times in a short space of time, make sure to give up + if (useRateLimiter && !this.shouldMakeMetadataRequestForSignature(signature58)) { + LOGGER.trace("Skipping metadata request for signature {} due to rate limit", signature58); + return null; + } + this.addToSignatureRequests(signature58, true, false); + + //List handshakedPeers = Network.getInstance().getImmutableHandshakedPeers(); + List handshakedPeers = RNSNetwork.getInstance().getLinkedPeers(); + LOGGER.debug(String.format("Sending metadata request 
for signature %s to %d peers...", signature58, handshakedPeers.size())); + + // Build request + Message getArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, now, 0); + + // Save our request into requests map + Triple requestEntry = new Triple<>(signature58, null, NTP.getTime()); + + // Assign random ID to this message + int id; + do { + id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1; + + // Put queue into map (keyed by message ID) so we can poll for a response + // If putIfAbsent() doesn't return null, then this ID is already taken + } while (arbitraryMetadataRequests.put(id, requestEntry) != null); + getArbitraryMetadataMessage.setId(id); + + // Broadcast request + RNSNetwork.getInstance().broadcast(peer -> getArbitraryMetadataMessage); + + // Poll to see if data has arrived + final long singleWait = 100; + long totalWait = 0; + while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) { + try { + Thread.sleep(singleWait); + } catch (InterruptedException e) { + break; + } + + requestEntry = arbitraryMetadataRequests.get(id); + if (requestEntry == null) + return null; + + if (requestEntry.getA() == null) + break; + + totalWait += singleWait; + } + + try { + ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature); + if (metadataFile.exists()) { + return metadataFile.getBytes(); + } + } catch (DataException e) { + // Do nothing + } + + return null; + } + + + // Track metadata lookups by signature + + private boolean shouldMakeMetadataRequestForSignature(String signature58) { + Triple request = arbitraryMetadataSignatureRequests.get(signature58); + + if (request == null) { + // Not attempted yet + return true; + } + + // Extract the components + Integer networkBroadcastCount = request.getA(); + // Integer directPeerRequestCount = request.getB(); + Long lastAttemptTimestamp = request.getC(); + + if (lastAttemptTimestamp == null) { + // Not attempted yet + return true; + } + + long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp; + + // Allow a second attempt after 60 seconds + if (timeSinceLastAttempt > 60 * 1000L) { + // We haven't tried for at least 60 seconds + + if (networkBroadcastCount < 2) { + // We've made less than 2 total attempts + return true; + } + } + + // Then allow another attempt after 60 minutes + if (timeSinceLastAttempt > 60 * 60 * 1000L) { + // We haven't tried for at least 60 minutes + + if (networkBroadcastCount < 3) { + // We've made less than 3 total attempts + return true; + } + } + + return false; + } + + public boolean isSignatureRateLimited(byte[] signature) { + String signature58 = Base58.encode(signature); + return !this.shouldMakeMetadataRequestForSignature(signature58); + } + + public long lastRequestForSignature(byte[] signature) { + String signature58 = Base58.encode(signature); + Triple request = arbitraryMetadataSignatureRequests.get(signature58); + + if (request == null) { + // Not attempted yet + return 0; + } + + // Extract the components + Long lastAttemptTimestamp = request.getC(); + if (lastAttemptTimestamp != null) { + return lastAttemptTimestamp; + } + return 0; + } + + public void addToSignatureRequests(String signature58, boolean incrementNetworkRequests, boolean incrementPeerRequests) { + Triple request = arbitraryMetadataSignatureRequests.get(signature58); + Long now = NTP.getTime(); + + if (request == null) { + // No entry yet + Triple newRequest = new Triple<>(0, 0, now); + arbitraryMetadataSignatureRequests.put(signature58, newRequest); + } + else { + // There is an 
existing entry + if (incrementNetworkRequests) { + request.setA(request.getA() + 1); + } + if (incrementPeerRequests) { + request.setB(request.getB() + 1); + } + request.setC(now); + arbitraryMetadataSignatureRequests.put(signature58, request); + } + } + + public void removeFromSignatureRequests(String signature58) { + arbitraryMetadataSignatureRequests.remove(signature58); + } + + + // Network handlers + + public void onNetworkArbitraryMetadataMessage(RNSPeer peer, Message message) { + // Don't process if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + ArbitraryMetadataMessage arbitraryMetadataMessage = (ArbitraryMetadataMessage) message; + LOGGER.debug("Received metadata from peer {}", peer); + + // Do we have a pending request for this data? + Triple request = arbitraryMetadataRequests.get(message.getId()); + if (request == null || request.getA() == null) { + return; + } + boolean isRelayRequest = (request.getB() != null); + + // Does this message's signature match what we're expecting? + byte[] signature = arbitraryMetadataMessage.getSignature(); + String signature58 = Base58.encode(signature); + if (!request.getA().equals(signature58)) { + return; + } + + // Update requests map to reflect that we've received this metadata + Triple newEntry = new Triple<>(null, null, request.getC()); + arbitraryMetadataRequests.put(message.getId(), newEntry); + + // Get transaction info + try (final Repository repository = RepositoryManager.getRepository()) { + TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); + if (!(transactionData instanceof ArbitraryTransactionData)) { + return; + } + ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData; + + // Check if the name is blocked + boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName())); + + // Save if not blocked + ArbitraryDataFile arbitraryMetadataFile = arbitraryMetadataMessage.getArbitraryMetadataFile(); + if (!isBlocked && arbitraryMetadataFile != null) { + arbitraryMetadataFile.save(); + } + + // Forwarding + if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) { + if (!isBlocked) { + RNSPeer requestingPeer = request.getB(); + if (requestingPeer != null) { + + ArbitraryMetadataMessage forwardArbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, arbitraryMetadataMessage.getArbitraryMetadataFile()); + forwardArbitraryMetadataMessage.setId(arbitraryMetadataMessage.getId()); + + // Forward to requesting peer + LOGGER.debug("Forwarding metadata to requesting peer: {}", requestingPeer); + //if (!requestingPeer.sendMessage(forwardArbitraryMetadataMessage)) { + // requestingPeer.disconnect("failed to forward arbitrary metadata"); + //} + requestingPeer.sendMessage(forwardArbitraryMetadataMessage); + } + } + } + + // Add to resource queue to update arbitrary resource caches + if (arbitraryTransactionData != null) { + ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData); + } + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while saving arbitrary transaction metadata from peer %s", peer), e); + } + } + + public void onNetworkGetArbitraryMetadataMessage(RNSPeer peer, Message message) { + // Don't respond if QDN is disabled + if (!Settings.getInstance().isQdnEnabled()) { + return; + } + + Controller.getInstance().stats.getArbitraryMetadataMessageStats.requests.incrementAndGet(); + + 
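+ // Handler outline: record this request keyed by message ID (putIfAbsent() below de-duplicates repeats),
+ // respond with the metadata file if we hold it locally, otherwise relay the request to our other peers
+ // when relay mode is enabled and the request hasn't exceeded its time/hop limits.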
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) message; + byte[] signature = getArbitraryMetadataMessage.getSignature(); + String signature58 = Base58.encode(signature); + Long now = NTP.getTime(); + Triple newEntry = new Triple<>(signature58, peer, now); + + // If we've seen this request recently, then ignore + if (arbitraryMetadataRequests.putIfAbsent(message.getId(), newEntry) != null) { + LOGGER.debug("Ignoring metadata request from peer {} for signature {}", peer, signature58); + return; + } + + LOGGER.debug("Received metadata request from peer {} for signature {}", peer, signature58); + + ArbitraryTransactionData transactionData = null; + ArbitraryDataFile metadataFile = null; + + try (final Repository repository = RepositoryManager.getRepository()) { + + // Firstly we need to lookup this file on chain to get its metadata hash + transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature); + if (transactionData instanceof ArbitraryTransactionData) { + + // Check if we're even allowed to serve metadata for this transaction + if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) { + + byte[] metadataHash = transactionData.getMetadataHash(); + if (metadataHash != null) { + + // Load metadata file + metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature); + } + } + } + + } catch (DataException e) { + LOGGER.error(String.format("Repository issue while fetching arbitrary metadata for peer %s", peer), e); + } + + // We should only respond if we have the metadata file + if (metadataFile != null && metadataFile.exists()) { + + // We have the metadata file, so update requests map to reflect that we've sent it + newEntry = new Triple<>(null, null, now); + arbitraryMetadataRequests.put(message.getId(), newEntry); + + ArbitraryMetadataMessage arbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, metadataFile); + arbitraryMetadataMessage.setId(message.getId()); + //if (!peer.sendMessage(arbitraryMetadataMessage)) { + // LOGGER.debug("Couldn't send metadata"); + // peer.disconnect("failed to send metadata"); + // return; + //} + peer.sendMessage(arbitraryMetadataMessage); + LOGGER.debug("Sent metadata"); + + // Nothing left to do, so return to prevent any unnecessary forwarding from occurring + LOGGER.debug("No need for any forwarding because metadata request is fully served"); + return; + + } + + // We may need to forward this request on + boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName())); + if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) { + // In relay mode - so ask our other peers if they have it + + long requestTime = getArbitraryMetadataMessage.getRequestTime(); + int requestHops = getArbitraryMetadataMessage.getRequestHops() + 1; + long totalRequestTime = now - requestTime; + + if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) { + // Relay request hasn't timed out yet, so can potentially be rebroadcast + if (requestHops < RELAY_REQUEST_MAX_HOPS) { + // Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast + + Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops); + relayGetArbitraryMetadataMessage.setId(message.getId()); + + LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... 
totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops); + //Network.getInstance().broadcast( + // broadcastPeer -> + // !broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null : + // broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage); + RNSNetwork.getInstance().broadcast(broadcastPeer -> relayGetArbitraryMetadataMessage); + + } + else { + // This relay request has reached the maximum number of allowed hops + } + } + else { + // This relay request has timed out + } + } + } + +} diff --git a/src/main/java/org/qortal/controller/tradebot/RNSTradeBot.java b/src/main/java/org/qortal/controller/tradebot/RNSTradeBot.java new file mode 100644 index 00000000..c648cbbd --- /dev/null +++ b/src/main/java/org/qortal/controller/tradebot/RNSTradeBot.java @@ -0,0 +1,778 @@ +package org.qortal.controller.tradebot; + +import com.google.common.primitives.Longs; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.bitcoinj.core.ECKey; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.api.model.crosschain.TradeBotCreateRequest; +import org.qortal.controller.Controller; +import org.qortal.controller.Synchronizer; +import org.qortal.controller.tradebot.AcctTradeBot.ResponseResult; +import org.qortal.crosschain.*; +import org.qortal.crypto.Crypto; +import org.qortal.data.at.ATData; +import org.qortal.data.crosschain.CrossChainTradeData; +import org.qortal.data.crosschain.TradeBotData; +import org.qortal.data.network.TradePresenceData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.event.Event; +import org.qortal.event.EventBus; +import org.qortal.event.Listener; +import org.qortal.gui.SysTray; +import org.qortal.network.RNSNetwork; +import org.qortal.network.RNSPeer; +import org.qortal.network.message.GetTradePresencesMessage; +import org.qortal.network.message.Message; +import org.qortal.network.message.TradePresencesMessage; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.repository.hsqldb.HSQLDBImportExport; +import org.qortal.settings.Settings; +import org.qortal.transaction.Transaction; +import org.qortal.utils.ByteArray; +import org.qortal.utils.NTP; + +import java.awt.TrayIcon.MessageType; +import java.security.SecureRandom; +import java.util.*; +import java.util.function.Supplier; + +/** + * Performing cross-chain trading steps on behalf of user. + *

+ * We deal with three different independent state-spaces here:
+ * <ul>
+ * <li>Qortal blockchain</li>
+ * <li>Foreign blockchain</li>
+ * <li>Trade-bot entries</li>
+ * </ul>
+ */ +public class RNSTradeBot implements Listener { + + private static final Logger LOGGER = LogManager.getLogger(TradeBot.class); + private static final Random RANDOM = new SecureRandom(); + + /** Maximum lifetime of trade presence timestamp. 30 mins in ms. */ + private static final long PRESENCE_LIFETIME = 30 * 60 * 1000L; + /** How soon before expiry of our own trade presence timestamp that we want to trigger renewal. 5 mins in ms. */ + private static final long EARLY_RENEWAL_PERIOD = 5 * 60 * 1000L; + /** Trade presence timestamps are rounded up to this nearest interval. Bigger values improve grouping of entries in [GET_]TRADE_PRESENCES network messages. 15 mins in ms. */ + private static final long EXPIRY_ROUNDING = 15 * 60 * 1000L; + /** How often we want to broadcast our list of all known trade presences to peers. 5 mins in ms. */ + private static final long PRESENCE_BROADCAST_INTERVAL = 5 * 60 * 1000L; + + public interface StateNameAndValueSupplier { + public String getState(); + public int getStateValue(); + } + + public static class StateChangeEvent implements Event { + private final TradeBotData tradeBotData; + + public StateChangeEvent(TradeBotData tradeBotData) { + this.tradeBotData = tradeBotData; + } + + public TradeBotData getTradeBotData() { + return this.tradeBotData; + } + } + + public static class TradePresenceEvent implements Event { + private final TradePresenceData tradePresenceData; + + public TradePresenceEvent(TradePresenceData tradePresenceData) { + this.tradePresenceData = tradePresenceData; + } + + public TradePresenceData getTradePresenceData() { + return this.tradePresenceData; + } + } + + private static final Map, Supplier> acctTradeBotSuppliers = new HashMap<>(); + static { + acctTradeBotSuppliers.put(BitcoinACCTv1.class, BitcoinACCTv1TradeBot::getInstance); + acctTradeBotSuppliers.put(BitcoinACCTv3.class, BitcoinACCTv3TradeBot::getInstance); + acctTradeBotSuppliers.put(LitecoinACCTv1.class, LitecoinACCTv1TradeBot::getInstance); + acctTradeBotSuppliers.put(LitecoinACCTv3.class, LitecoinACCTv3TradeBot::getInstance); + acctTradeBotSuppliers.put(DogecoinACCTv1.class, DogecoinACCTv1TradeBot::getInstance); + acctTradeBotSuppliers.put(DogecoinACCTv3.class, DogecoinACCTv3TradeBot::getInstance); + acctTradeBotSuppliers.put(DigibyteACCTv3.class, DigibyteACCTv3TradeBot::getInstance); + acctTradeBotSuppliers.put(RavencoinACCTv3.class, RavencoinACCTv3TradeBot::getInstance); + acctTradeBotSuppliers.put(PirateChainACCTv3.class, PirateChainACCTv3TradeBot::getInstance); + } + + private static RNSTradeBot instance; + + private final Map ourTradePresenceTimestampsByPubkey = Collections.synchronizedMap(new HashMap<>()); + private final List pendingTradePresences = Collections.synchronizedList(new ArrayList<>()); + + private final Map allTradePresencesByPubkey = Collections.synchronizedMap(new HashMap<>()); + private Map safeAllTradePresencesByPubkey = Collections.emptyMap(); + private long nextTradePresenceBroadcastTimestamp = 0L; + + private Map failedTrades = new HashMap<>(); + private Map validTrades = new HashMap<>(); + + private RNSTradeBot() { + EventBus.INSTANCE.addListener(event -> RNSTradeBot.getInstance().listen(event)); + } + + public static synchronized RNSTradeBot getInstance() { + if (instance == null) + instance = new RNSTradeBot(); + + return instance; + } + + public ACCT getAcctUsingAtData(ATData atData) { + byte[] codeHash = atData.getCodeHash(); + if (codeHash == null) + return null; + + return SupportedBlockchain.getAcctByCodeHash(codeHash); + } + + 
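(Reviewer note, not part of the patch: acctTradeBotSuppliers above maps each supported ACCT class to a supplier of its singleton trade-bot, and findTradeBotForAcct() further down in this file simply looks that supplier up, returning null for unsupported ACCTs. A minimal, self-contained sketch of the same registry idea, using hypothetical stand-in types rather than the real Qortal classes:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    public class HandlerRegistrySketch {
        interface Handler { String name(); }

        // One lazily-resolved singleton supplier per key class, mirroring acctTradeBotSuppliers
        private static final Map<Class<?>, Supplier<Handler>> SUPPLIERS = new HashMap<>();

        static {
            SUPPLIERS.put(String.class, () -> () -> "string handler");
            SUPPLIERS.put(Integer.class, () -> () -> "integer handler");
        }

        // Mirrors findTradeBotForAcct(): unknown keys yield null rather than throwing
        static Handler findHandlerFor(Class<?> keyClass) {
            Supplier<Handler> supplier = SUPPLIERS.get(keyClass);
            return supplier == null ? null : supplier.get();
        }

        public static void main(String[] args) {
            System.out.println(findHandlerFor(String.class).name()); // "string handler"
            System.out.println(findHandlerFor(Double.class));        // null -> treated as unsupported
        }
    }

Returning null for unknown keys is what lets deleteEntry() and listen() below treat a no-longer-supported ACCT as a soft failure rather than an exception.)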
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException { + ACCT acct = this.getAcctUsingAtData(atData); + if (acct == null) + return null; + + return acct.populateTradeData(repository, atData); + } + + /** + * Creates a new trade-bot entry from the "Bob" viewpoint, + * i.e. OFFERing QORT in exchange for foreign blockchain currency. + *

+ * Generates:
+ * <ul>
+ * <li>new 'trade' private key</li>
+ * <li>secret(s)</li>
+ * </ul>
+ * Derives:
+ * <ul>
+ * <li>'native' (as in Qortal) public key, public key hash, address (starting with Q)</li>
+ * <li>'foreign' public key, public key hash</li>
+ * <li>hash(es) of secret(s)</li>
+ * </ul>
+ * A Qortal AT is then constructed including the following as constants in the 'data segment':
+ * <ul>
+ * <li>'native' (Qortal) 'trade' address - used to MESSAGE AT</li>
+ * <li>'foreign' public key hash - used by Alice's to allow redeem of currency on foreign blockchain</li>
+ * <li>hash(es) of secret(s) - used by AT (optional) and foreign blockchain as needed</li>
+ * <li>QORT amount on offer by Bob</li>
+ * <li>foreign currency amount expected in return by Bob (from Alice)</li>
+ * <li>trading timeout, in case things go wrong and everyone needs to refund</li>
+ * </ul>
+ * <p>
+ * Returns a DEPLOY_AT transaction that needs to be signed and broadcast to the Qortal network.
+ * <p>
+ * Trade-bot will wait for Bob's AT to be deployed before taking next step.

+ * @param repository + * @param tradeBotCreateRequest + * @return raw, unsigned DEPLOY_AT transaction + * @throws DataException + */ + public byte[] createTrade(Repository repository, TradeBotCreateRequest tradeBotCreateRequest) throws DataException { + // Fetch latest ACCT version for requested foreign blockchain + ACCT acct = tradeBotCreateRequest.foreignBlockchain.getLatestAcct(); + + AcctTradeBot acctTradeBot = findTradeBotForAcct(acct); + if (acctTradeBot == null) + return null; + + return acctTradeBot.createTrade(repository, tradeBotCreateRequest); + } + + /** + * Creates a trade-bot entry from the 'Alice' viewpoint, + * i.e. matching foreign blockchain currency to an existing QORT offer. + *

+ * Requires a chosen trade offer from Bob, passed by crossChainTradeData + * and access to a foreign blockchain wallet via foreignKey. + *

+ * @param repository + * @param crossChainTradeData chosen trade OFFER that Alice wants to match + * @param foreignKey foreign blockchain wallet key + * @throws DataException + */ + public ResponseResult startResponse(Repository repository, ATData atData, ACCT acct, + CrossChainTradeData crossChainTradeData, String foreignKey, String receivingAddress) throws DataException { + AcctTradeBot acctTradeBot = findTradeBotForAcct(acct); + if (acctTradeBot == null) { + LOGGER.debug(() -> String.format("Couldn't find ACCT trade-bot for AT %s", atData.getATAddress())); + return ResponseResult.NETWORK_ISSUE; + } + + // Check Alice doesn't already have an existing, on-going trade-bot entry for this AT. + if (repository.getCrossChainRepository().existsTradeWithAtExcludingStates(atData.getATAddress(), acctTradeBot.getEndStates())) + return ResponseResult.TRADE_ALREADY_EXISTS; + + return acctTradeBot.startResponse(repository, atData, acct, crossChainTradeData, foreignKey, receivingAddress); + } + + /** + * Creates a trade-bot entries from the 'Alice' viewpoint, + * i.e. matching foreign blockchain currency to existing QORT offers. + *

+ * Requires chosen trade offers from Bob, passed by crossChainTradeData + * and access to a foreign blockchain wallet via foreignKey. + *

+ * @param repository + * @param crossChainTradeDataList chosen trade OFFERs that Alice wants to match + * @param receiveAddress Alice's Qortal address to receive her QORT + * @param foreignKey foreign blockchain wallet key + * @param bitcoiny + * @throws DataException + */ + public ResponseResult startResponseMultiple( + Repository repository, + ACCT acct, + List crossChainTradeDataList, + String receiveAddress, + String foreignKey, + Bitcoiny bitcoiny) throws DataException { + AcctTradeBot acctTradeBot = findTradeBotForAcct(acct); + if (acctTradeBot == null) { + LOGGER.debug(() -> String.format("Couldn't find ACCT trade-bot for %s", acct.getBlockchain())); + return ResponseResult.NETWORK_ISSUE; + } + + for( CrossChainTradeData tradeData : crossChainTradeDataList) { + // Check Alice doesn't already have an existing, on-going trade-bot entry for this AT. + if (repository.getCrossChainRepository().existsTradeWithAtExcludingStates(tradeData.qortalAtAddress, acctTradeBot.getEndStates())) + return ResponseResult.TRADE_ALREADY_EXISTS; + } + return TradeBotUtils.startResponseMultiple(repository, acct, crossChainTradeDataList, receiveAddress, foreignKey, bitcoiny); + } + + public boolean deleteEntry(Repository repository, byte[] tradePrivateKey) throws DataException { + TradeBotData tradeBotData = repository.getCrossChainRepository().getTradeBotData(tradePrivateKey); + if (tradeBotData == null) + // Can't delete what we don't have! + return false; + + boolean canDelete = false; + + ACCT acct = SupportedBlockchain.getAcctByName(tradeBotData.getAcctName()); + if (acct == null) + // We can't/no longer support this ACCT + canDelete = true; + else { + AcctTradeBot acctTradeBot = findTradeBotForAcct(acct); + canDelete = acctTradeBot == null || acctTradeBot.canDelete(repository, tradeBotData); + } + + if (canDelete) { + repository.getCrossChainRepository().delete(tradePrivateKey); + repository.saveChanges(); + } + + return canDelete; + } + + @Override + public void listen(Event event) { + if (!(event instanceof Synchronizer.NewChainTipEvent)) + return; + + // Don't process trade bots or broadcast presence timestamps if our chain is more than 60 minutes old + final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L); + if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) + return; + + synchronized (this) { + expireOldPresenceTimestamps(); + + List allTradeBotData; + + try (final Repository repository = RepositoryManager.getRepository()) { + allTradeBotData = repository.getCrossChainRepository().getAllTradeBotData(); + } catch (DataException e) { + LOGGER.error("Couldn't run trade bot due to repository issue", e); + return; + } + + for (TradeBotData tradeBotData : allTradeBotData) + try (final Repository repository = RepositoryManager.getRepository()) { + // Find ACCT-specific trade-bot for this entry + ACCT acct = SupportedBlockchain.getAcctByName(tradeBotData.getAcctName()); + if (acct == null) { + LOGGER.debug(() -> String.format("Couldn't find ACCT matching name %s", tradeBotData.getAcctName())); + continue; + } + + AcctTradeBot acctTradeBot = findTradeBotForAcct(acct); + if (acctTradeBot == null) { + LOGGER.debug(() -> String.format("Couldn't find ACCT trade-bot matching name %s", tradeBotData.getAcctName())); + continue; + } + + acctTradeBot.progress(repository, tradeBotData); + } catch (DataException e) { + LOGGER.error("Couldn't run trade bot due to repository issue", e); + } catch (ForeignBlockchainException e) { + LOGGER.warn(() -> String.format("Foreign blockchain 
issue processing trade-bot entry for AT %s: %s", tradeBotData.getAtAddress(), e.getMessage())); + } + + broadcastPresenceTimestamps(); + } + } + + public static byte[] generateTradePrivateKey() { + // The private key is used for both Curve25519 and secp256k1 so needs to be valid for both. + // Curve25519 accepts any seed, so generate a valid secp256k1 key and use that. + return new ECKey().getPrivKeyBytes(); + } + + public static byte[] deriveTradeNativePublicKey(byte[] privateKey) { + return Crypto.toPublicKey(privateKey); + } + + public static byte[] deriveTradeForeignPublicKey(byte[] privateKey) { + return ECKey.fromPrivate(privateKey).getPubKey(); + } + + /*package*/ public static byte[] generateSecret() { + byte[] secret = new byte[32]; + RANDOM.nextBytes(secret); + return secret; + } + + /*package*/ static void backupTradeBotData(Repository repository, List additional) { + // Attempt to backup the trade bot data. This an optional step and doesn't impact trading, so don't throw an exception on failure + try { + LOGGER.info("About to backup trade bot data..."); + HSQLDBImportExport.backupTradeBotStates(repository, additional); + } catch (DataException e) { + LOGGER.info(String.format("Repository issue when exporting trade bot data: %s", e.getMessage())); + } + } + + /** Updates trade-bot entry to new state, with current timestamp, logs message and notifies state-change listeners. */ + /*package*/ static void updateTradeBotState(Repository repository, TradeBotData tradeBotData, + String newState, int newStateValue, Supplier logMessageSupplier) throws DataException { + tradeBotData.setState(newState); + tradeBotData.setStateValue(newStateValue); + tradeBotData.setTimestamp(NTP.getTime()); + repository.getCrossChainRepository().save(tradeBotData); + repository.saveChanges(); + + if (Settings.getInstance().isTradebotSystrayEnabled()) + SysTray.getInstance().showMessage("Trade-Bot", String.format("%s: %s", tradeBotData.getAtAddress(), newState), MessageType.INFO); + + if (logMessageSupplier != null) + LOGGER.info(logMessageSupplier.get()); + + LOGGER.debug(() -> String.format("new state for trade-bot entry based on AT %s: %s", tradeBotData.getAtAddress(), newState)); + + notifyStateChange(tradeBotData); + } + + /** Updates trade-bot entry to new state, with current timestamp, logs message and notifies state-change listeners. */ + /*package*/ static void updateTradeBotState(Repository repository, TradeBotData tradeBotData, StateNameAndValueSupplier newStateSupplier, Supplier logMessageSupplier) throws DataException { + updateTradeBotState(repository, tradeBotData, newStateSupplier.getState(), newStateSupplier.getStateValue(), logMessageSupplier); + } + + /** Updates trade-bot entry to new state, with current timestamp, logs message and notifies state-change listeners. 
*/ + /*package*/ static void updateTradeBotState(Repository repository, TradeBotData tradeBotData, Supplier logMessageSupplier) throws DataException { + updateTradeBotState(repository, tradeBotData, tradeBotData.getState(), tradeBotData.getStateValue(), logMessageSupplier); + } + + /*package*/ static void notifyStateChange(TradeBotData tradeBotData) { + StateChangeEvent stateChangeEvent = new StateChangeEvent(tradeBotData); + EventBus.INSTANCE.notify(stateChangeEvent); + } + + /*package*/ static AcctTradeBot findTradeBotForAcct(ACCT acct) { + Supplier acctTradeBotSupplier = acctTradeBotSuppliers.get(acct.getClass()); + if (acctTradeBotSupplier == null) + return null; + + return acctTradeBotSupplier.get(); + } + + // PRESENCE-related + + public Collection getAllTradePresences() { + return this.safeAllTradePresencesByPubkey.values(); + } + + /** Trade presence timestamps expire in the 'future' so any that reach 'now' have expired and are removed. */ + private void expireOldPresenceTimestamps() { + long now = NTP.getTime(); + + int allRemovedCount = 0; + synchronized (this.allTradePresencesByPubkey) { + int preRemoveCount = this.allTradePresencesByPubkey.size(); + this.allTradePresencesByPubkey.values().removeIf(tradePresenceData -> tradePresenceData.getTimestamp() <= now); + allRemovedCount = this.allTradePresencesByPubkey.size() - preRemoveCount; + } + + int ourRemovedCount = 0; + synchronized (this.ourTradePresenceTimestampsByPubkey) { + int preRemoveCount = this.ourTradePresenceTimestampsByPubkey.size(); + this.ourTradePresenceTimestampsByPubkey.values().removeIf(timestamp -> timestamp < now); + ourRemovedCount = this.ourTradePresenceTimestampsByPubkey.size() - preRemoveCount; + } + + if (allRemovedCount > 0) + LOGGER.debug("Removed {} expired trade presences, of which {} ours", allRemovedCount, ourRemovedCount); + } + + /*package*/ void updatePresence(Repository repository, TradeBotData tradeBotData, CrossChainTradeData tradeData) + throws DataException { + String atAddress = tradeBotData.getAtAddress(); + + PrivateKeyAccount tradeNativeAccount = new PrivateKeyAccount(repository, tradeBotData.getTradePrivateKey()); + String signerAddress = tradeNativeAccount.getAddress(); + + /* + * There's no point in Alice trying to broadcast presence for an AT that isn't locked to her, + * as other peers won't be able to verify as signing public key isn't yet in the AT's data segment. + */ + if (!signerAddress.equals(tradeData.qortalCreatorTradeAddress) && !signerAddress.equals(tradeData.qortalPartnerAddress)) { + // Signer is neither Bob, nor trade locked to Alice + LOGGER.trace("Can't provide trade presence for our AT {} as it's not yet locked to Alice", atAddress); + return; + } + + long now = NTP.getTime(); + long newExpiry = generateExpiry(now); + ByteArray pubkeyByteArray = ByteArray.wrap(tradeNativeAccount.getPublicKey()); + + // If map entry's timestamp is missing, or within early renewal period, use the new expiry - otherwise use existing timestamp. 
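+ // Worked example: with EXPIRY_ROUNDING = 15 mins and PRESENCE_LIFETIME = 30 mins, a 'now' of
+ // 12:07 yields a new expiry of 12:30 (floor to the 12:00 boundary, plus the lifetime). With
+ // EARLY_RENEWAL_PERIOD = 5 mins, that entry is then left untouched until 12:25, after which
+ // the check below falls through and a fresh expiry is written.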
+ synchronized (this.ourTradePresenceTimestampsByPubkey) { + Long currentTimestamp = this.ourTradePresenceTimestampsByPubkey.get(pubkeyByteArray); + + if (currentTimestamp != null && currentTimestamp - now > EARLY_RENEWAL_PERIOD) { + // timestamp still good + LOGGER.trace("Current trade presence timestamp {} still good for our trade {}", currentTimestamp, atAddress); + return; + } + + this.ourTradePresenceTimestampsByPubkey.put(pubkeyByteArray, newExpiry); + } + + // Create signature + byte[] signature = tradeNativeAccount.sign(Longs.toByteArray(newExpiry)); + + // Add new trade presence to queue to be broadcast around network + TradePresenceData tradePresenceData = new TradePresenceData(newExpiry, tradeNativeAccount.getPublicKey(), signature, atAddress); + this.pendingTradePresences.add(tradePresenceData); + + this.allTradePresencesByPubkey.put(pubkeyByteArray, tradePresenceData); + rebuildSafeAllTradePresences(); + + LOGGER.trace("New trade presence timestamp {} for our trade {}", newExpiry, atAddress); + + EventBus.INSTANCE.notify(new TradePresenceEvent(tradePresenceData)); + } + + private void rebuildSafeAllTradePresences() { + synchronized (this.allTradePresencesByPubkey) { + // Collect into a *new* unmodifiable map. + this.safeAllTradePresencesByPubkey = Map.copyOf(this.allTradePresencesByPubkey); + } + } + + private void broadcastPresenceTimestamps() { + // If we have new trade presences that are pending broadcast, send those as a priority + if (!this.pendingTradePresences.isEmpty()) { + // Create a copy for Network to safely use in another thread + List safeTradePresences; + synchronized (this.pendingTradePresences) { + safeTradePresences = List.copyOf(this.pendingTradePresences); + this.pendingTradePresences.clear(); + } + + LOGGER.debug("Broadcasting {} new trade presences", safeTradePresences.size()); + + TradePresencesMessage tradePresencesMessage = new TradePresencesMessage(safeTradePresences); + RNSNetwork.getInstance().broadcast(peer -> tradePresencesMessage); + + return; + } + + // As we have no new trade presences, check whether it's time to do a general broadcast + Long now = NTP.getTime(); + if (now == null || now < nextTradePresenceBroadcastTimestamp) + return; + + nextTradePresenceBroadcastTimestamp = now + PRESENCE_BROADCAST_INTERVAL; + + List safeTradePresences = List.copyOf(this.safeAllTradePresencesByPubkey.values()); + + LOGGER.debug("Broadcasting all {} known trade presences. 
Next broadcast timestamp: {}", + safeTradePresences.size(), nextTradePresenceBroadcastTimestamp + ); + + GetTradePresencesMessage getTradePresencesMessage = new GetTradePresencesMessage(safeTradePresences); + RNSNetwork.getInstance().broadcast(peer -> getTradePresencesMessage); + } + + // Network message processing + + public void onGetTradePresencesMessage(RNSPeer peer, Message message) { + GetTradePresencesMessage getTradePresencesMessage = (GetTradePresencesMessage) message; + + List peersTradePresences = getTradePresencesMessage.getTradePresences(); + + // Create mutable copy from safe snapshot + Map entriesUnknownToPeer = new HashMap<>(this.safeAllTradePresencesByPubkey); + int knownCount = entriesUnknownToPeer.size(); + + for (TradePresenceData peersTradePresence : peersTradePresences) { + ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey()); + + TradePresenceData ourEntry = entriesUnknownToPeer.get(pubkeyByteArray); + + if (ourEntry != null && ourEntry.getTimestamp() == peersTradePresence.getTimestamp()) + entriesUnknownToPeer.remove(pubkeyByteArray); + } + + if (entriesUnknownToPeer.isEmpty()) + return; + + LOGGER.debug("Sending {} trade presences to peer {} after excluding their {} from known {}", + entriesUnknownToPeer.size(), peer, peersTradePresences.size(), knownCount + ); + + // Send complement to peer + List safeTradePresences = List.copyOf(entriesUnknownToPeer.values()); + Message responseMessage = new TradePresencesMessage(safeTradePresences); + //if (!peer.sendMessage(responseMessage)) { + // peer.disconnect("failed to send TRADE_PRESENCES response"); + // return; + //} + peer.sendMessage(responseMessage); + } + + public void onTradePresencesMessage(RNSPeer peer, Message message) { + TradePresencesMessage tradePresencesMessage = (TradePresencesMessage) message; + + List peersTradePresences = tradePresencesMessage.getTradePresences(); + + long now = NTP.getTime(); + // Timestamps before this are too far into the past + long pastThreshold = now; + // Timestamps after this are too far into the future + long futureThreshold = now + PRESENCE_LIFETIME; + + Map> acctSuppliersByCodeHash = SupportedBlockchain.getAcctMap(); + + int newCount = 0; + + try (final Repository repository = RepositoryManager.getRepository()) { + for (TradePresenceData peersTradePresence : peersTradePresences) { + long timestamp = peersTradePresence.getTimestamp(); + + // Ignore if timestamp is out of bounds + if (timestamp < pastThreshold || timestamp > futureThreshold) { + if (timestamp < pastThreshold) + LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too old vs {}", + peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold + ); + else + LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too new vs {}", + peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold + ); + + continue; + } + + ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey()); + + // Ignore if we've previously verified this timestamp+publickey combo or sent timestamp is older + TradePresenceData existingTradeData = this.safeAllTradePresencesByPubkey.get(pubkeyByteArray); + if (existingTradeData != null && timestamp <= existingTradeData.getTimestamp()) { + if (timestamp == existingTradeData.getTimestamp()) + LOGGER.trace("Ignoring trade presence {} from peer {} as we have verified timestamp {} before", + peersTradePresence.getAtAddress(), peer, timestamp + ); + else + LOGGER.trace("Ignoring trade presence {} from peer {} as 
timestamp {} is older than latest {}", + peersTradePresence.getAtAddress(), peer, timestamp, existingTradeData.getTimestamp() + ); + + continue; + } + + // Check timestamp signature + byte[] timestampSignature = peersTradePresence.getSignature(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + byte[] publicKey = peersTradePresence.getPublicKey(); + if (!Crypto.verify(publicKey, timestampSignature, timestampBytes)) { + LOGGER.trace("Ignoring trade presence {} from peer {} as signature failed to verify", + peersTradePresence.getAtAddress(), peer + ); + + continue; + } + + ATData atData = repository.getATRepository().fromATAddress(peersTradePresence.getAtAddress()); + if (atData == null || atData.getIsFrozen() || atData.getIsFinished()) { + if (atData == null) + LOGGER.trace("Ignoring trade presence {} from peer {} as AT doesn't exist", + peersTradePresence.getAtAddress(), peer + ); + else + LOGGER.trace("Ignoring trade presence {} from peer {} as AT is frozen or finished", + peersTradePresence.getAtAddress(), peer + ); + + continue; + } + + ByteArray atCodeHash = ByteArray.wrap(atData.getCodeHash()); + Supplier acctSupplier = acctSuppliersByCodeHash.get(atCodeHash); + if (acctSupplier == null) { + LOGGER.trace("Ignoring trade presence {} from peer {} as AT isn't a known ACCT?", + peersTradePresence.getAtAddress(), peer + ); + + continue; + } + + CrossChainTradeData tradeData = acctSupplier.get().populateTradeData(repository, atData); + if (tradeData == null) { + LOGGER.trace("Ignoring trade presence {} from peer {} as trade data not found?", + peersTradePresence.getAtAddress(), peer + ); + + continue; + } + + // Convert signer's public key to address form + String signerAddress = peersTradePresence.getTradeAddress(); + + // Signer's public key (in address form) must match Bob's / Alice's trade public key (in address form) + if (!signerAddress.equals(tradeData.qortalCreatorTradeAddress) && !signerAddress.equals(tradeData.qortalPartnerAddress)) { + LOGGER.trace("Ignoring trade presence {} from peer {} as signer isn't Alice or Bob?", + peersTradePresence.getAtAddress(), peer + ); + + continue; + } + + // This is new to us + this.allTradePresencesByPubkey.put(pubkeyByteArray, peersTradePresence); + ++newCount; + + LOGGER.trace("Added trade presence {} from peer {} with timestamp {}", + peersTradePresence.getAtAddress(), peer, timestamp + ); + + EventBus.INSTANCE.notify(new TradePresenceEvent(peersTradePresence)); + } + } catch (DataException e) { + LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e); + } + + if (newCount > 0) { + LOGGER.debug("New trade presences: {}, all trade presences: {}", newCount, allTradePresencesByPubkey.size()); + rebuildSafeAllTradePresences(); + } + } + + public void bridgePresence(long timestamp, byte[] publicKey, byte[] signature, String atAddress) { + long expiry = generateExpiry(timestamp); + ByteArray pubkeyByteArray = ByteArray.wrap(publicKey); + + TradePresenceData fakeTradePresenceData = new TradePresenceData(expiry, publicKey, signature, atAddress); + + // Only bridge if trade presence expiry timestamp is newer + TradePresenceData computedTradePresenceData = this.allTradePresencesByPubkey.compute(pubkeyByteArray, (k, v) -> + v == null || v.getTimestamp() < expiry ? 
fakeTradePresenceData : v + ); + + if (computedTradePresenceData == fakeTradePresenceData) { + LOGGER.trace("Bridged PRESENCE transaction for trade {} with timestamp {}", atAddress, expiry); + rebuildSafeAllTradePresences(); + + EventBus.INSTANCE.notify(new TradePresenceEvent(fakeTradePresenceData)); + } + } + + /** Decorates a CrossChainTradeData object with Alice / Bob trade-bot presence timestamp, if available. */ + public void decorateTradeDataWithPresence(CrossChainTradeData crossChainTradeData) { + // Match by AT address, then check for Bob vs Alice + this.safeAllTradePresencesByPubkey.values().stream() + .filter(tradePresenceData -> tradePresenceData.getAtAddress().equals(crossChainTradeData.qortalAtAddress)) + .forEach(tradePresenceData -> { + String signerAddress = tradePresenceData.getTradeAddress(); + + // Signer's public key (in address form) must match Bob's / Alice's trade public key (in address form) + if (signerAddress.equals(crossChainTradeData.qortalCreatorTradeAddress)) + crossChainTradeData.creatorPresenceExpiry = tradePresenceData.getTimestamp(); + else if (signerAddress.equals(crossChainTradeData.qortalPartnerAddress)) + crossChainTradeData.partnerPresenceExpiry = tradePresenceData.getTimestamp(); + }); + } + + /** Removes any trades that have had multiple failures */ + public List removeFailedTrades(Repository repository, List crossChainTrades) { + Long now = NTP.getTime(); + if (now == null) { + return crossChainTrades; + } + + List updatedCrossChainTrades = new ArrayList<>(crossChainTrades); + int getMaxTradeOfferAttempts = Settings.getInstance().getMaxTradeOfferAttempts(); + + for (CrossChainTradeData crossChainTradeData : crossChainTrades) { + // We only care about trades in the OFFERING state + if (crossChainTradeData.mode != AcctMode.OFFERING) { + failedTrades.remove(crossChainTradeData.qortalAtAddress); + validTrades.remove(crossChainTradeData.qortalAtAddress); + continue; + } + + // Return recently cached values if they exist + Long failedTimestamp = failedTrades.get(crossChainTradeData.qortalAtAddress); + if (failedTimestamp != null && now - failedTimestamp < 60 * 60 * 1000L) { + updatedCrossChainTrades.remove(crossChainTradeData); + //LOGGER.info("Removing cached failed trade AT {}", crossChainTradeData.qortalAtAddress); + continue; + } + Long validTimestamp = validTrades.get(crossChainTradeData.qortalAtAddress); + if (validTimestamp != null && now - validTimestamp < 60 * 60 * 1000L) { + //LOGGER.info("NOT removing cached valid trade AT {}", crossChainTradeData.qortalAtAddress); + continue; + } + + try { + List transactions = repository.getTransactionRepository().getUnconfirmedTransactions(Arrays.asList(Transaction.TransactionType.MESSAGE), null, null, null, null); + + for (TransactionData transactionData : transactions) { + // Treat as failed if buy attempt was more than 60 mins ago (as it's still in the OFFERING state) + if (transactionData.getRecipient().equals(crossChainTradeData.qortalCreatorTradeAddress) && now - transactionData.getTimestamp() > 60*60*1000L) { + failedTrades.put(crossChainTradeData.qortalAtAddress, now); + updatedCrossChainTrades.remove(crossChainTradeData); + } else { + validTrades.put(crossChainTradeData.qortalAtAddress, now); + } + } + + } catch (DataException e) { + LOGGER.info("Unable to determine failed state of AT {}", crossChainTradeData.qortalAtAddress); + } + } + + return updatedCrossChainTrades; + } + + public boolean isFailedTrade(Repository repository, CrossChainTradeData crossChainTradeData) { + List results = 
removeFailedTrades(repository, Arrays.asList(crossChainTradeData)); + return results.isEmpty(); + } + + private long generateExpiry(long timestamp) { + return ((timestamp - 1) / EXPIRY_ROUNDING) * EXPIRY_ROUNDING + PRESENCE_LIFETIME; + } + +} diff --git a/src/main/java/org/qortal/data/arbitrary/RNSArbitraryDirectConnectionInfo.java b/src/main/java/org/qortal/data/arbitrary/RNSArbitraryDirectConnectionInfo.java new file mode 100644 index 00000000..586bb7f4 --- /dev/null +++ b/src/main/java/org/qortal/data/arbitrary/RNSArbitraryDirectConnectionInfo.java @@ -0,0 +1,59 @@ +package org.qortal.data.arbitrary; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public class RNSArbitraryDirectConnectionInfo { + + private final byte[] signature; + private final String peerAddress; + private final List hashes; + private final long timestamp; + + public RNSArbitraryDirectConnectionInfo(byte[] signature, String peerAddress, List hashes, long timestamp) { + this.signature = signature; + this.peerAddress = peerAddress; + this.hashes = hashes; + this.timestamp = timestamp; + } + + public byte[] getSignature() { + return this.signature; + } + + public String getPeerAddress() { + return this.peerAddress; + } + + public List getHashes() { + return this.hashes; + } + + public long getTimestamp() { + return this.timestamp; + } + + public int getHashCount() { + if (this.hashes == null) { + return 0; + } + return this.hashes.size(); + } + + @Override + public boolean equals(Object other) { + if (other == this) + return true; + + if (!(other instanceof ArbitraryDirectConnectionInfo)) + return false; + + ArbitraryDirectConnectionInfo otherDirectConnectionInfo = (ArbitraryDirectConnectionInfo) other; + + return Arrays.equals(this.signature, otherDirectConnectionInfo.getSignature()) + && Objects.equals(this.peerAddress, otherDirectConnectionInfo.getPeerAddress()) + && Objects.equals(this.hashes, otherDirectConnectionInfo.getHashes()) + && Objects.equals(this.timestamp, otherDirectConnectionInfo.getTimestamp()); + } +} diff --git a/src/main/java/org/qortal/data/arbitrary/RNSArbitraryFileListResponseInfo.java b/src/main/java/org/qortal/data/arbitrary/RNSArbitraryFileListResponseInfo.java new file mode 100644 index 00000000..50620e1f --- /dev/null +++ b/src/main/java/org/qortal/data/arbitrary/RNSArbitraryFileListResponseInfo.java @@ -0,0 +1,11 @@ +package org.qortal.data.arbitrary; + +import org.qortal.network.RNSPeer; + +public class RNSArbitraryFileListResponseInfo extends RNSArbitraryRelayInfo { + + public RNSArbitraryFileListResponseInfo(String hash58, String signature58, RNSPeer peer, Long timestamp, Long requestTime, Integer requestHops) { + super(hash58, signature58, peer, timestamp, requestTime, requestHops); + } + +} diff --git a/src/main/java/org/qortal/data/arbitrary/RNSArbitraryRelayInfo.java b/src/main/java/org/qortal/data/arbitrary/RNSArbitraryRelayInfo.java new file mode 100644 index 00000000..cc8a0f90 --- /dev/null +++ b/src/main/java/org/qortal/data/arbitrary/RNSArbitraryRelayInfo.java @@ -0,0 +1,73 @@ +package org.qortal.data.arbitrary; + +import org.qortal.network.RNSPeer; + +import java.util.Objects; + +public class RNSArbitraryRelayInfo { + + private final String hash58; + private final String signature58; + private final RNSPeer peer; + private final Long timestamp; + private final Long requestTime; + private final Integer requestHops; + + public RNSArbitraryRelayInfo(String hash58, String signature58, RNSPeer peer, Long timestamp, Long requestTime, Integer 
requestHops) { + this.hash58 = hash58; + this.signature58 = signature58; + this.peer = peer; + this.timestamp = timestamp; + this.requestTime = requestTime; + this.requestHops = requestHops; + } + + public boolean isValid() { + return this.getHash58() != null && this.getSignature58() != null + && this.getPeer() != null && this.getTimestamp() != null; + } + + public String getHash58() { + return this.hash58; + } + + public String getSignature58() { + return signature58; + } + + public RNSPeer getPeer() { + return peer; + } + + public Long getTimestamp() { + return timestamp; + } + + public Long getRequestTime() { + return this.requestTime; + } + + public Integer getRequestHops() { + return this.requestHops; + } + + @Override + public String toString() { + return String.format("%s = %s, %s, %d", this.hash58, this.signature58, this.peer, this.timestamp); + } + + @Override + public boolean equals(Object other) { + if (other == this) + return true; + + if (!(other instanceof ArbitraryRelayInfo)) + return false; + + RNSArbitraryRelayInfo otherRelayInfo = (RNSArbitraryRelayInfo) other; + + return this.peer == otherRelayInfo.getPeer() + && Objects.equals(this.hash58, otherRelayInfo.getHash58()) + && Objects.equals(this.signature58, otherRelayInfo.getSignature58()); + } +} diff --git a/src/main/java/org/qortal/data/network/RNSPeerData.java b/src/main/java/org/qortal/data/network/RNSPeerData.java new file mode 100644 index 00000000..a552e214 --- /dev/null +++ b/src/main/java/org/qortal/data/network/RNSPeerData.java @@ -0,0 +1,117 @@ +package org.qortal.data.network; + +import io.swagger.v3.oas.annotations.media.Schema; +import org.qortal.network.PeerAddress; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlTransient; +import static org.apache.commons.codec.binary.Hex.encodeHexString; + +// All properties to be converted to JSON via JAXB +@XmlAccessorType(XmlAccessType.FIELD) +public class RNSPeerData { + + //public static final int MAX_PEER_ADDRESS_SIZE = 255; + + // Properties + + //// Don't expose this via JAXB - use pretty getter instead + //@XmlTransient + //@Schema(hidden = true) + //private PeerAddress peerAddress; + private byte[] peerAddress; + + private Long lastAttempted; + private Long lastConnected; + private Long lastMisbehaved; + private Long addedWhen; + private String addedBy; + + /** The number of consecutive times we failed to sync with this peer */ + private int failedSyncCount = 0; + + // Constructors + + // necessary for JAXB serialization + protected RNSPeerData() { + } + + public RNSPeerData(byte[] peerAddress, Long lastAttempted, Long lastConnected, Long lastMisbehaved, Long addedWhen, String addedBy) { + this.peerAddress = peerAddress; + this.lastAttempted = lastAttempted; + this.lastConnected = lastConnected; + this.lastMisbehaved = lastMisbehaved; + this.addedWhen = addedWhen; + this.addedBy = addedBy; + } + + public RNSPeerData(byte[] peerAddress, Long addedWhen, String addedBy) { + this(peerAddress, null, null, null, addedWhen, addedBy); + } + + public RNSPeerData(byte[] peerAddress) { + this(peerAddress, null, null, null, null, null); + } + + // Getters / setters + + // Don't let JAXB use this getter + @XmlTransient + @Schema(hidden = true) + public byte[] getAddress() { + return this.peerAddress; + } + + public Long getLastAttempted() { + return this.lastAttempted; + } + + public void setLastAttempted(Long lastAttempted) { + 
this.lastAttempted = lastAttempted; + } + + public Long getLastConnected() { + return this.lastConnected; + } + + public void setLastConnected(Long lastConnected) { + this.lastConnected = lastConnected; + } + + public Long getLastMisbehaved() { + return this.lastMisbehaved; + } + + public void setLastMisbehaved(Long lastMisbehaved) { + this.lastMisbehaved = lastMisbehaved; + } + + public Long getAddedWhen() { + return this.addedWhen; + } + + public String getAddedBy() { + return this.addedBy; + } + + public int getFailedSyncCount() { + return this.failedSyncCount; + } + + public void setFailedSyncCount(int failedSyncCount) { + this.failedSyncCount = failedSyncCount; + } + + public void incrementFailedSyncCount() { + this.failedSyncCount++; + } + + // Pretty peerAddress getter for JAXB + @XmlElement(name = "address") + protected String getPrettyAddress() { + return encodeHexString(this.peerAddress); + } + +} diff --git a/src/main/java/org/qortal/network/RNSNetwork.java b/src/main/java/org/qortal/network/RNSNetwork.java index d60acea8..5885fb4d 100644 --- a/src/main/java/org/qortal/network/RNSNetwork.java +++ b/src/main/java/org/qortal/network/RNSNetwork.java @@ -69,12 +69,24 @@ import java.util.concurrent.atomic.AtomicLong; //import java.util.concurrent.locks.Lock; //import java.util.concurrent.locks.ReentrantLock; import java.util.Objects; +import java.util.function.Function; import org.apache.commons.codec.binary.Hex; import org.qortal.utils.ExecuteProduceConsume; import org.qortal.utils.ExecuteProduceConsume.StatsSnapshot; import org.qortal.utils.NTP; import org.qortal.utils.NamedThreadFactory; +import org.qortal.network.message.Message; +import org.qortal.network.message.BlockSummariesV2Message; +import org.qortal.network.message.TransactionSignaturesMessage; +import org.qortal.network.message.GetUnconfirmedTransactionsMessage; +import org.qortal.network.task.RNSBroadcastTask; +import org.qortal.controller.Controller; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.data.block.BlockData; +import org.qortal.data.block.BlockSummaryData; +import org.qortal.data.transaction.TransactionData; // logging import lombok.extern.slf4j.Slf4j; @@ -90,12 +102,8 @@ public class RNSNetwork { static final String APP_NAME = Settings.getInstance().isTestNet() ? RNSCommon.TESTNET_APP_NAME: RNSCommon.MAINNET_APP_NAME; //static final String defaultConfigPath = ".reticulum"; // if empty will look in Reticulums default paths static final String defaultConfigPath = Settings.getInstance().isTestNet() ? 
RNSCommon.defaultRNSConfigPathTestnet: RNSCommon.defaultRNSConfigPath; - //static final String defaultConfigPath = RNSCommon.defaultRNSConfigPath; - //private final String defaultConfigPath = Settings.getInstance().getReticulumDefaultConfigPath(); - private static Integer MAX_PEERS = 12; - //private final Integer MAX_PEERS = Settings.getInstance().getReticulumMaxPeers(); - private static Integer MIN_DESIRED_PEERS = 3; - //private final Integer MIN_DESIRED_PEERS = Settings.getInstance().getReticulumMinDesiredPeers(); + private final int MAX_PEERS = Settings.getInstance().getReticulumMaxPeers(); + private final int MIN_DESIRED_PEERS = Settings.getInstance().getReticulumMinDesiredPeers(); Identity serverIdentity; public Destination baseDestination; private volatile boolean isShuttingDown = false; @@ -114,14 +122,18 @@ public class RNSNetwork { private final ExecuteProduceConsume rnsNetworkEPC; private static final long NETWORK_EPC_KEEPALIVE = 1000L; // 1 second - //private volatile boolean isShuttingDown = false; private int totalThreadCount = 0; - // TODO: settings - MaxReticulumPeers, MaxRNSNetworkThreadPoolSize (if needed) + private final int reticulumMaxNetworkThreadPoolSize = Settings.getInstance().getReticulumMaxNetworkThreadPoolSize(); // replicating a feature from Network.class needed in for base Message.java, // just in case the classic TCP/IP Networking is turned off. private static final byte[] MAINNET_MESSAGE_MAGIC = new byte[]{0x51, 0x4f, 0x52, 0x54}; // QORT private static final byte[] TESTNET_MESSAGE_MAGIC = new byte[]{0x71, 0x6f, 0x72, 0x54}; // qorT + private static final int BROADCAST_CHAIN_TIP_DEPTH = 7; // Just enough to fill a SINGLE TCP packet (~1440 bytes) + /** + * How long between informational broadcasts to all ACTIVE peers, in milliseconds. 
+ */ + private static final long BROADCAST_INTERVAL = 30 * 1000L; // ms //private static final Logger logger = LoggerFactory.getLogger(RNSNetwork.class); @@ -146,7 +158,7 @@ public class RNSNetwork { // Settings.getInstance().getMaxRNSNetworkThreadPoolSize(), // statically set to 5 below ExecutorService RNSNetworkExecutor = new ThreadPoolExecutor(1, - 5, + reticulumMaxNetworkThreadPoolSize, NETWORK_EPC_KEEPALIVE, TimeUnit.SECONDS, new SynchronousQueue(), new NamedThreadFactory("RNSNetwork-EPC", Settings.getInstance().getNetworkThreadPriority())); @@ -228,6 +240,44 @@ public class RNSNetwork { } } + public void broadcast(Function peerMessageBuilder) { + for (RNSPeer peer : getImmutableLinkedPeers()) { + if (this.isShuttingDown) + return; + + Message message = peerMessageBuilder.apply(peer); + + if (message == null) { + continue; + } + + peer.sendMessage(message); + } + } + + public void broadcastOurChain() { + BlockData latestBlockData = Controller.getInstance().getChainTip(); + int latestHeight = latestBlockData.getHeight(); + + try (final Repository repository = RepositoryManager.getRepository()) { + List latestBlockSummaries = repository.getBlockRepository().getBlockSummaries(latestHeight - BROADCAST_CHAIN_TIP_DEPTH, latestHeight); + Message latestBlockSummariesMessage = new BlockSummariesV2Message(latestBlockSummaries); + + broadcast(broadcastPeer -> latestBlockSummariesMessage); + } catch (DataException e) { + log.warn("Couldn't broadcast our chain tip info", e); + } + } + + public Message buildNewTransactionMessage(RNSPeer peer, TransactionData transactionData) { + // In V2 we send out transaction signature only and peers can decide whether to request the full transaction + return new TransactionSignaturesMessage(Collections.singletonList(transactionData.getSignature())); + } + + public Message buildGetUnconfirmedTransactionsMessage(RNSPeer peer) { + return new GetUnconfirmedTransactionsMessage(); + } + public void shutdown() { isShuttingDown = true; log.info("shutting down Reticulum"); @@ -403,6 +453,8 @@ public class RNSNetwork { protected Task produceTask(boolean canBlock) throws InterruptedException { Task task; + //// TODO: enable this once we figure out how to add pending messages in RNSPeer + /// (RNSPeer: pendingMessages.offer(message)) //task = maybeProducePeerMessageTask(); //if (task != null) { // return task; @@ -415,10 +467,10 @@ public class RNSNetwork { return task; } - //task = maybeProduceBroadcastTask(now); - //if (task != null) { - // return task; - //} + task = maybeProduceBroadcastTask(now); + if (task != null) { + return task; + } return null; } @@ -430,12 +482,18 @@ public class RNSNetwork { // .orElse(null); //} //private Task maybeProducePeerMessageTask() { - // return getImmutableIncommingPeers().stream() + // return getImmutableIncomingPeers().stream() // .map(RNSPeer::getMessageTask) // .filter(RNSPeer::isAvailable) - // .findFirst*() + // .findFirst() // .orElse(null); //} + private Task maybeProducePeerMessageTask() { + return getImmutableIncomingPeers().stream() + .map(RNSPeer::getMessageTask) + .findFirst() + .orElse(null); + } //private Task maybeProducePeerPingTask(Long now) { // return getImmutableHandshakedPeers().stream() @@ -461,14 +519,14 @@ public class RNSNetwork { .orElse(null); } - //private Task maybeProduceBroadcastTask(Long now) { - // if (now == null || now < nextBroadcastTimestamp.get()) { - // return null; - // } - // - // nextBroadcastTimestamp.set(now + BROADCAST_INTERVAL); - // return new BroadcastTask(); - //} + private Task 
maybeProduceBroadcastTask(Long now) { + if (now == null || now < nextBroadcastTimestamp.get()) { + return null; + } + + nextBroadcastTimestamp.set(now + BROADCAST_INTERVAL); + return new RNSBroadcastTask(); + } } private static class SingletonContainer { @@ -517,7 +575,7 @@ public class RNSNetwork { } public List getIncomingPeers() { - return this.incomingPeers; + return this.incomingPeers; } public List getImmutableIncomingPeers() { @@ -713,5 +771,23 @@ public class RNSNetwork { return Settings.getInstance().isTestNet() ? TESTNET_MESSAGE_MAGIC : MAINNET_MESSAGE_MAGIC; } + // Network methods Reticulum implementation + + /** Builds either (legacy) HeightV2Message or (newer) BlockSummariesV2Message, depending on peer version. + * + * @return Message, or null if DataException was thrown. + */ + public Message buildHeightOrChainTipInfo(RNSPeer peer) { + // peer only used for version check + int latestHeight = Controller.getInstance().getChainHeight(); + + try (final Repository repository = RepositoryManager.getRepository()) { + List latestBlockSummaries = repository.getBlockRepository().getBlockSummaries(latestHeight - BROADCAST_CHAIN_TIP_DEPTH, latestHeight); + return new BlockSummariesV2Message(latestBlockSummaries); + } catch (DataException e) { + return null; + } + } + } diff --git a/src/main/java/org/qortal/network/RNSPeer.java b/src/main/java/org/qortal/network/RNSPeer.java index 516eacb3..02736d24 100644 --- a/src/main/java/org/qortal/network/RNSPeer.java +++ b/src/main/java/org/qortal/network/RNSPeer.java @@ -8,6 +8,8 @@ import static java.util.Objects.nonNull; //import java.io.IOException; import java.time.Instant; import java.util.Arrays; +import java.util.List; +import java.util.Collections; //import io.reticulum.Reticulum; //import org.qortal.network.RNSNetwork; @@ -37,6 +39,7 @@ import static io.reticulum.utils.IdentityUtils.concatArrays; import org.qortal.controller.Controller; import org.qortal.data.block.BlockSummaryData; import org.qortal.data.block.CommonBlockData; +import org.qortal.data.network.RNSPeerData; import org.qortal.network.message.Message; import org.qortal.network.message.PingMessage; import org.qortal.network.message.*; @@ -106,11 +109,22 @@ public class RNSPeer { private Long lastPing = null; // last ping roundtrip time [ms] private Long lastPingSent = null; // time last ping was sent, or null if not started. private Map> replyQueues; - //private LinkedBlockingQueue pendingMessages; // we might not need this + private LinkedBlockingQueue pendingMessages; // Versioning public static final Pattern VERSION_PATTERN = Pattern.compile(Controller.VERSION_PREFIX + "(\\d{1,3})\\.(\\d{1,5})\\.(\\d{1,5})"); + private RNSPeerData peerData = null; + /** + * Latest block info as reported by peer. 
+ */ + private List peersChainTipData = Collections.emptyList(); + /** + * Our common block with this peer + */ + private CommonBlockData commonBlockData; + + /** * Constructor for initiator peers */ @@ -122,6 +136,8 @@ public class RNSPeer { this.creationTimestamp = Instant.now(); this.isVacant = true; this.replyQueues = new ConcurrentHashMap<>(); + this.pendingMessages = new LinkedBlockingQueue<>(); + this.peerData = new RNSPeerData(dhash); } /** @@ -142,6 +158,7 @@ public class RNSPeer { //this.peerLink.setLinkEstablishedCallback(this::linkEstablished); //this.peerLink.setLinkClosedCallback(this::linkClosed); //this.peerLink.setPacketCallback(this::linkPacketReceived); + this.peerData = new RNSPeerData(this.destinationHash); } public void initPeerLink() { peerDestination = new Destination( @@ -168,7 +185,8 @@ public class RNSPeer { var channel = this.peerLink.getChannel(); if (nonNull(this.peerBuffer)) { log.trace("peerBuffer exists: {}, link status: {}", this.peerBuffer, this.peerLink.getStatus()); - return this.peerBuffer; + log.info("peerBuffer exists: {}, link status: {}", this.peerBuffer, this.peerLink.getStatus()); + //return this.peerBuffer; //try { // this.peerBuffer.close(); // this.peerBuffer = Buffer.createBidirectionalBuffer(receiveStreamId, sendStreamId, channel, this::peerBufferReady); @@ -185,7 +203,8 @@ public class RNSPeer { log.info("creating buffer - peerLink status: {}, channel: {}", this.peerLink.getStatus(), channel); this.peerBuffer = Buffer.createBidirectionalBuffer(receiveStreamId, sendStreamId, channel, this::peerBufferReady); } - return getPeerBuffer(); + //return getPeerBuffer(); + return this.peerBuffer; } public Link getOrInitPeerLink() { @@ -223,6 +242,10 @@ public class RNSPeer { return getPeerLink().getChannel(); } + public Boolean getIsInitiator() { + return this.isInitiator; + } + /** Link callbacks */ public void linkEstablished(Link link) { link.setLinkClosedCallback(this::linkClosed); @@ -285,38 +308,40 @@ public class RNSPeer { public void peerBufferReady(Integer readyBytes) { // get the message data var data = this.peerBuffer.read(readyBytes); + log.info("data length, data: {}, {}", data.length, data); //var pureData = Arrays.copyOfRange(data, this.messageMagic.length - 1, data.length); log.trace("peerBufferReady - data bytes: {}", data.length); try { Message message = Message.fromByteBuffer(ByteBuffer.wrap(data)); + log.info("received message - {}", message); log.info("type {} message received: {}", message.getType(), message); // TODO: Now what with message? switch (message.getType()) { + // Do we need this ? (seems like a TCP scenario only thing) + // Does any RNSPeer ever require an other RNSPeer's peer list? //case GET_PEERS: // onGetPeersMessage(peer, message); // break; case PING: onPingMessage(this, message); - //PongMessage pongMessage = new PongMessage(); - //pongMessage.setId(message.getId()); - //this.peerBuffer.write(pongMessage.toBytes()); - //this.peerBuffer.flush(); break; case PONG: //log.info("PONG received"); //break; + // Do we need this ? 
(We don't have RNSPeer versions) //case PEERS_V2: // onPeersV2Message(peer, message); // break; - // - //default: - // // Bump up to controller for possible action - // Controller.getInstance().onNetworkMessage(peer, message); - // break; + + default: + // Bump up to controller for possible action + //Controller.getInstance().onNetworkMessage(peer, message); + Controller.getInstance().onRNSNetworkMessage(this, message); + break; } } catch (MessageException e) { //log.error("{} from peer {}", e.getMessage(), this); @@ -569,6 +594,50 @@ public class RNSPeer { } } + protected Task getMessageTask() { + /* + * If our peerLink is not in ACTIVE node and there is a message yet to be + * processed then don't produce another message task. + * This allows us to process remaining messages sequentially. + */ + if (this.peerLink.getStatus() != ACTIVE) { + return null; + } + + final Message nextMessage = this.pendingMessages.poll(); + + if (nextMessage == null) { + return null; + } + + // Return a task to process message in queue + return new RNSMessageTask(this, nextMessage); + } + + /** + * Send a Qortal message using a Reticulum Buffer + * + * @param message message to be sent + * @return true if message successfully sent; false otherwise + */ + public boolean sendMessage(Message message) { + try { + log.trace("Sending {} message with ID {} to peer {}", message.getType().name(), message.getId(), this); + var peerBuffer = getOrInitPeerBuffer(); + this.peerBuffer.write(message.toBytes()); + this.peerBuffer.flush(); + return true; + } catch (IllegalStateException e) { + this.peerLink.teardown(); + this.peerBuffer = null; + log.error("IllegalStateException - can't write to buffer: {}", e); + return false; + } catch (MessageException e) { + log.error(e.getMessage(), e); + return false; + } + } + protected void startPings() { log.trace("[{}] Enabling pings for peer {}", peerLink.getDestination().getHexHash(), this); @@ -600,4 +669,35 @@ public class RNSPeer { return new RNSPingTask(this, now); } + + // Peer methods reticulum implementations + public BlockSummaryData getChainTipData() { + List chainTipSummaries = this.peersChainTipData; + + if (chainTipSummaries.isEmpty()) + return null; + + // Return last entry, which should have greatest height + return chainTipSummaries.get(chainTipSummaries.size() - 1); + } + + public void setChainTipData(BlockSummaryData chainTipData) { + this.peersChainTipData = Collections.singletonList(chainTipData); + } + + public List getChainTipSummaries() { + return this.peersChainTipData; + } + + public void setChainTipSummaries(List chainTipSummaries) { + this.peersChainTipData = List.copyOf(chainTipSummaries); + } + + public CommonBlockData getCommonBlockData() { + return this.commonBlockData; + } + + public void setCommonBlockData(CommonBlockData commonBlockData) { + this.commonBlockData = commonBlockData; + } } diff --git a/src/main/java/org/qortal/network/task/RNSBroadcastTask.java b/src/main/java/org/qortal/network/task/RNSBroadcastTask.java new file mode 100644 index 00000000..116cdf9d --- /dev/null +++ b/src/main/java/org/qortal/network/task/RNSBroadcastTask.java @@ -0,0 +1,19 @@ +package org.qortal.network.task; + +import org.qortal.controller.Controller; +import org.qortal.utils.ExecuteProduceConsume.Task; + +public class RNSBroadcastTask implements Task { + public RNSBroadcastTask() { + } + + @Override + public String getName() { + return "BroadcastTask"; + } + + @Override + public void perform() throws InterruptedException { + 
Controller.getInstance().doRNSNetworkBroadcast(); + } +} diff --git a/src/main/java/org/qortal/network/task/RNSMessageTask.java b/src/main/java/org/qortal/network/task/RNSMessageTask.java new file mode 100644 index 00000000..c364dc90 --- /dev/null +++ b/src/main/java/org/qortal/network/task/RNSMessageTask.java @@ -0,0 +1,29 @@ +package org.qortal.network.task; + +import org.qortal.network.RNSNetwork; +import org.qortal.network.RNSPeer; +import org.qortal.network.message.Message; +import org.qortal.utils.ExecuteProduceConsume.Task; + +public class RNSMessageTask implements Task { + private final RNSPeer peer; + private final Message nextMessage; + private final String name; + + public RNSMessageTask(RNSPeer peer, Message nextMessage) { + this.peer = peer; + this.nextMessage = nextMessage; + this.name = "MessageTask::" + peer + "::" + nextMessage.getType(); + } + + @Override + public String getName() { + return name; + } + + @Override + public void perform() throws InterruptedException { + //RNSNetwork.getInstance().onMessage(peer, nextMessage); + // TODO: what do we do in the Reticulum case? => implement + } +} diff --git a/src/main/java/org/qortal/settings/Settings.java b/src/main/java/org/qortal/settings/Settings.java index 3a0d17bb..f6a4ed5a 100644 --- a/src/main/java/org/qortal/settings/Settings.java +++ b/src/main/java/org/qortal/settings/Settings.java @@ -577,6 +577,15 @@ public class Settings { } } + // Related to Reticulum networking + + /** Maximum number of Reticulum peers allowed. */ + private int reticulumMaxPeers = 55; + /** Minimum number of Reticulum peers desired. */ + private int reticulumMinDesiredPeers = 3; + /** Maximum number of task executor network threads */ + private int reticulumMaxNetworkThreadPoolSize = 89; + // Constructors private Settings() { @@ -1333,4 +1342,16 @@ public class Settings { public boolean isConnectionPoolMonitorEnabled() { return connectionPoolMonitorEnabled; } + + public int getReticulumMaxPeers() { + return this.reticulumMaxPeers; + } + + public int getReticulumMinDesiredPeers() { + return this.reticulumMinDesiredPeers; + } + + public int getReticulumMaxNetworkThreadPoolSize() { + return this.reticulumMaxNetworkThreadPoolSize; + } }
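(Reviewer note, not part of the patch: the three new settings above are consumed in RNSNetwork.java, where getReticulumMaxNetworkThreadPoolSize() replaces the previously hard-coded maximum of 5 for the "RNSNetwork-EPC" executor. A minimal, self-contained sketch of that wiring, with the Settings lookup and NamedThreadFactory replaced by a literal and the default thread factory since neither is available outside the node:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RnsExecutorSketch {
        public static void main(String[] args) throws InterruptedException {
            // Stand-in for Settings.getInstance().getReticulumMaxNetworkThreadPoolSize() (defaults to 89 above)
            int maxNetworkThreadPoolSize = 89;

            // Mirrors the RNSNetwork hunk: core size 1, settings-driven maximum, SynchronousQueue hand-off
            ExecutorService rnsNetworkExecutor = new ThreadPoolExecutor(
                    1,
                    maxNetworkThreadPoolSize,
                    1000L, TimeUnit.SECONDS,
                    new SynchronousQueue<>());

            rnsNetworkExecutor.submit(() -> System.out.println("network task running"));

            rnsNetworkExecutor.shutdown();
            rnsNetworkExecutor.awaitTermination(5, TimeUnit.SECONDS);
        }
    }

Because the executor hands tasks off through a SynchronousQueue, each additional concurrent task needs its own thread, so this setting is effectively a cap on how many RNS network tasks can run in parallel.)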