Compare commits


41 Commits

Author SHA1 Message Date
aeab1acbbc Bump version to 4.7.1 2025-04-08 20:13:43 -07:00
crowetic
0b37666d2b
Merge pull request #250 from kennycud/master
Merging current 'test release' from kennycud repo after extensive testing by the community.
2025-04-08 08:34:07 -07:00
kennycud
bcf3538d18 db cache enabled to true by default 2025-04-07 12:19:56 -07:00
kennycud
b2d9d0539e removed cache orphaning, crowetic and I agree it should have never been added to begin with 2025-04-05 11:42:21 -07:00
kennycud
1bd6076e33 forgot IndexCache.java in the last commit
replaced index service attribute with a category attribute and reduced index attribute names to single characters to reduce memory footprint, t is for term, n is for name, c is for category, l is for link

changed default indexing frequency from 1 minute to 10 minutes to reduce memory use

added arbitrary resource endpoint for index search by issuer name and index prefix

added some additional error handling concerning unrecognized properties in the indices
2025-04-03 10:23:58 -07:00
kennycud
a6309e925b replaced index service attribute with a category attribute and reduced index attribute names to single characters to reduce memory footprint, t is for term, n is for name, c is for category, l is for link
changed default indexing frequency from 1 minute to 10 minutes to reduce memory use

added arbitrary resource endpoint for index search by issuer name and index prefix

added some additional error handling concerning unrecognized properties in the indices
2025-04-03 10:18:45 -07:00
kennycud
23de8a98bc removed logging 2025-03-21 18:59:32 -07:00
kennycud
d0a85d4717 QDN bug resolution 2025-03-21 18:44:41 -07:00
kennycud
a893888a2e reduced logging level for invalid formatting 2025-03-21 18:43:06 -07:00
kennycud
bd4472c2c0
Merge pull request #5 from Philreact/feature/search-keywords
added keywords to qortalRequest
2025-03-19 17:37:14 -07:00
kennycud
10dda255e2 added rebuild arbitrary resource timer task 2025-03-16 18:49:28 -07:00
kennycud
934c23402a added logging, so we can better understand the exception thrown 2025-03-16 18:47:59 -07:00
kennycud
4188f18a9a added error handling 2025-03-16 18:46:38 -07:00
kennycud
e48fd96c1e nullified impossible time constraints 2025-03-14 14:15:18 -07:00
kennycud
e76694e214 implemented before and after filtering 2025-03-13 13:46:17 -07:00
kennycud
dbf49309ec added some critical exception handling for arbitrary data indexing support 2025-03-12 14:24:23 -07:00
kennycud
ab4730cef0 initial implementation of arbitrary data indexing support 2025-03-12 11:21:57 -07:00
kennycud
7f3c1d553f removed name based arbitrary resource storage capacity limits and added arbitrary resource cache rebuild logging verbosity 2025-03-10 15:17:04 -07:00
ab0ef85458 added keywords to SEARCH_QDN_RESOURCES 2025-03-08 20:43:34 +02:00
b64674783a Merge remote-tracking branch 'kenny/master' into feature/search-keywords 2025-03-08 20:19:56 +02:00
kennycud
92fb52220a
Merge pull request #4 from Philreact/feature/search-keywords
Feature/search keywords
2025-03-06 07:10:26 -08:00
kennycud
2d0bdca8dc
Merge pull request #3 from Philreact/bugfix/get-qdn-resource-metadata
fix var bug for GET_QDN_RESOURCE_PROPERTIES
2025-03-06 07:06:43 -08:00
2e9f358d0b changed to list and added to cache 2025-03-06 16:10:30 +02:00
6a6380e9e7 Merge remote-tracking branch 'kenny/master' into feature/search-keywords 2025-03-06 14:02:36 +02:00
kennycud
11c2d1bd14 a solution for the metadata and status members getting nullified in the cache 2025-03-05 18:47:01 -08:00
1d79df078e Merge remote-tracking branch 'kenny/master' into feature/search-keywords 2025-03-05 21:14:47 +02:00
kennycud
4baafd1305 more arbitrary data optimizations, including the arbitrary resources cache rebuild and a setting to support it, added and removed notifications, added method to the arbitrary repository, also removed an unnecessary setting that was added in the last commit 2025-03-03 10:37:39 -08:00
f8cee2e0b7 Merge remote-tracking branch 'kenny/master' into feature/search-keywords 2025-02-27 18:36:00 +02:00
kennycud
676885ea2d optimized arbitrary metadata fetching, added arbitrary data cache manager notifications, removed redundant notifications, added method to arbitrary repository and a setting to support the optimization 2025-02-24 16:36:13 -08:00
kennycud
1f4ca6263f data monitor initial implementation 2025-02-19 17:18:05 -08:00
kennycud
df37372180 trade ledger export implementation, completed trades bug fix 2025-02-11 18:45:57 -08:00
086b0809d6 remove log 2025-02-08 22:20:03 +02:00
33650cc432 when the path is render/hash do not save path for nav history 2025-02-05 15:11:48 +02:00
c22abc440b change label 2025-02-04 18:02:56 +02:00
258eb3a0f9 added keywords query for arbitrary resource search 2025-02-04 15:42:25 +02:00
kennycud
91ceafe0e3 supporting multiple minting groups instead of a single minting group 2025-02-03 18:19:56 -08:00
kennycud
9017db725e Merge remote-tracking branch 'origin/master' 2025-02-01 18:44:17 -08:00
kennycud
a42f214358 invite orphan vulnerability patch, detailed test case coming in a commit soon 2025-02-01 18:43:48 -08:00
ecd4233dd0 fix fetch block qortalRequest 2025-01-26 00:13:58 +02:00
e5b6e893cd GET_AT missing a slash 2025-01-24 21:30:55 +02:00
9e45d640bc fix var bug 2025-01-23 23:52:39 +02:00
52 changed files with 2679 additions and 376 deletions

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>org.qortal</groupId> <groupId>org.qortal</groupId>
<artifactId>qortal</artifactId> <artifactId>qortal</artifactId>
<version>4.7.0</version> <version>4.7.1</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<properties> <properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

View File

@ -14,6 +14,7 @@ import org.qortal.repository.NameRepository;
import org.qortal.repository.Repository; import org.qortal.repository.Repository;
import org.qortal.settings.Settings; import org.qortal.settings.Settings;
import org.qortal.utils.Base58; import org.qortal.utils.Base58;
import org.qortal.utils.Groups;
import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAccessorType;
@ -227,7 +228,7 @@ public class Account {
} }
int level = accountData.getLevel(); int level = accountData.getLevel();
int groupIdToMint = BlockChain.getInstance().getMintingGroupId(); List<Integer> groupIdsToMint = Groups.getGroupIdsToMint( BlockChain.getInstance(), blockchainHeight );
int nameCheckHeight = BlockChain.getInstance().getOnlyMintWithNameHeight(); int nameCheckHeight = BlockChain.getInstance().getOnlyMintWithNameHeight();
int groupCheckHeight = BlockChain.getInstance().getGroupMemberCheckHeight(); int groupCheckHeight = BlockChain.getInstance().getGroupMemberCheckHeight();
int removeNameCheckHeight = BlockChain.getInstance().getRemoveOnlyMintWithNameHeight(); int removeNameCheckHeight = BlockChain.getInstance().getRemoveOnlyMintWithNameHeight();
@ -261,9 +262,9 @@ public class Account {
if (blockchainHeight >= groupCheckHeight && blockchainHeight < removeNameCheckHeight) { if (blockchainHeight >= groupCheckHeight && blockchainHeight < removeNameCheckHeight) {
List<NameData> myName = nameRepository.getNamesByOwner(myAddress); List<NameData> myName = nameRepository.getNamesByOwner(myAddress);
if (Account.isFounder(accountData.getFlags())) { if (Account.isFounder(accountData.getFlags())) {
return accountData.getBlocksMintedPenalty() == 0 && !myName.isEmpty() && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress)); return accountData.getBlocksMintedPenalty() == 0 && !myName.isEmpty() && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
} else { } else {
return level >= levelToMint && !myName.isEmpty() && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress)); return level >= levelToMint && !myName.isEmpty() && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
} }
} }
@ -272,9 +273,9 @@ public class Account {
// Account's address is a member of the minter group // Account's address is a member of the minter group
if (blockchainHeight >= removeNameCheckHeight) { if (blockchainHeight >= removeNameCheckHeight) {
if (Account.isFounder(accountData.getFlags())) { if (Account.isFounder(accountData.getFlags())) {
return accountData.getBlocksMintedPenalty() == 0 && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress)); return accountData.getBlocksMintedPenalty() == 0 && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
} else { } else {
return level >= levelToMint && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress)); return level >= levelToMint && (isGroupValidated || Groups.memberExistsInAnyGroup(groupRepository, groupIdsToMint, myAddress));
} }
} }
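
The Account changes above replace the single-group membership check with Groups.memberExistsInAnyGroup. The real implementation of that helper is not part of this compare; the class below is only a hypothetical sketch of the any-group check, assuming GroupRepository.memberExists keeps its existing signature.

import java.util.List;

import org.qortal.repository.DataException;
import org.qortal.repository.GroupRepository;

// Hypothetical sketch only; not the actual org.qortal.utils.Groups implementation.
public final class GroupsSketch {

    // Returns true if the address belongs to at least one of the given minting groups.
    public static boolean memberExistsInAnyGroup(GroupRepository groupRepository,
                                                 List<Integer> groupIds,
                                                 String address) throws DataException {
        for (int groupId : groupIds) {
            if (groupRepository.memberExists(groupId, address)) {
                return true;
            }
        }
        return false;
    }
}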

View File

@ -194,6 +194,7 @@ public class ApiService {
context.addServlet(AdminStatusWebSocket.class, "/websockets/admin/status"); context.addServlet(AdminStatusWebSocket.class, "/websockets/admin/status");
context.addServlet(BlocksWebSocket.class, "/websockets/blocks"); context.addServlet(BlocksWebSocket.class, "/websockets/blocks");
context.addServlet(DataMonitorSocket.class, "/websockets/datamonitor");
context.addServlet(ActiveChatsWebSocket.class, "/websockets/chat/active/*"); context.addServlet(ActiveChatsWebSocket.class, "/websockets/chat/active/*");
context.addServlet(ChatMessagesWebSocket.class, "/websockets/chat/messages"); context.addServlet(ChatMessagesWebSocket.class, "/websockets/chat/messages");
context.addServlet(TradeOffersWebSocket.class, "/websockets/crosschain/tradeoffers"); context.addServlet(TradeOffersWebSocket.class, "/websockets/crosschain/tradeoffers");

View File

@ -0,0 +1,72 @@
package org.qortal.api.model;
import io.swagger.v3.oas.annotations.media.Schema;
import org.qortal.data.crosschain.CrossChainTradeData;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
public class CrossChainTradeLedgerEntry {
private String market;
private String currency;
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
private long quantity;
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
private long feeAmount;
private String feeCurrency;
@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
private long totalPrice;
private long tradeTimestamp;
protected CrossChainTradeLedgerEntry() {
/* For JAXB */
}
public CrossChainTradeLedgerEntry(String market, String currency, long quantity, long feeAmount, String feeCurrency, long totalPrice, long tradeTimestamp) {
this.market = market;
this.currency = currency;
this.quantity = quantity;
this.feeAmount = feeAmount;
this.feeCurrency = feeCurrency;
this.totalPrice = totalPrice;
this.tradeTimestamp = tradeTimestamp;
}
public String getMarket() {
return market;
}
public String getCurrency() {
return currency;
}
public long getQuantity() {
return quantity;
}
public long getFeeAmount() {
return feeAmount;
}
public String getFeeCurrency() {
return feeCurrency;
}
public long getTotalPrice() {
return totalPrice;
}
public long getTradeTimestamp() {
return tradeTimestamp;
}
}

View File

@ -33,9 +33,13 @@ import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.controller.arbitrary.ArbitraryMetadataManager; import org.qortal.controller.arbitrary.ArbitraryMetadataManager;
import org.qortal.data.account.AccountData; import org.qortal.data.account.AccountData;
import org.qortal.data.arbitrary.ArbitraryCategoryInfo; import org.qortal.data.arbitrary.ArbitraryCategoryInfo;
import org.qortal.data.arbitrary.ArbitraryDataIndexDetail;
import org.qortal.data.arbitrary.ArbitraryDataIndexScoreKey;
import org.qortal.data.arbitrary.ArbitraryDataIndexScorecard;
import org.qortal.data.arbitrary.ArbitraryResourceData; import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata; import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus; import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.data.arbitrary.IndexCache;
import org.qortal.data.naming.NameData; import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData; import org.qortal.data.transaction.TransactionData;
@ -69,8 +73,11 @@ import java.nio.file.Files;
import java.nio.file.Paths; import java.nio.file.Paths;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Comparator;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.stream.Collectors;
@Path("/arbitrary") @Path("/arbitrary")
@Tag(name = "Arbitrary") @Tag(name = "Arbitrary")
@ -172,6 +179,7 @@ public class ArbitraryResource {
@Parameter(description = "Name (searches name field only)") @QueryParam("name") List<String> names, @Parameter(description = "Name (searches name field only)") @QueryParam("name") List<String> names,
@Parameter(description = "Title (searches title metadata field only)") @QueryParam("title") String title, @Parameter(description = "Title (searches title metadata field only)") @QueryParam("title") String title,
@Parameter(description = "Description (searches description metadata field only)") @QueryParam("description") String description, @Parameter(description = "Description (searches description metadata field only)") @QueryParam("description") String description,
@Parameter(description = "Keyword (searches description metadata field by keywords)") @QueryParam("keywords") List<String> keywords,
@Parameter(description = "Prefix only (if true, only the beginning of fields are matched)") @QueryParam("prefix") Boolean prefixOnly, @Parameter(description = "Prefix only (if true, only the beginning of fields are matched)") @QueryParam("prefix") Boolean prefixOnly,
@Parameter(description = "Exact match names only (if true, partial name matches are excluded)") @QueryParam("exactmatchnames") Boolean exactMatchNamesOnly, @Parameter(description = "Exact match names only (if true, partial name matches are excluded)") @QueryParam("exactmatchnames") Boolean exactMatchNamesOnly,
@Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource, @Parameter(description = "Default resources (without identifiers) only") @QueryParam("default") Boolean defaultResource,
@ -212,7 +220,7 @@ public class ArbitraryResource {
} }
List<ArbitraryResourceData> resources = repository.getArbitraryRepository() List<ArbitraryResourceData> resources = repository.getArbitraryRepository()
.searchArbitraryResources(service, query, identifier, names, title, description, usePrefixOnly, .searchArbitraryResources(service, query, identifier, names, title, description, keywords, usePrefixOnly,
exactMatchNames, defaultRes, mode, minLevel, followedOnly, excludeBlocked, includeMetadata, includeStatus, exactMatchNames, defaultRes, mode, minLevel, followedOnly, excludeBlocked, includeMetadata, includeStatus,
before, after, limit, offset, reverse); before, after, limit, offset, reverse);
@ -1185,6 +1193,90 @@ public class ArbitraryResource {
} }
} }
@GET
@Path("/indices")
@Operation(
summary = "Find matching arbitrary resource indices",
description = "",
responses = {
@ApiResponse(
description = "indices",
content = @Content(
array = @ArraySchema(
schema = @Schema(
implementation = ArbitraryDataIndexScorecard.class
)
)
)
)
}
)
public List<ArbitraryDataIndexScorecard> searchIndices(@QueryParam("terms") String[] terms) {
List<ArbitraryDataIndexDetail> indices = new ArrayList<>();
// get index details for each term
for( String term : terms ) {
List<ArbitraryDataIndexDetail> details = IndexCache.getInstance().getIndicesByTerm().get(term);
if( details != null ) {
indices.addAll(details);
}
}
// sum up the scores for each index with identical attributes
Map<ArbitraryDataIndexScoreKey, Double> scoreForKey
= indices.stream()
.collect(
Collectors.groupingBy(
index -> new ArbitraryDataIndexScoreKey(index.name, index.category, index.link),
Collectors.summingDouble(detail -> 1.0 / detail.rank)
)
);
// create scorecards for each index group and put them in descending order by score
List<ArbitraryDataIndexScorecard> scorecards
= scoreForKey.entrySet().stream().map(
entry
->
new ArbitraryDataIndexScorecard(
entry.getValue(),
entry.getKey().name,
entry.getKey().category,
entry.getKey().link)
)
.sorted(Comparator.comparingDouble(ArbitraryDataIndexScorecard::getScore).reversed())
.collect(Collectors.toList());
return scorecards;
}
@GET
@Path("/indices/{name}/{idPrefix}")
@Operation(
summary = "Find matching arbitrary resource indices for a registered name and identifier prefix",
description = "",
responses = {
@ApiResponse(
description = "indices",
content = @Content(
array = @ArraySchema(
schema = @Schema(
implementation = ArbitraryDataIndexDetail.class
)
)
)
)
}
)
public List<ArbitraryDataIndexDetail> searchIndicesByName(@PathParam("name") String name, @PathParam("idPrefix") String idPrefix) {
return
IndexCache.getInstance().getIndicesByIssuer()
.getOrDefault(name, new ArrayList<>(0)).stream()
.filter( indexDetail -> indexDetail.indexIdentifer.startsWith(idPrefix))
.collect(Collectors.toList());
}
// Shared methods // Shared methods
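
A rough client-side sketch for the two index endpoints added above, using the JDK 11 HTTP client. The localhost:12391 API address and the example term, name and identifier prefix are placeholders, not values taken from this diff.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class IndexSearchExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // Score-based search across one or more terms: GET /arbitrary/indices?terms=...
        HttpRequest byTerms = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/arbitrary/indices?terms=example"))
                .GET()
                .build();
        System.out.println(client.send(byTerms, HttpResponse.BodyHandlers.ofString()).body());

        // Issuer name plus identifier prefix: GET /arbitrary/indices/{name}/{idPrefix}
        HttpRequest byIssuer = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/arbitrary/indices/SomeName/idx-"))
                .GET()
                .build();
        System.out.println(client.send(byIssuer, HttpResponse.BodyHandlers.ofString()).body());
    }
}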

View File

@ -10,11 +10,13 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse; import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement; import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag; import io.swagger.v3.oas.annotations.tags.Tag;
import org.glassfish.jersey.media.multipart.ContentDisposition;
import org.qortal.api.ApiError; import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors; import org.qortal.api.ApiErrors;
import org.qortal.api.ApiExceptionFactory; import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security; import org.qortal.api.Security;
import org.qortal.api.model.CrossChainCancelRequest; import org.qortal.api.model.CrossChainCancelRequest;
import org.qortal.api.model.CrossChainTradeLedgerEntry;
import org.qortal.api.model.CrossChainTradeSummary; import org.qortal.api.model.CrossChainTradeSummary;
import org.qortal.controller.tradebot.TradeBot; import org.qortal.controller.tradebot.TradeBot;
import org.qortal.crosschain.ACCT; import org.qortal.crosschain.ACCT;
@ -44,10 +46,14 @@ import org.qortal.utils.Base58;
import org.qortal.utils.ByteArray; import org.qortal.utils.ByteArray;
import org.qortal.utils.NTP; import org.qortal.utils.NTP;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*; import javax.ws.rs.*;
import javax.ws.rs.core.Context; import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.util.*; import java.util.*;
import java.util.function.Supplier; import java.util.function.Supplier;
import java.util.stream.Collectors; import java.util.stream.Collectors;
@ -61,6 +67,13 @@ public class CrossChainResource {
@Context @Context
HttpServletRequest request; HttpServletRequest request;
@Context
HttpServletResponse response;
@Context
ServletContext context;
@GET @GET
@Path("/tradeoffers") @Path("/tradeoffers")
@Operation( @Operation(
@ -258,11 +271,11 @@ public class CrossChainResource {
example = "1597310000000" example = "1597310000000"
) @QueryParam("minimumTimestamp") Long minimumTimestamp, ) @QueryParam("minimumTimestamp") Long minimumTimestamp,
@Parameter( @Parameter(
description = "Optionally filter by buyer Qortal address" description = "Optionally filter by buyer Qortal public key"
) @QueryParam("buyerAddress") String buyerAddress, ) @QueryParam("buyerPublicKey") String buyerPublicKey58,
@Parameter( @Parameter(
description = "Optionally filter by seller Qortal address" description = "Optionally filter by seller Qortal public key"
) @QueryParam("sellerAddress") String sellerAddress, ) @QueryParam("sellerPublicKey") String sellerPublicKey58,
@Parameter( ref = "limit") @QueryParam("limit") Integer limit, @Parameter( ref = "limit") @QueryParam("limit") Integer limit,
@Parameter( ref = "offset" ) @QueryParam("offset") Integer offset, @Parameter( ref = "offset" ) @QueryParam("offset") Integer offset,
@Parameter( ref = "reverse" ) @QueryParam("reverse") Boolean reverse) { @Parameter( ref = "reverse" ) @QueryParam("reverse") Boolean reverse) {
@ -274,6 +287,10 @@ public class CrossChainResource {
if (minimumTimestamp != null && minimumTimestamp <= 0) if (minimumTimestamp != null && minimumTimestamp <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA); throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
// Decode public keys
byte[] buyerPublicKey = decodePublicKey(buyerPublicKey58);
byte[] sellerPublicKey = decodePublicKey(sellerPublicKey58);
final Boolean isFinished = Boolean.TRUE; final Boolean isFinished = Boolean.TRUE;
try (final Repository repository = RepositoryManager.getRepository()) { try (final Repository repository = RepositoryManager.getRepository()) {
@ -304,7 +321,7 @@ public class CrossChainResource {
byte[] codeHash = acctInfo.getKey().value; byte[] codeHash = acctInfo.getKey().value;
ACCT acct = acctInfo.getValue().get(); ACCT acct = acctInfo.getValue().get();
List<ATStateData> atStates = repository.getATRepository().getMatchingFinalATStates(codeHash, buyerAddress, sellerAddress, List<ATStateData> atStates = repository.getATRepository().getMatchingFinalATStates(codeHash, buyerPublicKey, sellerPublicKey,
isFinished, acct.getModeByteOffset(), (long) AcctMode.REDEEMED.value, minimumFinalHeight, isFinished, acct.getModeByteOffset(), (long) AcctMode.REDEEMED.value, minimumFinalHeight,
limit, offset, reverse); limit, offset, reverse);
@ -343,6 +360,120 @@ public class CrossChainResource {
} }
} }
/**
* Decode Public Key
*
* @param publicKey58 the public key in a string
*
* @return the public key in bytes
*/
private byte[] decodePublicKey(String publicKey58) {
if( publicKey58 == null ) return null;
if( publicKey58.isEmpty() ) return new byte[0];
byte[] publicKey;
try {
publicKey = Base58.decode(publicKey58);
} catch (NumberFormatException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PUBLIC_KEY, e);
}
// Correct size for public key?
if (publicKey.length != Transformer.PUBLIC_KEY_LENGTH)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PUBLIC_KEY);
return publicKey;
}
@GET
@Path("/ledger/{publicKey}")
@Operation(
summary = "Accounting entries for all trades.",
description = "Returns accounting entries for all completed cross-chain trades",
responses = {
@ApiResponse(
content = @Content(
schema = @Schema(
type = "string",
format = "byte"
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
public HttpServletResponse getLedgerEntries(
@PathParam("publicKey") String publicKey58,
@Parameter(
description = "Only return trades that completed on/after this timestamp (milliseconds since epoch)",
example = "1597310000000"
) @QueryParam("minimumTimestamp") Long minimumTimestamp) {
byte[] publicKey = decodePublicKey(publicKey58);
// minimumTimestamp (if given) needs to be positive
if (minimumTimestamp != null && minimumTimestamp <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
try (final Repository repository = RepositoryManager.getRepository()) {
Integer minimumFinalHeight = null;
if (minimumTimestamp != null) {
minimumFinalHeight = repository.getBlockRepository().getHeightFromTimestamp(minimumTimestamp);
// If not found in the block repository it will return either 0 or 1
if (minimumFinalHeight == 0 || minimumFinalHeight == 1) {
// Try the archive
minimumFinalHeight = repository.getBlockArchiveRepository().getHeightFromTimestamp(minimumTimestamp);
}
if (minimumFinalHeight == 0)
// We don't have any blocks since minimumTimestamp, let alone trades, so nothing to return
return response;
// height returned from repository is for block BEFORE timestamp
// but we want trades AFTER timestamp so bump height accordingly
minimumFinalHeight++;
}
List<CrossChainTradeLedgerEntry> crossChainTradeLedgerEntries = new ArrayList<>();
Map<ByteArray, Supplier<ACCT>> acctsByCodeHash = SupportedBlockchain.getAcctMap();
// collect ledger entries for each ACCT
for (Map.Entry<ByteArray, Supplier<ACCT>> acctInfo : acctsByCodeHash.entrySet()) {
byte[] codeHash = acctInfo.getKey().value;
ACCT acct = acctInfo.getValue().get();
// collect buys and sells
CrossChainUtils.collectLedgerEntries(publicKey, repository, minimumFinalHeight, crossChainTradeLedgerEntries, codeHash, acct, true);
CrossChainUtils.collectLedgerEntries(publicKey, repository, minimumFinalHeight, crossChainTradeLedgerEntries, codeHash, acct, false);
}
crossChainTradeLedgerEntries.sort((a, b) -> Longs.compare(a.getTradeTimestamp(), b.getTradeTimestamp()));
response.setStatus(HttpServletResponse.SC_OK);
response.setContentType("text/csv");
response.setHeader(
HttpHeaders.CONTENT_DISPOSITION,
ContentDisposition
.type("attachment")
.fileName(CrossChainUtils.createLedgerFileName(Crypto.toAddress(publicKey)))
.build()
.toString()
);
CrossChainUtils.writeToLedger( response.getWriter(), crossChainTradeLedgerEntries);
return response;
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
} catch (IOException e) {
response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
return response;
}
}
@GET @GET
@Path("/price/{blockchain}") @Path("/price/{blockchain}")
@Operation( @Operation(
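
A hedged sketch of downloading the new trade ledger CSV produced by getLedgerEntries above. It assumes the resource's base path is /crosschain, that the node API listens on localhost:12391, and uses a placeholder Base58 public key.

import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Path;

public class TradeLedgerDownloadExample {
    public static void main(String[] args) throws IOException, InterruptedException {
        String publicKey58 = "EXAMPLE_PUBLIC_KEY"; // placeholder; use a real Base58 public key

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/crosschain/ledger/" + publicKey58
                        + "?minimumTimestamp=1597310000000"))
                .GET()
                .build();

        // The endpoint streams CSV with a Content-Disposition attachment header,
        // so the body can simply be written to a local file.
        HttpResponse<Path> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofFile(Path.of("trade-ledger.csv")));

        System.out.println("Saved ledger to " + response.body());
    }
}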

View File

@ -10,21 +10,36 @@ import org.bitcoinj.script.ScriptBuilder;
import org.bouncycastle.util.Strings; import org.bouncycastle.util.Strings;
import org.json.simple.JSONObject; import org.json.simple.JSONObject;
import org.qortal.api.model.CrossChainTradeLedgerEntry;
import org.qortal.api.model.crosschain.BitcoinyTBDRequest; import org.qortal.api.model.crosschain.BitcoinyTBDRequest;
import org.qortal.crosschain.*; import org.qortal.crosschain.*;
import org.qortal.data.at.ATData; import org.qortal.data.at.ATData;
import org.qortal.data.at.ATStateData;
import org.qortal.data.crosschain.*; import org.qortal.data.crosschain.*;
import org.qortal.repository.DataException; import org.qortal.repository.DataException;
import org.qortal.repository.Repository; import org.qortal.repository.Repository;
import org.qortal.utils.Amounts;
import org.qortal.utils.BitTwiddling; import org.qortal.utils.BitTwiddling;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Writer;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.*; import java.util.*;
import java.util.stream.Collectors; import java.util.stream.Collectors;
public class CrossChainUtils { public class CrossChainUtils {
public static final String QORT_CURRENCY_CODE = "QORT";
private static final Logger LOGGER = LogManager.getLogger(CrossChainUtils.class); private static final Logger LOGGER = LogManager.getLogger(CrossChainUtils.class);
public static final String CORE_API_CALL = "Core API Call"; public static final String CORE_API_CALL = "Core API Call";
public static final String QORTAL_EXCHANGE_LABEL = "Qortal";
public static ServerConfigurationInfo buildServerConfigurationInfo(Bitcoiny blockchain) { public static ServerConfigurationInfo buildServerConfigurationInfo(Bitcoiny blockchain) {
@ -632,4 +647,128 @@ public class CrossChainUtils {
byte[] lockTimeABytes = BitTwiddling.toBEByteArray((long) lockTimeA); byte[] lockTimeABytes = BitTwiddling.toBEByteArray((long) lockTimeA);
return Bytes.concat(partnerBitcoinPKH, hashOfSecretA, lockTimeABytes); return Bytes.concat(partnerBitcoinPKH, hashOfSecretA, lockTimeABytes);
} }
/**
* Write To Ledger
*
* @param writer the writer to the ledger
* @param entries the entries to write to the ledger
*
* @throws IOException
*/
public static void writeToLedger(Writer writer, List<CrossChainTradeLedgerEntry> entries) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(writer);
StringJoiner header = new StringJoiner(",");
header.add("Market");
header.add("Currency");
header.add("Quantity");
header.add("Commission Paid");
header.add("Commission Currency");
header.add("Total Price");
header.add("Date Time");
header.add("Exchange");
bufferedWriter.append(header.toString());
DateFormat dateFormatter = new SimpleDateFormat("yyyyMMdd HH:mm");
dateFormatter.setTimeZone(TimeZone.getTimeZone("UTC"));
for( CrossChainTradeLedgerEntry entry : entries ) {
StringJoiner joiner = new StringJoiner(",");
joiner.add(entry.getMarket());
joiner.add(entry.getCurrency());
joiner.add(String.valueOf(Amounts.prettyAmount(entry.getQuantity())));
joiner.add(String.valueOf(Amounts.prettyAmount(entry.getFeeAmount())));
joiner.add(entry.getFeeCurrency());
joiner.add(String.valueOf(Amounts.prettyAmount(entry.getTotalPrice())));
joiner.add(dateFormatter.format(new Date(entry.getTradeTimestamp())));
joiner.add(QORTAL_EXCHANGE_LABEL);
bufferedWriter.newLine();
bufferedWriter.append(joiner.toString());
}
bufferedWriter.newLine();
bufferedWriter.flush();
}
/**
* Create Ledger File Name
*
* Create a file name that includes the timestamp and address.
*
* @param address the address
*
* @return the file name created
*/
public static String createLedgerFileName(String address) {
DateFormat dateFormatter = new SimpleDateFormat("yyyyMMddHHmmss");
String fileName = "ledger-" + address + "-" + dateFormatter.format(new Date());
return fileName;
}
/**
* Collect Ledger Entries
*
* @param publicKey the public key for the ledger entries, buy and sell
* @param repository the data repository
* @param minimumFinalHeight the minimum block height for entries to be collected
* @param entries the ledger entries to add to
* @param codeHash code hash for the entry blockchain
* @param acct the ACCT for the entry blockchain
* @param isBuy true when collecting entries for a buy, otherwise false
*
* @throws DataException
*/
public static void collectLedgerEntries(
byte[] publicKey,
Repository repository,
Integer minimumFinalHeight,
List<CrossChainTradeLedgerEntry> entries,
byte[] codeHash,
ACCT acct,
boolean isBuy) throws DataException {
// get all the final AT states for the code hash (foreign coin)
List<ATStateData> atStates
= repository.getATRepository().getMatchingFinalATStates(
codeHash,
isBuy ? publicKey : null,
!isBuy ? publicKey : null,
Boolean.TRUE, acct.getModeByteOffset(),
(long) AcctMode.REDEEMED.value,
minimumFinalHeight,
null, null, false
);
String foreignBlockchainCurrencyCode = acct.getBlockchain().getCurrencyCode();
// for each trade, build ledger entry, collect ledger entry
for (ATStateData atState : atStates) {
CrossChainTradeData crossChainTradeData = acct.populateTradeData(repository, atState);
// We also need block timestamp for use as trade timestamp
long localTimestamp = repository.getBlockRepository().getTimestampFromHeight(atState.getHeight());
if (localTimestamp == 0) {
// Try the archive
localTimestamp = repository.getBlockArchiveRepository().getTimestampFromHeight(atState.getHeight());
}
CrossChainTradeLedgerEntry ledgerEntry
= new CrossChainTradeLedgerEntry(
isBuy ? QORT_CURRENCY_CODE : foreignBlockchainCurrencyCode,
isBuy ? foreignBlockchainCurrencyCode : QORT_CURRENCY_CODE,
isBuy ? crossChainTradeData.qortAmount : crossChainTradeData.expectedForeignAmount,
0,
foreignBlockchainCurrencyCode,
isBuy ? crossChainTradeData.expectedForeignAmount : crossChainTradeData.qortAmount,
localTimestamp);
entries.add(ledgerEntry);
}
}
} }
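
A small isolated sketch that drives CrossChainUtils.writeToLedger with one hand-built CrossChainTradeLedgerEntry from this diff. The package of CrossChainUtils in the import and all sample values are assumptions for illustration only.

import java.io.StringWriter;
import java.util.List;

import org.qortal.api.model.CrossChainTradeLedgerEntry;
import org.qortal.api.resource.CrossChainUtils; // assumed package

public class LedgerCsvExample {
    public static void main(String[] args) throws Exception {
        // Made-up sample entry; amounts use the long fixed-point form pretty-printed by Amounts.prettyAmount.
        CrossChainTradeLedgerEntry entry = new CrossChainTradeLedgerEntry(
                "QORT", "LTC", 10_00000000L, 0L, "LTC", 5_000_000L, System.currentTimeMillis());

        StringWriter writer = new StringWriter();
        CrossChainUtils.writeToLedger(writer, List.of(entry));

        System.out.print(writer); // header row followed by one CSV line per entry
    }
}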

View File

@ -0,0 +1,102 @@
package org.qortal.api.websocket;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.WebSocketException;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketError;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
import org.qortal.api.ApiError;
import org.qortal.controller.Controller;
import org.qortal.data.arbitrary.DataMonitorInfo;
import org.qortal.event.DataMonitorEvent;
import org.qortal.event.Event;
import org.qortal.event.EventBus;
import org.qortal.event.Listener;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.utils.Base58;
import java.io.IOException;
import java.io.StringWriter;
import java.util.List;
@WebSocket
@SuppressWarnings("serial")
public class DataMonitorSocket extends ApiWebSocket implements Listener {
private static final Logger LOGGER = LogManager.getLogger(DataMonitorSocket.class);
@Override
public void configure(WebSocketServletFactory factory) {
LOGGER.info("configure");
factory.register(DataMonitorSocket.class);
EventBus.INSTANCE.addListener(this);
}
@Override
public void listen(Event event) {
if (!(event instanceof DataMonitorEvent))
return;
DataMonitorEvent dataMonitorEvent = (DataMonitorEvent) event;
for (Session session : getSessions())
sendDataEventSummary(session, buildInfo(dataMonitorEvent));
}
private DataMonitorInfo buildInfo(DataMonitorEvent dataMonitorEvent) {
return new DataMonitorInfo(
dataMonitorEvent.getTimestamp(),
dataMonitorEvent.getIdentifier(),
dataMonitorEvent.getName(),
dataMonitorEvent.getService(),
dataMonitorEvent.getDescription(),
dataMonitorEvent.getTransactionTimestamp(),
dataMonitorEvent.getLatestPutTimestamp()
);
}
@OnWebSocketConnect
@Override
public void onWebSocketConnect(Session session) {
super.onWebSocketConnect(session);
}
@OnWebSocketClose
@Override
public void onWebSocketClose(Session session, int statusCode, String reason) {
super.onWebSocketClose(session, statusCode, reason);
}
@OnWebSocketError
public void onWebSocketError(Session session, Throwable throwable) {
/* We ignore errors for now, but method here to silence log spam */
}
@OnWebSocketMessage
public void onWebSocketMessage(Session session, String message) {
LOGGER.info("onWebSocketMessage: message = " + message);
}
private void sendDataEventSummary(Session session, DataMonitorInfo dataMonitorInfo) {
StringWriter stringWriter = new StringWriter();
try {
marshall(stringWriter, dataMonitorInfo);
session.getRemote().sendStringByFuture(stringWriter.toString());
} catch (IOException | WebSocketException e) {
// No output this time
}
}
}
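
A minimal listener sketch for the new /websockets/datamonitor endpoint registered in ApiService, built on the JDK 11 WebSocket client; the ws://localhost:12391 address is an assumption about the node's default API port.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.WebSocket;
import java.util.concurrent.CompletionStage;

public class DataMonitorClient {
    public static void main(String[] args) throws Exception {
        WebSocket.Listener listener = new WebSocket.Listener() {
            @Override
            public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
                System.out.println("data monitor event: " + data); // JSON-marshalled DataMonitorInfo
                return WebSocket.Listener.super.onText(webSocket, data, last);
            }
        };

        HttpClient.newHttpClient()
                .newWebSocketBuilder()
                .buildAsync(URI.create("ws://localhost:12391/websockets/datamonitor"), listener)
                .join();

        Thread.sleep(60_000); // keep the demo process alive long enough to receive events
    }
}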

View File

@ -439,7 +439,15 @@ public class ArbitraryDataReader {
// Ensure the complete hash matches the joined chunks // Ensure the complete hash matches the joined chunks
if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) { if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) {
// Delete the invalid file // Delete the invalid file
arbitraryDataFile.delete(); LOGGER.info("Deleting invalid file: path = " + arbitraryDataFile.getFilePath());
if( arbitraryDataFile.delete() ) {
LOGGER.info("Deleted invalid file successfully: path = " + arbitraryDataFile.getFilePath());
}
else {
LOGGER.warn("Could not delete invalid file: path = " + arbitraryDataFile.getFilePath());
}
throw new DataException("Unable to validate complete file hash"); throw new DataException("Unable to validate complete file hash");
} }
} }

View File

@ -39,6 +39,7 @@ import org.qortal.transform.block.BlockTransformer;
import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.Amounts; import org.qortal.utils.Amounts;
import org.qortal.utils.Base58; import org.qortal.utils.Base58;
import org.qortal.utils.Groups;
import org.qortal.utils.NTP; import org.qortal.utils.NTP;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
@ -150,7 +151,7 @@ public class Block {
final BlockChain blockChain = BlockChain.getInstance(); final BlockChain blockChain = BlockChain.getInstance();
ExpandedAccount(Repository repository, RewardShareData rewardShareData) throws DataException { ExpandedAccount(Repository repository, RewardShareData rewardShareData, int blockHeight) throws DataException {
this.rewardShareData = rewardShareData; this.rewardShareData = rewardShareData;
this.sharePercent = this.rewardShareData.getSharePercent(); this.sharePercent = this.rewardShareData.getSharePercent();
@ -159,7 +160,12 @@ public class Block {
this.isMinterFounder = Account.isFounder(mintingAccountData.getFlags()); this.isMinterFounder = Account.isFounder(mintingAccountData.getFlags());
this.isRecipientAlsoMinter = this.rewardShareData.getRecipient().equals(this.mintingAccount.getAddress()); this.isRecipientAlsoMinter = this.rewardShareData.getRecipient().equals(this.mintingAccount.getAddress());
this.isMinterMember = repository.getGroupRepository().memberExists(BlockChain.getInstance().getMintingGroupId(), this.mintingAccount.getAddress()); this.isMinterMember
= Groups.memberExistsInAnyGroup(
repository.getGroupRepository(),
Groups.getGroupIdsToMint(BlockChain.getInstance(), blockHeight),
this.mintingAccount.getAddress()
);
if (this.isRecipientAlsoMinter) { if (this.isRecipientAlsoMinter) {
// Self-share: minter is also recipient // Self-share: minter is also recipient
@ -435,9 +441,9 @@ public class Block {
if (height >= BlockChain.getInstance().getGroupMemberCheckHeight()) { if (height >= BlockChain.getInstance().getGroupMemberCheckHeight()) {
onlineAccounts.removeIf(a -> { onlineAccounts.removeIf(a -> {
try { try {
int groupId = BlockChain.getInstance().getMintingGroupId(); List<Integer> groupIdsToMint = Groups.getGroupIdsToMint(BlockChain.getInstance(), height);
String address = Account.getRewardShareMintingAddress(repository, a.getPublicKey()); String address = Account.getRewardShareMintingAddress(repository, a.getPublicKey());
boolean isMinterGroupMember = repository.getGroupRepository().memberExists(groupId, address); boolean isMinterGroupMember = Groups.memberExistsInAnyGroup(repository.getGroupRepository(), groupIdsToMint, address);
return !isMinterGroupMember; return !isMinterGroupMember;
} catch (DataException e) { } catch (DataException e) {
// Something went wrong, so remove the account // Something went wrong, so remove the account
@ -753,7 +759,7 @@ public class Block {
List<ExpandedAccount> expandedAccounts = new ArrayList<>(); List<ExpandedAccount> expandedAccounts = new ArrayList<>();
for (RewardShareData rewardShare : this.cachedOnlineRewardShares) { for (RewardShareData rewardShare : this.cachedOnlineRewardShares) {
expandedAccounts.add(new ExpandedAccount(repository, rewardShare)); expandedAccounts.add(new ExpandedAccount(repository, rewardShare, this.blockData.getHeight()));
} }
this.cachedExpandedAccounts = expandedAccounts; this.cachedExpandedAccounts = expandedAccounts;
@ -2485,11 +2491,10 @@ public class Block {
try (final Repository repository = RepositoryManager.getRepository()) { try (final Repository repository = RepositoryManager.getRepository()) {
GroupRepository groupRepository = repository.getGroupRepository(); GroupRepository groupRepository = repository.getGroupRepository();
List<Integer> mintingGroupIds = Groups.getGroupIdsToMint(BlockChain.getInstance(), this.blockData.getHeight());
// all minter admins // all minter admins
List<String> minterAdmins List<String> minterAdmins = Groups.getAllAdmins(groupRepository, mintingGroupIds);
= groupRepository.getGroupAdmins(BlockChain.getInstance().getMintingGroupId()).stream()
.map(GroupAdminData::getAdmin)
.collect(Collectors.toList());
// all minter admins that are online // all minter admins that are online
List<ExpandedAccount> onlineMinterAdminAccounts List<ExpandedAccount> onlineMinterAdminAccounts

View File

@ -212,7 +212,13 @@ public class BlockChain {
private int minAccountLevelToRewardShare; private int minAccountLevelToRewardShare;
private int maxRewardSharesPerFounderMintingAccount; private int maxRewardSharesPerFounderMintingAccount;
private int founderEffectiveMintingLevel; private int founderEffectiveMintingLevel;
private int mintingGroupId;
public static class IdsForHeight {
public int height;
public List<Integer> ids;
}
private List<IdsForHeight> mintingGroupIds;
/** Minimum time to retain online account signatures (ms) for block validity checks. */ /** Minimum time to retain online account signatures (ms) for block validity checks. */
private long onlineAccountSignaturesMinLifetime; private long onlineAccountSignaturesMinLifetime;
@ -544,8 +550,8 @@ public class BlockChain {
return this.onlineAccountSignaturesMaxLifetime; return this.onlineAccountSignaturesMaxLifetime;
} }
public int getMintingGroupId() { public List<IdsForHeight> getMintingGroupIds() {
return this.mintingGroupId; return mintingGroupIds;
} }
public CiyamAtSettings getCiyamAtSettings() { public CiyamAtSettings getCiyamAtSettings() {
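
The single mintingGroupId setting above becomes a list of IdsForHeight entries. The sketch below is a hypothetical illustration of the height-based lookup this enables, assuming entries are ordered by ascending activation height; the real Groups.getGroupIdsToMint implementation is not included in this compare.

import java.util.Collections;
import java.util.List;

public class MintingGroupLookup {

    // Local stand-in mirroring BlockChain.IdsForHeight from the diff above.
    public static class IdsForHeight {
        public int height;
        public List<Integer> ids;
    }

    // Returns the group ids of the latest entry that activates at or below the given block height.
    public static List<Integer> groupIdsToMint(List<IdsForHeight> entries, int blockHeight) {
        List<Integer> result = Collections.emptyList();
        for (IdsForHeight entry : entries) {
            if (entry.height <= blockHeight) {
                result = entry.ids;
            } else {
                break;
            }
        }
        return result;
    }
}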

View File

@ -423,6 +423,12 @@ public class Controller extends Thread {
LOGGER.info("Db Cache Disabled"); LOGGER.info("Db Cache Disabled");
} }
LOGGER.info("Arbitrary Indexing Starting ...");
ArbitraryIndexUtils.startCaching(
Settings.getInstance().getArbitraryIndexingPriority(),
Settings.getInstance().getArbitraryIndexingFrequency()
);
if( Settings.getInstance().isBalanceRecorderEnabled() ) { if( Settings.getInstance().isBalanceRecorderEnabled() ) {
Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance(); Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();
@ -541,6 +547,16 @@ public class Controller extends Thread {
ArbitraryDataStorageManager.getInstance().start(); ArbitraryDataStorageManager.getInstance().start();
ArbitraryDataRenderManager.getInstance().start(); ArbitraryDataRenderManager.getInstance().start();
// start rebuild arbitrary resource cache timer task
if( Settings.getInstance().isRebuildArbitraryResourceCacheTaskEnabled() ) {
new Timer().schedule(
new RebuildArbitraryResourceCacheTask(),
Settings.getInstance().getRebuildArbitraryResourceCacheTaskDelay() * RebuildArbitraryResourceCacheTask.MILLIS_IN_MINUTE,
Settings.getInstance().getRebuildArbitraryResourceCacheTaskPeriod() * RebuildArbitraryResourceCacheTask.MILLIS_IN_HOUR
);
}
LOGGER.info("Starting online accounts manager"); LOGGER.info("Starting online accounts manager");
OnlineAccountsManager.getInstance().start(); OnlineAccountsManager.getInstance().start();

View File

@ -25,6 +25,7 @@ import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager; import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings; import org.qortal.settings.Settings;
import org.qortal.utils.Base58; import org.qortal.utils.Base58;
import org.qortal.utils.Groups;
import org.qortal.utils.NTP; import org.qortal.utils.NTP;
import org.qortal.utils.NamedThreadFactory; import org.qortal.utils.NamedThreadFactory;
@ -225,11 +226,14 @@ public class OnlineAccountsManager {
Set<OnlineAccountData> onlineAccountsToAdd = new HashSet<>(); Set<OnlineAccountData> onlineAccountsToAdd = new HashSet<>();
Set<OnlineAccountData> onlineAccountsToRemove = new HashSet<>(); Set<OnlineAccountData> onlineAccountsToRemove = new HashSet<>();
try (final Repository repository = RepositoryManager.getRepository()) { try (final Repository repository = RepositoryManager.getRepository()) {
int blockHeight = repository.getBlockRepository().getBlockchainHeight();
List<String> mintingGroupMemberAddresses List<String> mintingGroupMemberAddresses
= repository.getGroupRepository() = Groups.getAllMembers(
.getGroupMembers(BlockChain.getInstance().getMintingGroupId()).stream() repository.getGroupRepository(),
.map(GroupMemberData::getMember) Groups.getGroupIdsToMint(BlockChain.getInstance(), blockHeight)
.collect(Collectors.toList()); );
for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) { for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) {
if (isStopping) if (isStopping)

View File

@ -2,22 +2,30 @@ package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource;
import org.qortal.controller.Controller; import org.qortal.controller.Controller;
import org.qortal.data.arbitrary.ArbitraryResourceData; import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.event.DataMonitorEvent;
import org.qortal.event.EventBus;
import org.qortal.gui.SplashFrame; import org.qortal.gui.SplashFrame;
import org.qortal.repository.DataException; import org.qortal.repository.DataException;
import org.qortal.repository.Repository; import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager; import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings; import org.qortal.settings.Settings;
import org.qortal.transaction.ArbitraryTransaction; import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58; import org.qortal.utils.Base58;
import java.text.NumberFormat;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
public class ArbitraryDataCacheManager extends Thread { public class ArbitraryDataCacheManager extends Thread {
@ -29,6 +37,11 @@ public class ArbitraryDataCacheManager extends Thread {
/** Queue of arbitrary transactions that require cache updates */ /** Queue of arbitrary transactions that require cache updates */
private final List<ArbitraryTransactionData> updateQueue = Collections.synchronizedList(new ArrayList<>()); private final List<ArbitraryTransactionData> updateQueue = Collections.synchronizedList(new ArrayList<>());
private static final NumberFormat FORMATTER = NumberFormat.getNumberInstance();
static {
FORMATTER.setGroupingUsed(true);
}
public static synchronized ArbitraryDataCacheManager getInstance() { public static synchronized ArbitraryDataCacheManager getInstance() {
if (instance == null) { if (instance == null) {
@ -45,17 +58,22 @@ public class ArbitraryDataCacheManager extends Thread {
try { try {
while (!Controller.isStopping()) { while (!Controller.isStopping()) {
try {
Thread.sleep(500L); Thread.sleep(500L);
// Process queue // Process queue
processResourceQueue(); processResourceQueue();
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
Thread.sleep(600_000L); // wait 10 minutes to continue
} }
} catch (InterruptedException e) {
// Fall through to exit thread
} }
// Clear queue before terminating thread // Clear queue before terminating thread
processResourceQueue(); processResourceQueue();
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
} }
public void shutdown() { public void shutdown() {
@ -85,14 +103,25 @@ public class ArbitraryDataCacheManager extends Thread {
// Update arbitrary resource caches // Update arbitrary resource caches
try { try {
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData); ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
arbitraryTransaction.updateArbitraryResourceCache(repository); arbitraryTransaction.updateArbitraryResourceCacheIncludingMetadata(repository, new HashSet<>(0), new HashMap<>(0));
arbitraryTransaction.updateArbitraryMetadataCache(repository);
repository.saveChanges(); repository.saveChanges();
// Update status as separate commit, as this is more prone to failure // Update status as separate commit, as this is more prone to failure
arbitraryTransaction.updateArbitraryResourceStatus(repository); arbitraryTransaction.updateArbitraryResourceStatus(repository);
repository.saveChanges(); repository.saveChanges();
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
transactionData.getIdentifier(),
transactionData.getName(),
transactionData.getService().name(),
"updated resource cache and status, queue",
transactionData.getTimestamp(),
transactionData.getTimestamp()
)
);
LOGGER.debug(() -> String.format("Finished processing transaction %.8s in arbitrary resource queue...", Base58.encode(transactionData.getSignature()))); LOGGER.debug(() -> String.format("Finished processing transaction %.8s in arbitrary resource queue...", Base58.encode(transactionData.getSignature())));
} catch (DataException e) { } catch (DataException e) {
@ -103,6 +132,9 @@ public class ArbitraryDataCacheManager extends Thread {
} catch (DataException e) { } catch (DataException e) {
LOGGER.error("Repository issue while processing arbitrary resource cache updates", e); LOGGER.error("Repository issue while processing arbitrary resource cache updates", e);
} }
catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
} }
public void addToUpdateQueue(ArbitraryTransactionData transactionData) { public void addToUpdateQueue(ArbitraryTransactionData transactionData) {
@ -148,34 +180,66 @@ public class ArbitraryDataCacheManager extends Thread {
LOGGER.info("Building arbitrary resources cache..."); LOGGER.info("Building arbitrary resources cache...");
SplashFrame.getInstance().updateStatus("Building QDN cache - please wait..."); SplashFrame.getInstance().updateStatus("Building QDN cache - please wait...");
final int batchSize = 100; final int batchSize = Settings.getInstance().getBuildArbitraryResourcesBatchSize();
int offset = 0; int offset = 0;
List<ArbitraryTransactionData> allArbitraryTransactionsInDescendingOrder
= repository.getArbitraryRepository().getLatestArbitraryTransactions();
LOGGER.info("arbitrary transactions: count = " + allArbitraryTransactionsInDescendingOrder.size());
List<ArbitraryResourceData> resources = repository.getArbitraryRepository().getArbitraryResources(null, null, true);
Map<ArbitraryTransactionDataHashWrapper, ArbitraryResourceData> resourceByWrapper = new HashMap<>(resources.size());
for( ArbitraryResourceData resource : resources ) {
resourceByWrapper.put(
new ArbitraryTransactionDataHashWrapper(resource.service.value, resource.name, resource.identifier),
resource
);
}
LOGGER.info("arbitrary resources: count = " + resourceByWrapper.size());
Set<ArbitraryTransactionDataHashWrapper> latestTransactionsWrapped = new HashSet<>(allArbitraryTransactionsInDescendingOrder.size());
// Loop through all ARBITRARY transactions, and determine latest state // Loop through all ARBITRARY transactions, and determine latest state
while (!Controller.isStopping()) { while (!Controller.isStopping()) {
LOGGER.info("Fetching arbitrary transactions {} - {}", offset, offset+batchSize-1); LOGGER.info(
"Fetching arbitrary transactions {} - {} / {} Total",
FORMATTER.format(offset),
FORMATTER.format(offset+batchSize-1),
FORMATTER.format(allArbitraryTransactionsInDescendingOrder.size())
);
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, List.of(Transaction.TransactionType.ARBITRARY), null, null, null, TransactionsResource.ConfirmationStatus.BOTH, batchSize, offset, false); List<ArbitraryTransactionData> transactionsToProcess
if (signatures.isEmpty()) { = allArbitraryTransactionsInDescendingOrder.stream()
.skip(offset)
.limit(batchSize)
.collect(Collectors.toList());
if (transactionsToProcess.isEmpty()) {
// Complete // Complete
break; break;
} }
// Expand signatures to transactions try {
for (byte[] signature : signatures) { for( ArbitraryTransactionData transactionData : transactionsToProcess) {
ArbitraryTransactionData transactionData = (ArbitraryTransactionData) repository
.getTransactionRepository().fromSignature(signature);
if (transactionData.getService() == null) { if (transactionData.getService() == null) {
// Unsupported service - ignore this resource // Unsupported service - ignore this resource
continue; continue;
} }
latestTransactionsWrapped.add(new ArbitraryTransactionDataHashWrapper(transactionData));
// Update arbitrary resource caches // Update arbitrary resource caches
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData); ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
arbitraryTransaction.updateArbitraryResourceCache(repository); arbitraryTransaction.updateArbitraryResourceCacheIncludingMetadata(repository, latestTransactionsWrapped, resourceByWrapper);
arbitraryTransaction.updateArbitraryMetadataCache(repository); }
repository.saveChanges(); repository.saveChanges();
} catch (DataException e) {
repository.discardChanges();
LOGGER.error(e.getMessage(), e);
} }
offset += batchSize; offset += batchSize;
} }
@ -193,6 +257,11 @@ public class ArbitraryDataCacheManager extends Thread {
repository.discardChanges(); repository.discardChanges();
throw new DataException("Build of arbitrary resources cache failed."); throw new DataException("Build of arbitrary resources cache failed.");
} }
catch (Exception e) {
LOGGER.error(e.getMessage(), e);
return false;
}
} }
private boolean refreshArbitraryStatuses(Repository repository) throws DataException { private boolean refreshArbitraryStatuses(Repository repository) throws DataException {
@ -200,27 +269,48 @@ public class ArbitraryDataCacheManager extends Thread {
LOGGER.info("Refreshing arbitrary resource statuses for locally hosted transactions..."); LOGGER.info("Refreshing arbitrary resource statuses for locally hosted transactions...");
SplashFrame.getInstance().updateStatus("Refreshing statuses - please wait..."); SplashFrame.getInstance().updateStatus("Refreshing statuses - please wait...");
final int batchSize = 100; final int batchSize = Settings.getInstance().getBuildArbitraryResourcesBatchSize();
int offset = 0; int offset = 0;
List<ArbitraryTransactionData> allHostedTransactions
= ArbitraryDataStorageManager.getInstance()
.listAllHostedTransactions(repository, null, null);
// Loop through all ARBITRARY transactions, and determine latest state // Loop through all ARBITRARY transactions, and determine latest state
while (!Controller.isStopping()) { while (!Controller.isStopping()) {
LOGGER.info("Fetching hosted transactions {} - {}", offset, offset+batchSize-1); LOGGER.info(
"Fetching hosted transactions {} - {} / {} Total",
FORMATTER.format(offset),
FORMATTER.format(offset+batchSize-1),
FORMATTER.format(allHostedTransactions.size())
);
List<ArbitraryTransactionData> hostedTransactions
= allHostedTransactions.stream()
.skip(offset)
.limit(batchSize)
.collect(Collectors.toList());
List<ArbitraryTransactionData> hostedTransactions = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository, batchSize, offset);
if (hostedTransactions.isEmpty()) { if (hostedTransactions.isEmpty()) {
// Complete // Complete
break; break;
} }
try {
// Loop through hosted transactions // Loop through hosted transactions
for (ArbitraryTransactionData transactionData : hostedTransactions) { for (ArbitraryTransactionData transactionData : hostedTransactions) {
// Determine status and update cache // Determine status and update cache
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData); ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
arbitraryTransaction.updateArbitraryResourceStatus(repository); arbitraryTransaction.updateArbitraryResourceStatus(repository);
repository.saveChanges();
} }
repository.saveChanges();
} catch (DataException e) {
repository.discardChanges();
LOGGER.error(e.getMessage(), e);
}
offset += batchSize; offset += batchSize;
} }
@ -234,6 +324,11 @@ public class ArbitraryDataCacheManager extends Thread {
repository.discardChanges(); repository.discardChanges();
throw new DataException("Refresh of arbitrary resource statuses failed."); throw new DataException("Refresh of arbitrary resource statuses failed.");
} }
catch (Exception e) {
LOGGER.error(e.getMessage(), e);
return false;
}
} }
} }
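The status refresh above now loads all hosted transactions once and pages through them with skip/limit instead of re-querying the repository per batch. A minimal standalone sketch of that batching pattern (not part of this diff; class name and sample data are invented):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class BatchPaginationSketch {
    public static void main(String[] args) {
        // Stand-in for the full list of hosted transactions
        List<Integer> all = IntStream.range(0, 250).boxed().collect(Collectors.toList());
        final int batchSize = 100;
        int offset = 0;
        while (true) {
            List<Integer> batch = all.stream()
                    .skip(offset)
                    .limit(batchSize)
                    .collect(Collectors.toList());
            if (batch.isEmpty())
                break; // complete
            System.out.printf("Processing %d items starting at offset %d%n", batch.size(), offset);
            offset += batchSize;
        }
    }
}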

View File

@ -2,9 +2,10 @@ package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData; import org.qortal.data.transaction.TransactionData;
import org.qortal.event.DataMonitorEvent;
import org.qortal.event.EventBus;
import org.qortal.repository.DataException; import org.qortal.repository.DataException;
import org.qortal.repository.Repository; import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager; import org.qortal.repository.RepositoryManager;
@ -21,8 +22,12 @@ import java.nio.file.Paths;
import java.security.SecureRandom; import java.security.SecureRandom;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import static org.qortal.controller.arbitrary.ArbitraryDataStorageManager.DELETION_THRESHOLD; import static org.qortal.controller.arbitrary.ArbitraryDataStorageManager.DELETION_THRESHOLD;
@ -77,6 +82,19 @@ public class ArbitraryDataCleanupManager extends Thread {
final int limit = 100; final int limit = 100;
int offset = 0; int offset = 0;
List<ArbitraryTransactionData> allArbitraryTransactionsInDescendingOrder;
try (final Repository repository = RepositoryManager.getRepository()) {
allArbitraryTransactionsInDescendingOrder
= repository.getArbitraryRepository()
.getLatestArbitraryTransactions();
} catch( Exception e) {
LOGGER.error(e.getMessage(), e);
allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
}
Set<ArbitraryTransactionData> processedTransactions = new HashSet<>();
try { try {
while (!isStopping) { while (!isStopping) {
Thread.sleep(30000); Thread.sleep(30000);
@ -107,27 +125,31 @@ public class ArbitraryDataCleanupManager extends Thread {
// Any arbitrary transactions we want to fetch data for? // Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) { try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true); List<ArbitraryTransactionData> transactions = allArbitraryTransactionsInDescendingOrder.stream().skip(offset).limit(limit).collect(Collectors.toList());
// LOGGER.info("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
if (isStopping) { if (isStopping) {
return; return;
} }
if (signatures == null || signatures.isEmpty()) { if (transactions == null || transactions.isEmpty()) {
offset = 0; offset = 0;
continue; allArbitraryTransactionsInDescendingOrder
= repository.getArbitraryRepository()
.getLatestArbitraryTransactions();
transactions = allArbitraryTransactionsInDescendingOrder.stream().limit(limit).collect(Collectors.toList());
processedTransactions.clear();
} }
offset += limit; offset += limit;
now = NTP.getTime(); now = NTP.getTime();
// Loop through the signatures in this batch // Loop through the signatures in this batch
for (int i=0; i<signatures.size(); i++) { for (int i=0; i<transactions.size(); i++) {
if (isStopping) { if (isStopping) {
return; return;
} }
byte[] signature = signatures.get(i); ArbitraryTransactionData arbitraryTransactionData = transactions.get(i);
if (signature == null) { if (arbitraryTransactionData == null) {
continue; continue;
} }
@ -136,9 +158,7 @@ public class ArbitraryDataCleanupManager extends Thread {
Thread.sleep(5000); Thread.sleep(5000);
} }
// Fetch the transaction data if (arbitraryTransactionData.getService() == null) {
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
if (arbitraryTransactionData == null || arbitraryTransactionData.getService() == null) {
continue; continue;
} }
@ -147,6 +167,8 @@ public class ArbitraryDataCleanupManager extends Thread {
continue; continue;
} }
boolean mostRecentTransaction = processedTransactions.add(arbitraryTransactionData);
// Check if we have the complete file // Check if we have the complete file
boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(arbitraryTransactionData); boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(arbitraryTransactionData);
@ -167,20 +189,54 @@ public class ArbitraryDataCleanupManager extends Thread {
LOGGER.info("Deleting transaction {} because we can't host its data", LOGGER.info("Deleting transaction {} because we can't host its data",
Base58.encode(arbitraryTransactionData.getSignature())); Base58.encode(arbitraryTransactionData.getSignature()));
ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData); ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData);
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
"can't store data, deleting",
arbitraryTransactionData.getTimestamp(),
arbitraryTransactionData.getTimestamp()
)
);
continue; continue;
} }
// Check to see if we have had a more recent PUT // Check to see if we have had a more recent PUT
boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData); if (!mostRecentTransaction) {
if (hasMoreRecentPutTransaction) {
// There is a more recent PUT transaction than the one we are currently processing. // There is a more recent PUT transaction than the one we are currently processing.
// When a PUT is issued, it replaces any layers that would have been there before. // When a PUT is issued, it replaces any layers that would have been there before.
// Therefore any data relating to this older transaction is no longer needed. // Therefore any data relating to this older transaction is no longer needed.
LOGGER.info(String.format("Newer PUT found for %s %s since transaction %s. " + LOGGER.info(String.format("Newer PUT found for %s %s since transaction %s. " +
"Deleting all files associated with the earlier transaction.", arbitraryTransactionData.getService(), "Deleting all files associated with the earlier transaction.", arbitraryTransactionData.getService(),
arbitraryTransactionData.getName(), Base58.encode(signature))); arbitraryTransactionData.getName(), Base58.encode(arbitraryTransactionData.getSignature())));
ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData); ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData);
Optional<ArbitraryTransactionData> moreRecentPutTransaction
= processedTransactions.stream()
.filter(data -> data.equals(arbitraryTransactionData))
.findAny();
if( moreRecentPutTransaction.isPresent() ) {
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
"deleting data due to replacement",
arbitraryTransactionData.getTimestamp(),
moreRecentPutTransaction.get().getTimestamp()
)
);
}
else {
LOGGER.warn("Something went wrong with the most recent put transaction determination!");
}
continue; continue;
} }
@ -199,7 +255,21 @@ public class ArbitraryDataCleanupManager extends Thread {
LOGGER.debug(String.format("Transaction %s has complete file and all chunks", LOGGER.debug(String.format("Transaction %s has complete file and all chunks",
Base58.encode(arbitraryTransactionData.getSignature()))); Base58.encode(arbitraryTransactionData.getSignature())));
ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, STALE_FILE_TIMEOUT); boolean wasDeleted = ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
if( wasDeleted ) {
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
"deleting file, retaining chunks",
arbitraryTransactionData.getTimestamp(),
arbitraryTransactionData.getTimestamp()
)
);
}
continue; continue;
} }
@ -237,17 +307,6 @@ public class ArbitraryDataCleanupManager extends Thread {
this.storageLimitReached(repository); this.storageLimitReached(repository);
} }
// Delete random data associated with name if we're over our storage limit for this name
// Use the DELETION_THRESHOLD, for the same reasons as above
for (String followedName : ListUtils.followedNames()) {
if (isStopping) {
return;
}
if (!storageManager.isStorageSpaceAvailableForName(repository, followedName, DELETION_THRESHOLD)) {
this.storageLimitReachedForName(repository, followedName);
}
}
} catch (DataException e) { } catch (DataException e) {
LOGGER.error("Repository issue when cleaning up arbitrary transaction data", e); LOGGER.error("Repository issue when cleaning up arbitrary transaction data", e);
} }
@ -326,25 +385,6 @@ public class ArbitraryDataCleanupManager extends Thread {
// FUTURE: consider reducing the expiry time of the reader cache // FUTURE: consider reducing the expiry time of the reader cache
} }
public void storageLimitReachedForName(Repository repository, String name) throws InterruptedException {
// We think that the storage limit has been reached for supplied name - but we should double check
if (ArbitraryDataStorageManager.getInstance().isStorageSpaceAvailableForName(repository, name, DELETION_THRESHOLD)) {
// We have space available for this name, so don't delete anything
return;
}
// Delete a batch of random chunks associated with this name
// This reduces the chance of too many nodes deleting the same chunk
// when they reach their storage limit
Path dataPath = Paths.get(Settings.getInstance().getDataPath());
for (int i=0; i<CHUNK_DELETION_BATCH_SIZE; i++) {
if (isStopping) {
return;
}
this.deleteRandomFile(repository, dataPath.toFile(), name);
}
}
/** /**
* Iteratively walk through given directory and delete a single random file * Iteratively walk through given directory and delete a single random file
* *
@ -423,6 +463,7 @@ public class ArbitraryDataCleanupManager extends Thread {
} }
LOGGER.info("Deleting random file {} because we have reached max storage capacity...", randomItem.toString()); LOGGER.info("Deleting random file {} because we have reached max storage capacity...", randomItem.toString());
fireRandomItemDeletionNotification(randomItem, repository, "Deleting random file, because we have reached max storage capacity");
boolean success = randomItem.delete(); boolean success = randomItem.delete();
if (success) { if (success) {
try { try {
@ -437,6 +478,35 @@ public class ArbitraryDataCleanupManager extends Thread {
return false; return false;
} }
private void fireRandomItemDeletionNotification(File randomItem, Repository repository, String reason) {
try {
Path parentFileNamePath = randomItem.toPath().toAbsolutePath().getParent().getFileName();
if (parentFileNamePath != null) {
String signature58 = parentFileNamePath.toString();
byte[] signature = Base58.decode(signature58);
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (transactionData != null && transactionData.getType() == Transaction.TransactionType.ARBITRARY) {
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
reason,
arbitraryTransactionData.getTimestamp(),
arbitraryTransactionData.getTimestamp()
)
);
}
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
private void cleanupTempDirectory(String folder, long now, long minAge) { private void cleanupTempDirectory(String folder, long now, long minAge) {
String baseDir = Settings.getInstance().getTempDataPath(); String baseDir = Settings.getInstance().getTempDataPath();
Path tempDir = Paths.get(baseDir, folder); Path tempDir = Paths.get(baseDir, folder);

View File

@ -0,0 +1,21 @@
package org.qortal.controller.arbitrary;
public class ArbitraryDataExamination {
private boolean pass;
private String notes;
public ArbitraryDataExamination(boolean pass, String notes) {
this.pass = pass;
this.notes = notes;
}
public boolean isPass() {
return pass;
}
public String getNotes() {
return notes;
}
}
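The new ArbitraryDataExamination value object carries both the pass/fail decision and a human-readable note; shouldPreFetchData now returns it, and the note is forwarded as the DataMonitorEvent description. A minimal usage sketch (not part of this diff; the values shown are illustrative):

import org.qortal.controller.arbitrary.ArbitraryDataExamination;

public class ExaminationUsageSketch {
    public static void main(String[] args) {
        // In the node this comes from shouldPreFetchData(...); the values here are made up
        ArbitraryDataExamination examination =
                new ArbitraryDataExamination(false, "Don't store data unless it's an allowed type (public/private)");

        if (!examination.isPass()) {
            // The notes string is what ends up in the DataMonitorEvent description field
            System.out.println("Skipping prefetch: " + examination.getNotes());
        }
    }
}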

View File

@ -5,6 +5,8 @@ import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller; import org.qortal.controller.Controller;
import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo; import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo;
import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.event.DataMonitorEvent;
import org.qortal.event.EventBus;
import org.qortal.network.Peer; import org.qortal.network.Peer;
import org.qortal.repository.DataException; import org.qortal.repository.DataException;
import org.qortal.repository.Repository; import org.qortal.repository.Repository;

View File

@ -10,6 +10,8 @@ import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.Controller; import org.qortal.controller.Controller;
import org.qortal.data.transaction.ArbitraryTransactionData; import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData; import org.qortal.data.transaction.TransactionData;
import org.qortal.event.DataMonitorEvent;
import org.qortal.event.EventBus;
import org.qortal.network.Network; import org.qortal.network.Network;
import org.qortal.network.Peer; import org.qortal.network.Peer;
import org.qortal.repository.DataException; import org.qortal.repository.DataException;
@ -28,6 +30,7 @@ import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import java.nio.file.Paths; import java.nio.file.Paths;
import java.util.*; import java.util.*;
import java.util.stream.Collectors;
public class ArbitraryDataManager extends Thread { public class ArbitraryDataManager extends Thread {
@ -195,13 +198,35 @@ public class ArbitraryDataManager extends Thread {
final int limit = 100; final int limit = 100;
int offset = 0; int offset = 0;
List<ArbitraryTransactionData> allArbitraryTransactionsInDescendingOrder;
try (final Repository repository = RepositoryManager.getRepository()) {
if( name == null ) {
allArbitraryTransactionsInDescendingOrder
= repository.getArbitraryRepository()
.getLatestArbitraryTransactions();
}
else {
allArbitraryTransactionsInDescendingOrder
= repository.getArbitraryRepository()
.getLatestArbitraryTransactionsByName(name);
}
} catch( Exception e) {
LOGGER.error(e.getMessage(), e);
allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
}
// collect processed transactions in a set to ensure outdated data transactions do not get fetched
Set<ArbitraryTransactionDataHashWrapper> processedTransactions = new HashSet<>();
while (!isStopping) { while (!isStopping) {
Thread.sleep(1000L); Thread.sleep(1000L);
// Any arbitrary transactions we want to fetch data for? // Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) { try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, name, null, ConfirmationStatus.BOTH, limit, offset, true); List<byte[]> signatures = processTransactionsForSignatures(limit, offset, allArbitraryTransactionsInDescendingOrder, processedTransactions);
// LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
if (signatures == null || signatures.isEmpty()) { if (signatures == null || signatures.isEmpty()) {
offset = 0; offset = 0;
break; break;
@ -223,14 +248,38 @@ public class ArbitraryDataManager extends Thread {
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) arbitraryTransaction.getTransactionData(); ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) arbitraryTransaction.getTransactionData();
// Skip transactions that we don't need to proactively store data for // Skip transactions that we don't need to proactively store data for
if (!storageManager.shouldPreFetchData(repository, arbitraryTransactionData)) { ArbitraryDataExamination arbitraryDataExamination = storageManager.shouldPreFetchData(repository, arbitraryTransactionData);
if (!arbitraryDataExamination.isPass()) {
iterator.remove(); iterator.remove();
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
arbitraryDataExamination.getNotes(),
arbitraryTransactionData.getTimestamp(),
arbitraryTransactionData.getTimestamp()
)
);
continue; continue;
} }
// Remove transactions that we already have local data for // Remove transactions that we already have local data for
if (hasLocalData(arbitraryTransaction)) { if (hasLocalData(arbitraryTransaction)) {
iterator.remove(); iterator.remove();
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
"already have local data, skipping",
arbitraryTransactionData.getTimestamp(),
arbitraryTransactionData.getTimestamp()
)
);
} }
} }
@ -248,8 +297,21 @@ public class ArbitraryDataManager extends Thread {
// Check to see if we have had a more recent PUT // Check to see if we have had a more recent PUT
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature); ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
if (hasMoreRecentPutTransaction) { Optional<ArbitraryTransactionData> moreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
if (moreRecentPutTransaction.isPresent()) {
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
"not fetching old data",
arbitraryTransactionData.getTimestamp(),
moreRecentPutTransaction.get().getTimestamp()
)
);
// There is a more recent PUT transaction than the one we are currently processing. // There is a more recent PUT transaction than the one we are currently processing.
// When a PUT is issued, it replaces any layers that would have been there before. // When a PUT is issued, it replaces any layers that would have been there before.
// Therefore any data relating to this older transaction is no longer needed and we // Therefore any data relating to this older transaction is no longer needed and we
@ -257,10 +319,34 @@ public class ArbitraryDataManager extends Thread {
continue; continue;
} }
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
"fetching data",
arbitraryTransactionData.getTimestamp(),
arbitraryTransactionData.getTimestamp()
)
);
// Ask our connected peers if they have files for this signature // Ask our connected peers if they have files for this signature
// This process automatically then fetches the files themselves if a peer is found // This process automatically then fetches the files themselves if a peer is found
fetchData(arbitraryTransactionData); fetchData(arbitraryTransactionData);
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
"fetched data",
arbitraryTransactionData.getTimestamp(),
arbitraryTransactionData.getTimestamp()
)
);
} catch (DataException e) { } catch (DataException e) {
LOGGER.error("Repository issue when fetching arbitrary transaction data", e); LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
} }
@ -274,6 +360,20 @@ public class ArbitraryDataManager extends Thread {
final int limit = 100; final int limit = 100;
int offset = 0; int offset = 0;
List<ArbitraryTransactionData> allArbitraryTransactionsInDescendingOrder;
try (final Repository repository = RepositoryManager.getRepository()) {
allArbitraryTransactionsInDescendingOrder
= repository.getArbitraryRepository()
.getLatestArbitraryTransactions();
} catch( Exception e) {
LOGGER.error(e.getMessage(), e);
allArbitraryTransactionsInDescendingOrder = new ArrayList<>(0);
}
// collect processed transactions in a set to ensure outdated data transactions do not get fetched
Set<ArbitraryTransactionDataHashWrapper> processedTransactions = new HashSet<>();
while (!isStopping) { while (!isStopping) {
final int minSeconds = 3; final int minSeconds = 3;
final int maxSeconds = 10; final int maxSeconds = 10;
@ -282,8 +382,8 @@ public class ArbitraryDataManager extends Thread {
// Any arbitrary transactions we want to fetch data for? // Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) { try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true); List<byte[]> signatures = processTransactionsForSignatures(limit, offset, allArbitraryTransactionsInDescendingOrder, processedTransactions);
// LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
if (signatures == null || signatures.isEmpty()) { if (signatures == null || signatures.isEmpty()) {
offset = 0; offset = 0;
break; break;
@ -328,26 +428,74 @@ public class ArbitraryDataManager extends Thread {
continue; continue;
} }
// Check to see if we have had a more recent PUT // No longer need to see if we have had a more recent PUT since we compared the transactions to process
// to the transactions previously processed, so we can fetch the transaction data, notify the event bus,
// fetch the metadata and notify the event bus again
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature); ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
if (hasMoreRecentPutTransaction) {
// There is a more recent PUT transaction than the one we are currently processing.
// When a PUT is issued, it replaces any layers that would have been there before.
// Therefore any data relating to this older transaction is no longer needed and we
// shouldn't fetch it from the network.
continue;
}
// Ask our connected peers if they have metadata for this signature // Ask our connected peers if they have metadata for this signature
fetchMetadata(arbitraryTransactionData); fetchMetadata(arbitraryTransactionData);
EventBus.INSTANCE.notify(
new DataMonitorEvent(
System.currentTimeMillis(),
arbitraryTransactionData.getIdentifier(),
arbitraryTransactionData.getName(),
arbitraryTransactionData.getService().name(),
"fetched metadata",
arbitraryTransactionData.getTimestamp(),
arbitraryTransactionData.getTimestamp()
)
);
} catch (DataException e) { } catch (DataException e) {
LOGGER.error("Repository issue when fetching arbitrary transaction data", e); LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
} }
} }
} }
private static List<byte[]> processTransactionsForSignatures(
int limit,
int offset,
List<ArbitraryTransactionData> transactionsInDescendingOrder,
Set<ArbitraryTransactionDataHashWrapper> processedTransactions) {
// these transactions are in descending order, latest transactions come first
List<ArbitraryTransactionData> transactions
= transactionsInDescendingOrder.stream()
.skip(offset)
.limit(limit)
.collect(Collectors.toList());
// wrap the transactions, so they can be used for hashing and comparing
// Class ArbitraryTransactionDataHashWrapper supports hashCode() and equals(...) for this purpose
List<ArbitraryTransactionDataHashWrapper> wrappedTransactions
= transactions.stream()
.map(transaction -> new ArbitraryTransactionDataHashWrapper(transaction))
.collect(Collectors.toList());
// create a set of wrappers and populate it first to last, so that all outdated transactions get rejected
Set<ArbitraryTransactionDataHashWrapper> transactionsToProcess = new HashSet<>(wrappedTransactions.size());
for(ArbitraryTransactionDataHashWrapper wrappedTransaction : wrappedTransactions) {
transactionsToProcess.add(wrappedTransaction);
}
// remove the matches for previously processed transactions,
// because these transactions have had updates that have already been processed
transactionsToProcess.removeAll(processedTransactions);
// add to processed transactions to compare and remove matches from future processing iterations
processedTransactions.addAll(transactionsToProcess);
List<byte[]> signatures
= transactionsToProcess.stream()
.map(transactionToProcess -> transactionToProcess.getData()
.getSignature())
.collect(Collectors.toList());
return signatures;
}
private ArbitraryTransaction fetchTransaction(final Repository repository, byte[] signature) { private ArbitraryTransaction fetchTransaction(final Repository repository, byte[] signature) {
try { try {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature); TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);

View File

@ -155,31 +155,24 @@ public class ArbitraryDataStorageManager extends Thread {
* @param arbitraryTransactionData - the transaction * @param arbitraryTransactionData - the transaction
* @return boolean - whether to prefetch or not * @return boolean - whether to prefetch or not
*/ */
public boolean shouldPreFetchData(Repository repository, ArbitraryTransactionData arbitraryTransactionData) { public ArbitraryDataExamination shouldPreFetchData(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
String name = arbitraryTransactionData.getName(); String name = arbitraryTransactionData.getName();
// Only fetch data associated with hashes, as we already have RAW_DATA // Only fetch data associated with hashes, as we already have RAW_DATA
if (arbitraryTransactionData.getDataType() != ArbitraryTransactionData.DataType.DATA_HASH) { if (arbitraryTransactionData.getDataType() != ArbitraryTransactionData.DataType.DATA_HASH) {
return false; return new ArbitraryDataExamination(false, "Only fetch data associated with hashes");
} }
// Don't fetch anything more if we're (nearly) out of space // Don't fetch anything more if we're (nearly) out of space
// Make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to // Make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to
// avoid a fetch/delete loop // avoid a fetch/delete loop
if (!this.isStorageSpaceAvailable(STORAGE_FULL_THRESHOLD)) { if (!this.isStorageSpaceAvailable(STORAGE_FULL_THRESHOLD)) {
return false; return new ArbitraryDataExamination(false,"Don't fetch anything more if we're (nearly) out of space");
}
// Don't fetch anything if we're (nearly) out of space for this name
// Again, make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to
// avoid a fetch/delete loop
if (!this.isStorageSpaceAvailableForName(repository, arbitraryTransactionData.getName(), STORAGE_FULL_THRESHOLD)) {
return false;
} }
// Don't store data unless it's an allowed type (public/private) // Don't store data unless it's an allowed type (public/private)
if (!this.isDataTypeAllowed(arbitraryTransactionData)) { if (!this.isDataTypeAllowed(arbitraryTransactionData)) {
return false; return new ArbitraryDataExamination(false, "Don't store data unless it's an allowed type (public/private)");
} }
// Handle transactions without names differently // Handle transactions without names differently
@ -189,21 +182,21 @@ public class ArbitraryDataStorageManager extends Thread {
// Never fetch data from blocked names, even if they are followed // Never fetch data from blocked names, even if they are followed
if (ListUtils.isNameBlocked(name)) { if (ListUtils.isNameBlocked(name)) {
return false; return new ArbitraryDataExamination(false, "blocked name");
} }
switch (Settings.getInstance().getStoragePolicy()) { switch (Settings.getInstance().getStoragePolicy()) {
case FOLLOWED: case FOLLOWED:
case FOLLOWED_OR_VIEWED: case FOLLOWED_OR_VIEWED:
return ListUtils.isFollowingName(name); return new ArbitraryDataExamination(ListUtils.isFollowingName(name), Settings.getInstance().getStoragePolicy().name());
case ALL: case ALL:
return true; return new ArbitraryDataExamination(true, Settings.getInstance().getStoragePolicy().name());
case NONE: case NONE:
case VIEWED: case VIEWED:
default: default:
return false; return new ArbitraryDataExamination(false, Settings.getInstance().getStoragePolicy().name());
} }
} }
@ -214,17 +207,17 @@ public class ArbitraryDataStorageManager extends Thread {
* *
* @return boolean - whether the storage policy allows for unnamed data * @return boolean - whether the storage policy allows for unnamed data
*/ */
private boolean shouldPreFetchDataWithoutName() { private ArbitraryDataExamination shouldPreFetchDataWithoutName() {
switch (Settings.getInstance().getStoragePolicy()) { switch (Settings.getInstance().getStoragePolicy()) {
case ALL: case ALL:
return true; return new ArbitraryDataExamination(true, "Fetching all data");
case NONE: case NONE:
case VIEWED: case VIEWED:
case FOLLOWED: case FOLLOWED:
case FOLLOWED_OR_VIEWED: case FOLLOWED_OR_VIEWED:
default: default:
return false; return new ArbitraryDataExamination(false, Settings.getInstance().getStoragePolicy().name());
} }
} }
@ -484,51 +477,6 @@ public class ArbitraryDataStorageManager extends Thread {
return true; return true;
} }
public boolean isStorageSpaceAvailableForName(Repository repository, String name, double threshold) {
if (!this.isStorageSpaceAvailable(threshold)) {
// No storage space available at all, so no need to check this name
return false;
}
if (Settings.getInstance().getStoragePolicy() == StoragePolicy.ALL) {
// Using storage policy ALL, so don't limit anything per name
return true;
}
if (name == null) {
// This transaction doesn't have a name, so fall back to total space limitations
return true;
}
int followedNamesCount = ListUtils.followedNamesCount();
if (followedNamesCount == 0) {
// Not following any names, so we have space
return true;
}
long totalSizeForName = 0;
long maxStoragePerName = this.storageCapacityPerName(threshold);
// Fetch all hosted transactions
List<ArbitraryTransactionData> hostedTransactions = this.listAllHostedTransactions(repository, null, null);
for (ArbitraryTransactionData transactionData : hostedTransactions) {
String transactionName = transactionData.getName();
if (!Objects.equals(name, transactionName)) {
// Transaction relates to a different name
continue;
}
totalSizeForName += transactionData.getSize();
}
// Have we reached the limit for this name?
if (totalSizeForName > maxStoragePerName) {
return false;
}
return true;
}
public long storageCapacityPerName(double threshold) { public long storageCapacityPerName(double threshold) {
int followedNamesCount = ListUtils.followedNamesCount(); int followedNamesCount = ListUtils.followedNamesCount();
if (followedNamesCount == 0) { if (followedNamesCount == 0) {

View File

@ -0,0 +1,48 @@
package org.qortal.controller.arbitrary;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.transaction.ArbitraryTransactionData;
import java.util.Objects;
public class ArbitraryTransactionDataHashWrapper {
private ArbitraryTransactionData data;
private int service;
private String name;
private String identifier;
public ArbitraryTransactionDataHashWrapper(ArbitraryTransactionData data) {
this.data = data;
this.service = data.getService().value;
this.name = data.getName();
this.identifier = data.getIdentifier();
}
public ArbitraryTransactionDataHashWrapper(int service, String name, String identifier) {
this.service = service;
this.name = name;
this.identifier = identifier;
}
public ArbitraryTransactionData getData() {
return data;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ArbitraryTransactionDataHashWrapper that = (ArbitraryTransactionDataHashWrapper) o;
return service == that.service && name.equals(that.name) && Objects.equals(identifier, that.identifier);
}
@Override
public int hashCode() {
return Objects.hash(service, name, identifier);
}
}
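Because equals() and hashCode() are keyed on (service, name, identifier) rather than on the transaction signature, any two transactions for the same resource collapse to one entry in a HashSet, which is how the managers above reject outdated PUTs. A small standalone sketch using the (service, name, identifier) constructor (not part of this diff; values are invented):

import java.util.HashSet;
import java.util.Set;

import org.qortal.controller.arbitrary.ArbitraryTransactionDataHashWrapper;

public class HashWrapperDedupSketch {
    public static void main(String[] args) {
        Set<ArbitraryTransactionDataHashWrapper> processed = new HashSet<>();

        // Two updates to the same resource: same service value, name and identifier
        boolean firstAdded = processed.add(new ArbitraryTransactionDataHashWrapper(200, "SomeName", "some-identifier"));
        boolean secondAdded = processed.add(new ArbitraryTransactionDataHashWrapper(200, "SomeName", "some-identifier"));

        System.out.println(firstAdded);   // true  - first (latest) transaction for the resource is kept
        System.out.println(secondAdded);  // false - older duplicate for the same resource is rejected
    }
}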

View File

@ -0,0 +1,33 @@
package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import java.util.TimerTask;
public class RebuildArbitraryResourceCacheTask extends TimerTask {
private static final Logger LOGGER = LogManager.getLogger(RebuildArbitraryResourceCacheTask.class);
public static final long MILLIS_IN_HOUR = 60 * 60 * 1000;
public static final long MILLIS_IN_MINUTE = 60 * 1000;
private static final String REBUILD_ARBITRARY_RESOURCE_CACHE_TASK = "Rebuild Arbitrary Resource Cache Task";
@Override
public void run() {
Thread.currentThread().setName(REBUILD_ARBITRARY_RESOURCE_CACHE_TASK);
try (final Repository repository = RepositoryManager.getRepository()) {
ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, true);
}
catch( DataException e ) {
LOGGER.error(e.getMessage(), e);
}
}
}
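A TimerTask still has to be scheduled somewhere. A hedged sketch of wiring this task to a java.util.Timer (not part of this diff; the delay and period below are illustrative, the node's real schedule is set elsewhere in the Controller):

import java.util.Timer;

import org.qortal.controller.arbitrary.RebuildArbitraryResourceCacheTask;

public class RebuildCacheSchedulingSketch {
    public static void main(String[] args) {
        Timer timer = new Timer("rebuild-arbitrary-resource-cache", true); // daemon timer

        // Illustrative values only, built from the constants exposed by the task class
        long initialDelay = RebuildArbitraryResourceCacheTask.MILLIS_IN_MINUTE * 5;
        long period = RebuildArbitraryResourceCacheTask.MILLIS_IN_HOUR;

        timer.schedule(new RebuildArbitraryResourceCacheTask(), initialDelay, period);
    }
}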

View File

@ -83,6 +83,7 @@ public abstract class Bitcoiny implements ForeignBlockchain {
return this.bitcoinjContext; return this.bitcoinjContext;
} }
@Override
public String getCurrencyCode() { public String getCurrencyCode() {
return this.currencyCode; return this.currencyCode;
} }

View File

@ -2,6 +2,8 @@ package org.qortal.crosschain;
public interface ForeignBlockchain { public interface ForeignBlockchain {
public String getCurrencyCode();
public boolean isValidAddress(String address); public boolean isValidAddress(String address);
public boolean isValidWalletKey(String walletKey); public boolean isValidWalletKey(String walletKey);

View File

@ -0,0 +1,34 @@
package org.qortal.data.arbitrary;
import org.qortal.arbitrary.misc.Service;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class ArbitraryDataIndex {
public String t;
public String n;
public int c;
public String l;
public ArbitraryDataIndex() {}
public ArbitraryDataIndex(String t, String n, int c, String l) {
this.t = t;
this.n = n;
this.c = c;
this.l = l;
}
@Override
public String toString() {
return "ArbitraryDataIndex{" +
"t='" + t + '\'' +
", n='" + n + '\'' +
", c=" + c +
", l='" + l + '\'' +
'}';
}
}

View File

@ -0,0 +1,41 @@
package org.qortal.data.arbitrary;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class ArbitraryDataIndexDetail {
public String issuer;
public int rank;
public String term;
public String name;
public int category;
public String link;
public String indexIdentifer;
public ArbitraryDataIndexDetail() {}
public ArbitraryDataIndexDetail(String issuer, int rank, ArbitraryDataIndex index, String indexIdentifer) {
this.issuer = issuer;
this.rank = rank;
this.term = index.t;
this.name = index.n;
this.category = index.c;
this.link = index.l;
this.indexIdentifer = indexIdentifer;
}
@Override
public String toString() {
return "ArbitraryDataIndexDetail{" +
"issuer='" + issuer + '\'' +
", rank=" + rank +
", term='" + term + '\'' +
", name='" + name + '\'' +
", category=" + category +
", link='" + link + '\'' +
", indexIdentifer='" + indexIdentifer + '\'' +
'}';
}
}
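The single-character fields keep the published index JSON compact, and ArbitraryDataIndexDetail expands them back into full field names when the cache is built (term from t, name from n, category from c, link from l). A small sketch of that mapping (not part of this diff; issuer, rank and identifier values are invented):

import org.qortal.data.arbitrary.ArbitraryDataIndex;
import org.qortal.data.arbitrary.ArbitraryDataIndexDetail;

public class IndexMappingSketch {
    public static void main(String[] args) {
        // t = term, n = name, c = category, l = link
        ArbitraryDataIndex index = new ArbitraryDataIndex("qortal", "SomePublisher", 1, "/WEBSITE/SomePublisher");

        // Expanded form as held in the cache; issuer, rank and identifier here are made up
        ArbitraryDataIndexDetail detail = new ArbitraryDataIndexDetail("SomeIssuer", 0, index, "idx-example");

        System.out.println(index);
        System.out.println(detail);
    }
}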

View File

@ -0,0 +1,38 @@
package org.qortal.data.arbitrary;
import org.qortal.arbitrary.misc.Service;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import java.util.Objects;
@XmlAccessorType(XmlAccessType.FIELD)
public class ArbitraryDataIndexScoreKey {
public String name;
public int category;
public String link;
public ArbitraryDataIndexScoreKey() {}
public ArbitraryDataIndexScoreKey(String name, int category, String link) {
this.name = name;
this.category = category;
this.link = link;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ArbitraryDataIndexScoreKey that = (ArbitraryDataIndexScoreKey) o;
return category == that.category && Objects.equals(name, that.name) && Objects.equals(link, that.link);
}
@Override
public int hashCode() {
return Objects.hash(name, category, link);
}
}

View File

@ -0,0 +1,38 @@
package org.qortal.data.arbitrary;
import org.qortal.arbitrary.misc.Service;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class ArbitraryDataIndexScorecard {
public double score;
public String name;
public int category;
public String link;
public ArbitraryDataIndexScorecard() {}
public ArbitraryDataIndexScorecard(double score, String name, int category, String link) {
this.score = score;
this.name = name;
this.category = category;
this.link = link;
}
public double getScore() {
return score;
}
@Override
public String toString() {
return "ArbitraryDataIndexScorecard{" +
"score=" + score +
", name='" + name + '\'' +
", category=" + category +
", link='" + link + '\'' +
'}';
}
}
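getScore() is the only accessor, which suggests scorecards are intended to be ranked. A hedged sketch of sorting a list of them by descending score (not part of this diff; the data values are invented):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

import org.qortal.data.arbitrary.ArbitraryDataIndexScorecard;

public class ScorecardSortSketch {
    public static void main(String[] args) {
        List<ArbitraryDataIndexScorecard> scorecards = new ArrayList<>();
        scorecards.add(new ArbitraryDataIndexScorecard(1.0, "NameA", 1, "/WEBSITE/NameA"));
        scorecards.add(new ArbitraryDataIndexScorecard(2.5, "NameB", 1, "/WEBSITE/NameB"));

        // Highest score first
        scorecards.sort(Comparator.comparingDouble(ArbitraryDataIndexScorecard::getScore).reversed());

        scorecards.forEach(System.out::println);
    }
}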

View File

@ -0,0 +1,57 @@
package org.qortal.data.arbitrary;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class DataMonitorInfo {
private long timestamp;
private String identifier;
private String name;
private String service;
private String description;
private long transactionTimestamp;
private long latestPutTimestamp;
public DataMonitorInfo() {
}
public DataMonitorInfo(long timestamp, String identifier, String name, String service, String description, long transactionTimestamp, long latestPutTimestamp) {
this.timestamp = timestamp;
this.identifier = identifier;
this.name = name;
this.service = service;
this.description = description;
this.transactionTimestamp = transactionTimestamp;
this.latestPutTimestamp = latestPutTimestamp;
}
public long getTimestamp() {
return timestamp;
}
public String getIdentifier() {
return identifier;
}
public String getName() {
return name;
}
public String getService() {
return service;
}
public String getDescription() {
return description;
}
public long getTransactionTimestamp() {
return transactionTimestamp;
}
public long getLatestPutTimestamp() {
return latestPutTimestamp;
}
}

View File

@ -0,0 +1,23 @@
package org.qortal.data.arbitrary;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
public class IndexCache {
public static final IndexCache SINGLETON = new IndexCache();
private ConcurrentHashMap<String, List<ArbitraryDataIndexDetail>> indicesByTerm = new ConcurrentHashMap<>();
private ConcurrentHashMap<String, List<ArbitraryDataIndexDetail>> indicesByIssuer = new ConcurrentHashMap<>();
public static IndexCache getInstance() {
return SINGLETON;
}
public ConcurrentHashMap<String, List<ArbitraryDataIndexDetail>> getIndicesByTerm() {
return indicesByTerm;
}
public ConcurrentHashMap<String, List<ArbitraryDataIndexDetail>> getIndicesByIssuer() {
return indicesByIssuer;
}
}
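The two ConcurrentHashMaps allow lock-free reads keyed either by search term or by issuer name. A minimal sketch of querying the term map (not part of this diff; the term shown is illustrative, entries are normally populated by the indexing task):

import java.util.Collections;
import java.util.List;

import org.qortal.data.arbitrary.ArbitraryDataIndexDetail;
import org.qortal.data.arbitrary.IndexCache;

public class IndexCacheLookupSketch {
    public static void main(String[] args) {
        List<ArbitraryDataIndexDetail> matches = IndexCache.getInstance()
                .getIndicesByTerm()
                .getOrDefault("qortal", Collections.emptyList());

        matches.forEach(System.out::println);
    }
}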

View File

@ -200,4 +200,26 @@ public class ArbitraryTransactionData extends TransactionData {
return this.payments; return this.payments;
} }
@Override
public String toString() {
return "ArbitraryTransactionData{" +
"version=" + version +
", service=" + service +
", nonce=" + nonce +
", size=" + size +
", name='" + name + '\'' +
", identifier='" + identifier + '\'' +
", method=" + method +
", compression=" + compression +
", dataType=" + dataType +
", type=" + type +
", timestamp=" + timestamp +
", fee=" + fee +
", txGroupId=" + txGroupId +
", blockHeight=" + blockHeight +
", blockSequence=" + blockSequence +
", approvalStatus=" + approvalStatus +
", approvalHeight=" + approvalHeight +
'}';
}
} }

View File

@ -0,0 +1,57 @@
package org.qortal.event;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class DataMonitorEvent implements Event{
private long timestamp;
private String identifier;
private String name;
private String service;
private String description;
private long transactionTimestamp;
private long latestPutTimestamp;
public DataMonitorEvent() {
}
public DataMonitorEvent(long timestamp, String identifier, String name, String service, String description, long transactionTimestamp, long latestPutTimestamp) {
this.timestamp = timestamp;
this.identifier = identifier;
this.name = name;
this.service = service;
this.description = description;
this.transactionTimestamp = transactionTimestamp;
this.latestPutTimestamp = latestPutTimestamp;
}
public long getTimestamp() {
return timestamp;
}
public String getIdentifier() {
return identifier;
}
public String getName() {
return name;
}
public String getService() {
return service;
}
public String getDescription() {
return description;
}
public long getTransactionTimestamp() {
return transactionTimestamp;
}
public long getLatestPutTimestamp() {
return latestPutTimestamp;
}
}
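The managers above only publish DataMonitorEvent via EventBus.INSTANCE.notify(...). A hedged sketch of consuming it, assuming the existing org.qortal.event.Listener interface with its listen(Event) callback (the listener class itself is illustrative and not part of this diff):

import org.qortal.event.DataMonitorEvent;
import org.qortal.event.Event;
import org.qortal.event.EventBus;
import org.qortal.event.Listener;

public class DataMonitorListenerSketch implements Listener {

    @Override
    public void listen(Event event) {
        if (event instanceof DataMonitorEvent) {
            DataMonitorEvent dataMonitorEvent = (DataMonitorEvent) event;
            // Description carries the reason string set by the arbitrary data managers
            System.out.printf("%s/%s: %s%n",
                    dataMonitorEvent.getName(),
                    dataMonitorEvent.getIdentifier(),
                    dataMonitorEvent.getDescription());
        }
    }

    public static void main(String[] args) {
        EventBus.INSTANCE.addListener(new DataMonitorListenerSketch());
    }
}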

View File

@ -674,8 +674,8 @@ public class Group {
public void uninvite(GroupInviteTransactionData groupInviteTransactionData) throws DataException { public void uninvite(GroupInviteTransactionData groupInviteTransactionData) throws DataException {
String invitee = groupInviteTransactionData.getInvitee(); String invitee = groupInviteTransactionData.getInvitee();
// If member exists then they were added when invite matched join request // If member exists and the join request is present then they were added when invite matched join request
if (this.memberExists(invitee)) { if (this.memberExists(invitee) && groupInviteTransactionData.getJoinReference() != null) {
// Rebuild join request using cached reference to transaction that created join request. // Rebuild join request using cached reference to transaction that created join request.
this.rebuildJoinRequest(invitee, groupInviteTransactionData.getJoinReference()); this.rebuildJoinRequest(invitee, groupInviteTransactionData.getJoinReference());

View File

@ -76,7 +76,7 @@ public interface ATRepository {
* Although <tt>expectedValue</tt>, if provided, is natively an unsigned long, * Although <tt>expectedValue</tt>, if provided, is natively an unsigned long,
* the data segment comparison is done via unsigned hex string. * the data segment comparison is done via unsigned hex string.
*/ */
public List<ATStateData> getMatchingFinalATStates(byte[] codeHash, String buyerAddress, String sellerAddress, Boolean isFinished, public List<ATStateData> getMatchingFinalATStates(byte[] codeHash, byte[] buyerPublicKey, byte[] sellerPublicKey, Boolean isFinished,
Integer dataByteOffset, Long expectedValue, Integer minimumFinalHeight, Integer dataByteOffset, Long expectedValue, Integer minimumFinalHeight,
Integer limit, Integer offset, Boolean reverse) throws DataException; Integer limit, Integer offset, Boolean reverse) throws DataException;

View File

@ -27,6 +27,10 @@ public interface ArbitraryRepository {
public List<ArbitraryTransactionData> getArbitraryTransactions(String name, Service service, String identifier, long since) throws DataException; public List<ArbitraryTransactionData> getArbitraryTransactions(String name, Service service, String identifier, long since) throws DataException;
List<ArbitraryTransactionData> getLatestArbitraryTransactions() throws DataException;
List<ArbitraryTransactionData> getLatestArbitraryTransactionsByName(String name) throws DataException;
public ArbitraryTransactionData getInitialTransaction(String name, Service service, Method method, String identifier) throws DataException; public ArbitraryTransactionData getInitialTransaction(String name, Service service, Method method, String identifier) throws DataException;
public ArbitraryTransactionData getLatestTransaction(String name, Service service, Method method, String identifier) throws DataException; public ArbitraryTransactionData getLatestTransaction(String name, Service service, Method method, String identifier) throws DataException;
@ -42,7 +46,7 @@ public interface ArbitraryRepository {
public List<ArbitraryResourceData> getArbitraryResources(Service service, String identifier, List<String> names, boolean defaultResource, Boolean followedOnly, Boolean excludeBlocked, Boolean includeMetadata, Boolean includeStatus, Integer limit, Integer offset, Boolean reverse) throws DataException; public List<ArbitraryResourceData> getArbitraryResources(Service service, String identifier, List<String> names, boolean defaultResource, Boolean followedOnly, Boolean excludeBlocked, Boolean includeMetadata, Boolean includeStatus, Integer limit, Integer offset, Boolean reverse) throws DataException;
public List<ArbitraryResourceData> searchArbitraryResources(Service service, String query, String identifier, List<String> names, String title, String description, boolean prefixOnly, List<String> namesFilter, boolean defaultResource, SearchMode mode, Integer minLevel, Boolean followedOnly, Boolean excludeBlocked, Boolean includeMetadata, Boolean includeStatus, Long before, Long after, Integer limit, Integer offset, Boolean reverse) throws DataException; public List<ArbitraryResourceData> searchArbitraryResources(Service service, String query, String identifier, List<String> names, String title, String description, List<String> keywords, boolean prefixOnly, List<String> namesFilter, boolean defaultResource, SearchMode mode, Integer minLevel, Boolean followedOnly, Boolean excludeBlocked, Boolean includeMetadata, Boolean includeStatus, Long before, Long after, Integer limit, Integer offset, Boolean reverse) throws DataException;
List<ArbitraryResourceData> searchArbitraryResourcesSimple( List<ArbitraryResourceData> searchArbitraryResourcesSimple(
Service service, Service service,

View File

@ -5,6 +5,7 @@ import com.google.common.primitives.Longs;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller; import org.qortal.controller.Controller;
import org.qortal.crypto.Crypto;
import org.qortal.data.at.ATData; import org.qortal.data.at.ATData;
import org.qortal.data.at.ATStateData; import org.qortal.data.at.ATStateData;
import org.qortal.repository.ATRepository; import org.qortal.repository.ATRepository;
@ -403,7 +404,7 @@ public class HSQLDBATRepository implements ATRepository {
} }
@Override @Override
public List<ATStateData> getMatchingFinalATStates(byte[] codeHash, String buyerAddress, String sellerAddress, Boolean isFinished, public List<ATStateData> getMatchingFinalATStates(byte[] codeHash, byte[] buyerPublicKey, byte[] sellerPublicKey, Boolean isFinished,
Integer dataByteOffset, Long expectedValue, Integer minimumFinalHeight, Integer dataByteOffset, Long expectedValue, Integer minimumFinalHeight,
Integer limit, Integer offset, Boolean reverse) throws DataException { Integer limit, Integer offset, Boolean reverse) throws DataException {
StringBuilder sql = new StringBuilder(1024); StringBuilder sql = new StringBuilder(1024);
@ -426,9 +427,9 @@ public class HSQLDBATRepository implements ATRepository {
// Both must be the same direction (DESC) also // Both must be the same direction (DESC) also
sql.append("ORDER BY ATStates.height DESC LIMIT 1) AS FinalATStates "); sql.append("ORDER BY ATStates.height DESC LIMIT 1) AS FinalATStates ");
// Optional LEFT JOIN with ATTRANSACTIONS for buyerAddress // Optional JOIN with ATTRANSACTIONS for buyerAddress
if (buyerAddress != null && !buyerAddress.isEmpty()) { if (buyerPublicKey != null && buyerPublicKey.length > 0) {
sql.append("LEFT JOIN ATTRANSACTIONS tx ON tx.at_address = ATs.AT_address "); sql.append("JOIN ATTRANSACTIONS tx ON tx.at_address = ATs.AT_address ");
} }
sql.append("WHERE ATs.code_hash = ? "); sql.append("WHERE ATs.code_hash = ? ");
@ -450,18 +451,18 @@ public class HSQLDBATRepository implements ATRepository {
bindParams.add(rawExpectedValue); bindParams.add(rawExpectedValue);
} }
if (buyerAddress != null && !buyerAddress.isEmpty()) { if (buyerPublicKey != null && buyerPublicKey.length > 0 ) {
sql.append("AND tx.recipient = ? "); // the buyer must be the recipient of the transaction and not the creator of the AT
bindParams.add(buyerAddress); sql.append("AND tx.recipient = ? AND ATs.creator != ? ");
bindParams.add(Crypto.toAddress(buyerPublicKey));
bindParams.add(buyerPublicKey);
} }
if (sellerAddress != null && !sellerAddress.isEmpty()) { if (sellerPublicKey != null && sellerPublicKey.length > 0) {
// Convert sellerAddress to publicKey (method depends on your implementation)
AccountData accountData = this.repository.getAccountRepository().getAccount(sellerAddress);
byte[] publicKey = accountData.getPublicKey();
sql.append("AND ATs.creator = ? "); sql.append("AND ATs.creator = ? ");
bindParams.add(publicKey); bindParams.add(sellerPublicKey);
} }
sql.append(" ORDER BY FinalATStates.height "); sql.append(" ORDER BY FinalATStates.height ");

View File

@ -7,7 +7,6 @@ import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata; import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Category; import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service; import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.data.arbitrary.ArbitraryResourceCache; import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.data.arbitrary.ArbitraryResourceData; import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata; import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
@ -29,6 +28,7 @@ import org.qortal.utils.ListUtils;
import java.sql.ResultSet; import java.sql.ResultSet;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import java.util.Optional; import java.util.Optional;
@ -227,6 +227,144 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
} }
} }
@Override
public List<ArbitraryTransactionData> getLatestArbitraryTransactions() throws DataException {
String sql = "SELECT type, reference, signature, creator, created_when, fee, " +
"tx_group_id, block_height, approval_status, approval_height, " +
"version, nonce, service, size, is_data_raw, data, metadata_hash, " +
"name, identifier, update_method, secret, compression FROM ArbitraryTransactions " +
"JOIN Transactions USING (signature) " +
"WHERE name IS NOT NULL " +
"ORDER BY created_when DESC";
List<ArbitraryTransactionData> arbitraryTransactionData = new ArrayList<>();
try (ResultSet resultSet = this.repository.checkedExecute(sql)) {
if (resultSet == null)
return new ArrayList<>(0);
do {
byte[] reference = resultSet.getBytes(2);
byte[] signature = resultSet.getBytes(3);
byte[] creatorPublicKey = resultSet.getBytes(4);
long timestamp = resultSet.getLong(5);
Long fee = resultSet.getLong(6);
if (fee == 0 && resultSet.wasNull())
fee = null;
int txGroupId = resultSet.getInt(7);
Integer blockHeight = resultSet.getInt(8);
if (blockHeight == 0 && resultSet.wasNull())
blockHeight = null;
ApprovalStatus approvalStatus = ApprovalStatus.valueOf(resultSet.getInt(9));
Integer approvalHeight = resultSet.getInt(10);
if (approvalHeight == 0 && resultSet.wasNull())
approvalHeight = null;
BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, creatorPublicKey, fee, approvalStatus, blockHeight, approvalHeight, signature);
int version = resultSet.getInt(11);
int nonce = resultSet.getInt(12);
int serviceInt = resultSet.getInt(13);
int size = resultSet.getInt(14);
boolean isDataRaw = resultSet.getBoolean(15); // NOT NULL, so no null to false
DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH;
byte[] data = resultSet.getBytes(16);
byte[] metadataHash = resultSet.getBytes(17);
String nameResult = resultSet.getString(18);
String identifierResult = resultSet.getString(19);
Method method = Method.valueOf(resultSet.getInt(20));
byte[] secret = resultSet.getBytes(21);
Compression compression = Compression.valueOf(resultSet.getInt(22));
// FUTURE: get payments from signature if needed. Avoiding for now to reduce database calls.
ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
version, serviceInt, nonce, size, nameResult, identifierResult, method, secret,
compression, data, dataType, metadataHash, null);
arbitraryTransactionData.add(transactionData);
} while (resultSet.next());
return arbitraryTransactionData;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary transactions from repository", e);
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
return new ArrayList<>(0);
}
}
@Override
public List<ArbitraryTransactionData> getLatestArbitraryTransactionsByName( String name ) throws DataException {
String sql = "SELECT type, reference, signature, creator, created_when, fee, " +
"tx_group_id, block_height, approval_status, approval_height, " +
"version, nonce, service, size, is_data_raw, data, metadata_hash, " +
"name, identifier, update_method, secret, compression FROM ArbitraryTransactions " +
"JOIN Transactions USING (signature) " +
"WHERE name = ? " +
"ORDER BY created_when DESC";
List<ArbitraryTransactionData> arbitraryTransactionData = new ArrayList<>();
try (ResultSet resultSet = this.repository.checkedExecute(sql, name)) {
if (resultSet == null)
return new ArrayList<>(0);
do {
byte[] reference = resultSet.getBytes(2);
byte[] signature = resultSet.getBytes(3);
byte[] creatorPublicKey = resultSet.getBytes(4);
long timestamp = resultSet.getLong(5);
Long fee = resultSet.getLong(6);
if (fee == 0 && resultSet.wasNull())
fee = null;
int txGroupId = resultSet.getInt(7);
Integer blockHeight = resultSet.getInt(8);
if (blockHeight == 0 && resultSet.wasNull())
blockHeight = null;
ApprovalStatus approvalStatus = ApprovalStatus.valueOf(resultSet.getInt(9));
Integer approvalHeight = resultSet.getInt(10);
if (approvalHeight == 0 && resultSet.wasNull())
approvalHeight = null;
BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, creatorPublicKey, fee, approvalStatus, blockHeight, approvalHeight, signature);
int version = resultSet.getInt(11);
int nonce = resultSet.getInt(12);
int serviceInt = resultSet.getInt(13);
int size = resultSet.getInt(14);
boolean isDataRaw = resultSet.getBoolean(15); // NOT NULL, so no null-to-false conversion needed
DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH;
byte[] data = resultSet.getBytes(16);
byte[] metadataHash = resultSet.getBytes(17);
String nameResult = resultSet.getString(18);
String identifierResult = resultSet.getString(19);
Method method = Method.valueOf(resultSet.getInt(20));
byte[] secret = resultSet.getBytes(21);
Compression compression = Compression.valueOf(resultSet.getInt(22));
// FUTURE: get payments from signature if needed. Avoiding for now to reduce database calls.
ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
version, serviceInt, nonce, size, nameResult, identifierResult, method, secret,
compression, data, dataType, metadataHash, null);
arbitraryTransactionData.add(transactionData);
} while (resultSet.next());
return arbitraryTransactionData;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary transactions from repository", e);
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
return new ArrayList<>(0);
}
}
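A minimal usage sketch (illustrative only; it assumes an open Repository and uses the hypothetical name "QortalDemo"):

// fetch every arbitrary transaction published under a name, newest first
List<ArbitraryTransactionData> history =
        repository.getArbitraryRepository().getLatestArbitraryTransactionsByName("QortalDemo");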
private ArbitraryTransactionData getSingleTransaction(String name, Service service, Method method, String identifier, boolean firstNotLast) throws DataException { private ArbitraryTransactionData getSingleTransaction(String name, Service service, Method method, String identifier, boolean firstNotLast) throws DataException {
if (name == null || service == null) { if (name == null || service == null) {
// Required fields // Required fields
@ -724,12 +862,11 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
} }
@Override @Override
- public List<ArbitraryResourceData> searchArbitraryResources(Service service, String query, String identifier, List<String> names, String title, String description, boolean prefixOnly,
+ public List<ArbitraryResourceData> searchArbitraryResources(Service service, String query, String identifier, List<String> names, String title, String description, List<String> keywords, boolean prefixOnly,
List<String> exactMatchNames, boolean defaultResource, SearchMode mode, Integer minLevel, Boolean followedOnly, Boolean excludeBlocked,
Boolean includeMetadata, Boolean includeStatus, Long before, Long after, Integer limit, Integer offset, Boolean reverse) throws DataException {
if(Settings.getInstance().isDbCacheEnabled()) { if(Settings.getInstance().isDbCacheEnabled()) {
List<ArbitraryResourceData> list List<ArbitraryResourceData> list
= HSQLDBCacheUtils.callCache( = HSQLDBCacheUtils.callCache(
ArbitraryResourceCache.getInstance(), ArbitraryResourceCache.getInstance(),
@ -751,6 +888,7 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
Optional.ofNullable(description), Optional.ofNullable(description),
prefixOnly, prefixOnly,
Optional.ofNullable(exactMatchNames), Optional.ofNullable(exactMatchNames),
Optional.ofNullable(keywords),
defaultResource, defaultResource,
Optional.ofNullable(minLevel), Optional.ofNullable(minLevel),
Optional.ofNullable(() -> ListUtils.followedNames()), Optional.ofNullable(() -> ListUtils.followedNames()),
@ -771,6 +909,7 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
} }
} }
StringBuilder sql = new StringBuilder(512); StringBuilder sql = new StringBuilder(512);
List<Object> bindParams = new ArrayList<>(); List<Object> bindParams = new ArrayList<>();
@ -857,6 +996,26 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
bindParams.add(queryWildcard); bindParams.add(queryWildcard);
} }
if (keywords != null && !keywords.isEmpty()) {
List<String> searchKeywords = new ArrayList<>(keywords);
List<String> conditions = new ArrayList<>();
List<String> bindValues = new ArrayList<>();
for (int i = 0; i < searchKeywords.size(); i++) {
conditions.add("LOWER(description) LIKE ?");
bindValues.add("%" + searchKeywords.get(i).trim().toLowerCase() + "%");
}
String finalCondition = String.join(" OR ", conditions);
sql.append(" AND (").append(finalCondition).append(")");
bindParams.addAll(bindValues);
}
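// For example (illustrative only): keywords ["qortal", "blog"] would append
// " AND (LOWER(description) LIKE ? OR LOWER(description) LIKE ?)" to the SQL,
// with bind values "%qortal%" and "%blog%" added to bindParams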
// Handle name searches // Handle name searches
if (names != null && !names.isEmpty()) { if (names != null && !names.isEmpty()) {
sql.append(" AND ("); sql.append(" AND (");


@ -167,6 +167,7 @@ public class HSQLDBCacheUtils {
Optional<String> description, Optional<String> description,
boolean prefixOnly, boolean prefixOnly,
Optional<List<String>> exactMatchNames, Optional<List<String>> exactMatchNames,
Optional<List<String>> keywords,
boolean defaultResource, boolean defaultResource,
Optional<Integer> minLevel, Optional<Integer> minLevel,
Optional<Supplier<List<String>>> includeOnly, Optional<Supplier<List<String>>> includeOnly,
@ -182,6 +183,14 @@ public class HSQLDBCacheUtils {
// retain only candidates with names // retain only candidates with names
Stream<ArbitraryResourceData> stream = candidates.stream().filter(candidate -> candidate.name != null ); Stream<ArbitraryResourceData> stream = candidates.stream().filter(candidate -> candidate.name != null );
if(after.isPresent()) {
stream = stream.filter( candidate -> candidate.created > after.get().longValue() );
}
if(before.isPresent()) {
stream = stream.filter( candidate -> candidate.created < before.get().longValue() );
}
if(exclude.isPresent()) if(exclude.isPresent())
stream = stream.filter( candidate -> !exclude.get().get().contains( candidate.name )); stream = stream.filter( candidate -> !exclude.get().get().contains( candidate.name ));
@ -207,6 +216,36 @@ public class HSQLDBCacheUtils {
stream = filterTerm(title, data -> data.metadata != null ? data.metadata.getTitle() : null, prefixOnly, stream); stream = filterTerm(title, data -> data.metadata != null ? data.metadata.getTitle() : null, prefixOnly, stream);
stream = filterTerm(description, data -> data.metadata != null ? data.metadata.getDescription() : null, prefixOnly, stream); stream = filterTerm(description, data -> data.metadata != null ? data.metadata.getDescription() : null, prefixOnly, stream);
// New: Filter by keywords if provided
if (keywords.isPresent() && !keywords.get().isEmpty()) {
List<String> searchKeywords = keywords.get().stream()
.map(String::toLowerCase)
.collect(Collectors.toList());
stream = stream.filter(candidate -> {
if (candidate.metadata != null && candidate.metadata.getDescription() != null) {
String descriptionLower = candidate.metadata.getDescription().toLowerCase();
return searchKeywords.stream().anyMatch(descriptionLower::contains);
}
return false;
});
}
// if exact names is set, retain resources with exact names // if exact names is set, retain resources with exact names
if( exactMatchNames.isPresent() && !exactMatchNames.get().isEmpty()) { if( exactMatchNames.isPresent() && !exactMatchNames.get().isEmpty()) {
@ -262,15 +301,58 @@ public class HSQLDBCacheUtils {
// truncate to limit // truncate to limit
if( limit.isPresent() && limit.get() > 0 ) stream = stream.limit(limit.get()); if( limit.isPresent() && limit.get() > 0 ) stream = stream.limit(limit.get());
- // include metadata
- if( includeMetadata.isEmpty() || !includeMetadata.get() )
- stream = stream.peek( candidate -> candidate.metadata = null );
- // include status
- if( includeStatus.isEmpty() || !includeStatus.get() )
- stream = stream.peek( candidate -> candidate.status = null);
- return stream.collect(Collectors.toList());
List<ArbitraryResourceData> listCopy1 = stream.collect(Collectors.toList());
List<ArbitraryResourceData> listCopy2 = new ArrayList<>(listCopy1.size());
// remove metadata from the first copy
if( includeMetadata.isEmpty() || !includeMetadata.get() ) {
for( ArbitraryResourceData data : listCopy1 ) {
ArbitraryResourceData copy = new ArbitraryResourceData();
copy.name = data.name;
copy.service = data.service;
copy.identifier = data.identifier;
copy.status = data.status;
copy.metadata = null;
copy.size = data.size;
copy.created = data.created;
copy.updated = data.updated;
listCopy2.add(copy);
}
}
// put the list copy 1 into the second copy
else {
listCopy2.addAll(listCopy1);
}
// remove status from final copy
if( includeStatus.isEmpty() || !includeStatus.get() ) {
List<ArbitraryResourceData> finalCopy = new ArrayList<>(listCopy2.size());
for( ArbitraryResourceData data : listCopy2 ) {
ArbitraryResourceData copy = new ArbitraryResourceData();
copy.name = data.name;
copy.service = data.service;
copy.identifier = data.identifier;
copy.status = null;
copy.metadata = data.metadata;
copy.size = data.size;
copy.created = data.created;
copy.updated = data.updated;
finalCopy.add(copy);
}
return finalCopy;
}
// keep status included by returning the second copy
else {
return listCopy2;
}
} }
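The list copies above replace the earlier peek()-based nulling, which mutated the cached objects themselves and so wiped metadata/status for every later caller. A minimal sketch of the difference, assuming a hypothetical copyOf helper:

// old approach: mutating the cached instance corrupts the shared cache
cached.metadata = null; // later callers now see null metadata
// new approach: strip fields on a copy, leaving the cached instance intact
ArbitraryResourceData view = copyOf(cached); // copyOf is hypothetical, for illustration only
view.metadata = null;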
/** /**


@ -386,7 +386,7 @@ public class Settings {
/** /**
* DB Cache Enabled? * DB Cache Enabled?
*/ */
- private boolean dbCacheEnabled = false;
+ private boolean dbCacheEnabled = true;
/** /**
* DB Cache Thread Priority * DB Cache Thread Priority
@ -508,6 +508,43 @@ public class Settings {
*/ */
private boolean connectionPoolMonitorEnabled = false; private boolean connectionPoolMonitorEnabled = false;
/**
* Build Arbitrary Resources Batch Size
*
* The number of resources to batch per iteration when rebuilding.
*/
private int buildArbitraryResourcesBatchSize = 200;
/**
* Arbitrary Indexing Priority
*
* The thread priority when indexing arbitrary resources.
*/
private int arbitraryIndexingPriority = 5;
/**
* Arbitrary Indexing Frequency (In Minutes)
*
* The frequency at which the arbitrary indices are cached.
*/
private int arbitraryIndexingFrequency = 10;
private boolean rebuildArbitraryResourceCacheTaskEnabled = false;
/**
* Rebuild Arbitrary Resource Cache Task Delay (In Minutes)
*
* Waiting period before the first rebuild task is started.
*/
private int rebuildArbitraryResourceCacheTaskDelay = 300;
/**
* Rebuild Arbitrary Resource Cache Task Period (In Hours)
*
* The frequency the arbitrary resource cache is rebuilt.
*/
private int rebuildArbitraryResourceCacheTaskPeriod = 24;
// Domain mapping // Domain mapping
public static class ThreadLimit { public static class ThreadLimit {
private String messageType; private String messageType;
@ -1333,4 +1370,28 @@ public class Settings {
public boolean isConnectionPoolMonitorEnabled() { public boolean isConnectionPoolMonitorEnabled() {
return connectionPoolMonitorEnabled; return connectionPoolMonitorEnabled;
} }
public int getBuildArbitraryResourcesBatchSize() {
return buildArbitraryResourcesBatchSize;
}
public int getArbitraryIndexingPriority() {
return arbitraryIndexingPriority;
}
public int getArbitraryIndexingFrequency() {
return arbitraryIndexingFrequency;
}
public boolean isRebuildArbitraryResourceCacheTaskEnabled() {
return rebuildArbitraryResourceCacheTaskEnabled;
}
public int getRebuildArbitraryResourceCacheTaskDelay() {
return rebuildArbitraryResourceCacheTaskDelay;
}
public int getRebuildArbitraryResourceCacheTaskPeriod() {
return rebuildArbitraryResourceCacheTaskPeriod;
}
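A rough sketch of how these settings could drive the rebuild timer task added elsewhere in this PR (illustrative only; rebuildTimer and rebuildTask are placeholders, not the actual classes):

Settings settings = Settings.getInstance();
if (settings.isRebuildArbitraryResourceCacheTaskEnabled()) {
    long delayMs = settings.getRebuildArbitraryResourceCacheTaskDelay() * 60_000L; // minutes -> ms
    long periodMs = settings.getRebuildArbitraryResourceCacheTaskPeriod() * 60L * 60_000L; // hours -> ms
    rebuildTimer.scheduleAtFixedRate(rebuildTask, delayMs, periodMs); // placeholders
}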
} }


@ -9,6 +9,7 @@ import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Service; import org.qortal.arbitrary.misc.Service;
import org.qortal.block.BlockChain; import org.qortal.block.BlockChain;
import org.qortal.controller.arbitrary.ArbitraryDataManager; import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.controller.arbitrary.ArbitraryTransactionDataHashWrapper;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck; import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.crypto.Crypto; import org.qortal.crypto.Crypto;
import org.qortal.crypto.MemoryPoW; import org.qortal.crypto.MemoryPoW;
@ -31,8 +32,12 @@ import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.NTP; import org.qortal.utils.NTP;
import java.io.IOException; import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors; import java.util.stream.Collectors;
public class ArbitraryTransaction extends Transaction { public class ArbitraryTransaction extends Transaction {
@ -303,8 +308,13 @@ public class ArbitraryTransaction extends Transaction {
// Add/update arbitrary resource caches, but don't update the status as this involves time-consuming // Add/update arbitrary resource caches, but don't update the status as this involves time-consuming
// disk reads, and is more prone to failure. The status will be updated on metadata retrieval, or when // disk reads, and is more prone to failure. The status will be updated on metadata retrieval, or when
// accessing the resource. // accessing the resource.
- this.updateArbitraryResourceCache(repository);
- this.updateArbitraryMetadataCache(repository);
// Also, must add this transaction as a latest transaction, since it has not been saved to the
// repository yet.
this.updateArbitraryResourceCacheIncludingMetadata(
repository,
Set.of(new ArbitraryTransactionDataHashWrapper(arbitraryTransactionData)),
new HashMap<>(0)
);
repository.saveChanges(); repository.saveChanges();
@ -360,7 +370,10 @@ public class ArbitraryTransaction extends Transaction {
* *
* @throws DataException * @throws DataException
*/ */
- public void updateArbitraryResourceCache(Repository repository) throws DataException {
public void updateArbitraryResourceCacheIncludingMetadata(
Repository repository,
Set<ArbitraryTransactionDataHashWrapper> latestTransactionWrappers,
Map<ArbitraryTransactionDataHashWrapper, ArbitraryResourceData> resourceByWrapper) throws DataException {
// Don't cache resources without a name (such as auto updates) // Don't cache resources without a name (such as auto updates)
if (arbitraryTransactionData.getName() == null) { if (arbitraryTransactionData.getName() == null) {
return; return;
@ -385,29 +398,42 @@ public class ArbitraryTransaction extends Transaction {
arbitraryResourceData.name = name; arbitraryResourceData.name = name;
arbitraryResourceData.identifier = identifier; arbitraryResourceData.identifier = identifier;
final ArbitraryTransactionDataHashWrapper wrapper = new ArbitraryTransactionDataHashWrapper(arbitraryTransactionData);
ArbitraryTransactionData latestTransactionData;
if( latestTransactionWrappers.contains(wrapper)) {
latestTransactionData
= latestTransactionWrappers.stream()
.filter( latestWrapper -> latestWrapper.equals(wrapper))
.findAny().get()
.getData();
}
else {
// Get the latest transaction
- ArbitraryTransactionData latestTransactionData = repository.getArbitraryRepository().getLatestTransaction(arbitraryTransactionData.getName(), arbitraryTransactionData.getService(), null, arbitraryTransactionData.getIdentifier());
+ latestTransactionData = repository.getArbitraryRepository().getLatestTransaction(arbitraryTransactionData.getName(), arbitraryTransactionData.getService(), null, arbitraryTransactionData.getIdentifier());
if (latestTransactionData == null) { if (latestTransactionData == null) {
LOGGER.info("We don't have a latest transaction, so delete from cache: arbitraryResourceData = " + arbitraryResourceData); LOGGER.info("We don't have a latest transaction, so delete from cache: arbitraryResourceData = " + arbitraryResourceData);
// We don't have a latest transaction, so delete from cache // We don't have a latest transaction, so delete from cache
repository.getArbitraryRepository().delete(arbitraryResourceData); repository.getArbitraryRepository().delete(arbitraryResourceData);
return; return;
} }
}
ArbitraryResourceData existingArbitraryResourceData = resourceByWrapper.get(wrapper);
if( existingArbitraryResourceData == null ) {
// Get existing cached entry if it exists
- ArbitraryResourceData existingArbitraryResourceData = repository.getArbitraryRepository()
+ existingArbitraryResourceData = repository.getArbitraryRepository()
.getArbitraryResource(service, name, identifier);
}
LOGGER.info("updating existing arbitraryResourceData" + existingArbitraryResourceData);
// Check for existing cached data // Check for existing cached data
if (existingArbitraryResourceData == null) { if (existingArbitraryResourceData == null) {
// Nothing exists yet, so set creation date from the current transaction (it will be reduced later if needed) // Nothing exists yet, so set creation date from the current transaction (it will be reduced later if needed)
arbitraryResourceData.created = arbitraryTransactionData.getTimestamp(); arbitraryResourceData.created = arbitraryTransactionData.getTimestamp();
arbitraryResourceData.updated = null; arbitraryResourceData.updated = null;
LOGGER.info("updated = null, reason = existingArbitraryResourceData == null" );
} }
else { else {
resourceByWrapper.put(wrapper, existingArbitraryResourceData);
// An entry already exists - update created time from current transaction if this is older // An entry already exists - update created time from current transaction if this is older
arbitraryResourceData.created = Math.min(existingArbitraryResourceData.created, arbitraryTransactionData.getTimestamp()); arbitraryResourceData.created = Math.min(existingArbitraryResourceData.created, arbitraryTransactionData.getTimestamp());
@ -415,22 +441,44 @@ public class ArbitraryTransaction extends Transaction {
if (existingArbitraryResourceData.created == latestTransactionData.getTimestamp()) { if (existingArbitraryResourceData.created == latestTransactionData.getTimestamp()) {
// Latest transaction matches created time, so it hasn't been updated // Latest transaction matches created time, so it hasn't been updated
arbitraryResourceData.updated = null; arbitraryResourceData.updated = null;
LOGGER.info(
"updated = null, reason: existingArbitraryResourceData.created == latestTransactionData.getTimestamp() == " +
existingArbitraryResourceData.created );
} }
else { else {
arbitraryResourceData.updated = latestTransactionData.getTimestamp(); arbitraryResourceData.updated = latestTransactionData.getTimestamp();
LOGGER.info("setting updated to a non-null value");
} }
} }
arbitraryResourceData.size = latestTransactionData.getSize(); arbitraryResourceData.size = latestTransactionData.getSize();
LOGGER.info("saving updated arbitraryResourceData: updated = " + arbitraryResourceData.updated);
// Save // Save
repository.getArbitraryRepository().save(arbitraryResourceData); repository.getArbitraryRepository().save(arbitraryResourceData);
// Update metadata for latest transaction if it is local
if (latestTransactionData.getMetadataHash() != null) {
ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(latestTransactionData.getMetadataHash(), latestTransactionData.getSignature());
if (metadataFile.exists()) {
ArbitraryDataTransactionMetadata transactionMetadata = new ArbitraryDataTransactionMetadata(metadataFile.getFilePath());
try {
transactionMetadata.read();
ArbitraryResourceMetadata metadata = new ArbitraryResourceMetadata();
metadata.setArbitraryResourceData(arbitraryResourceData);
metadata.setTitle(transactionMetadata.getTitle());
metadata.setDescription(transactionMetadata.getDescription());
metadata.setCategory(transactionMetadata.getCategory());
metadata.setTags(transactionMetadata.getTags());
repository.getArbitraryRepository().save(metadata);
} catch (IOException e) {
// Ignore, as we can add it again later
}
} else {
// We don't have a local copy of this metadata file, so delete it from the cache
// It will be re-added if the file later arrives via the network
ArbitraryResourceMetadata metadata = new ArbitraryResourceMetadata();
metadata.setArbitraryResourceData(arbitraryResourceData);
repository.getArbitraryRepository().delete(metadata);
}
}
} }
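The extra parameters allow a batch rebuild to reuse lookups across many transactions instead of querying per transaction; a rough sketch of such a caller (illustrative only; 'batch' is a placeholder for whatever supplies the transactions):

Set<ArbitraryTransactionDataHashWrapper> latestWrappers = new HashSet<>(); // pre-filled once per batch
Map<ArbitraryTransactionDataHashWrapper, ArbitraryResourceData> resourceByWrapper = new HashMap<>();
for (ArbitraryTransaction transaction : batch)
    transaction.updateArbitraryResourceCacheIncludingMetadata(repository, latestWrappers, resourceByWrapper);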
public void updateArbitraryResourceStatus(Repository repository) throws DataException { public void updateArbitraryResourceStatus(Repository repository) throws DataException {
@ -465,60 +513,4 @@ public class ArbitraryTransaction extends Transaction {
repository.getArbitraryRepository().setStatus(arbitraryResourceData, status); repository.getArbitraryRepository().setStatus(arbitraryResourceData, status);
} }
public void updateArbitraryMetadataCache(Repository repository) throws DataException {
// Get the latest transaction
ArbitraryTransactionData latestTransactionData = repository.getArbitraryRepository().getLatestTransaction(arbitraryTransactionData.getName(), arbitraryTransactionData.getService(), null, arbitraryTransactionData.getIdentifier());
if (latestTransactionData == null) {
// We don't have a latest transaction, so give up
return;
}
Service service = latestTransactionData.getService();
String name = latestTransactionData.getName();
String identifier = latestTransactionData.getIdentifier();
if (service == null) {
// Unsupported service - ignore this resource
return;
}
// In the cache we store null identifiers as "default", as it is part of the primary key
if (identifier == null) {
identifier = "default";
}
ArbitraryResourceData arbitraryResourceData = new ArbitraryResourceData();
arbitraryResourceData.service = service;
arbitraryResourceData.name = name;
arbitraryResourceData.identifier = identifier;
// Update metadata for latest transaction if it is local
if (latestTransactionData.getMetadataHash() != null) {
ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(latestTransactionData.getMetadataHash(), latestTransactionData.getSignature());
if (metadataFile.exists()) {
ArbitraryDataTransactionMetadata transactionMetadata = new ArbitraryDataTransactionMetadata(metadataFile.getFilePath());
try {
transactionMetadata.read();
ArbitraryResourceMetadata metadata = new ArbitraryResourceMetadata();
metadata.setArbitraryResourceData(arbitraryResourceData);
metadata.setTitle(transactionMetadata.getTitle());
metadata.setDescription(transactionMetadata.getDescription());
metadata.setCategory(transactionMetadata.getCategory());
metadata.setTags(transactionMetadata.getTags());
repository.getArbitraryRepository().save(metadata);
} catch (IOException e) {
// Ignore, as we can add it again later
}
} else {
// We don't have a local copy of this metadata file, so delete it from the cache
// It will be re-added if the file later arrives via the network
ArbitraryResourceMetadata metadata = new ArbitraryResourceMetadata();
metadata.setArbitraryResourceData(arbitraryResourceData);
repository.getArbitraryRepository().delete(metadata);
}
}
}
} }


@ -0,0 +1,250 @@
package org.qortal.utils;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.exc.InvalidFormatException;
import com.fasterxml.jackson.databind.exc.UnrecognizedPropertyException;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.SearchMode;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataReader;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.Controller;
import org.qortal.data.arbitrary.ArbitraryDataIndex;
import org.qortal.data.arbitrary.ArbitraryDataIndexDetail;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.IndexCache;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class ArbitraryIndexUtils {
public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final Logger LOGGER = LogManager.getLogger(ArbitraryIndexUtils.class);
public static final String INDEX_CACHE_TIMER = "Arbitrary Index Cache Timer";
public static final String INDEX_CACHE_TIMER_TASK = "Arbitrary Index Cache Timer Task";
public static void startCaching(int priorityRequested, int frequency) {
Timer timer = buildTimer(INDEX_CACHE_TIMER, priorityRequested);
TimerTask task = new TimerTask() {
@Override
public void run() {
Thread.currentThread().setName(INDEX_CACHE_TIMER_TASK);
try {
fillCache(IndexCache.getInstance());
} catch (IOException | DataException e) {
LOGGER.error(e.getMessage(), e);
}
}
};
// start after a 1 second delay, then repeat every 'frequency' minutes (converted to milliseconds)
timer.scheduleAtFixedRate(task, 1_000, frequency * 60_000);
}
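Presumably the caller wires this to the new settings along these lines (illustrative sketch; the call site is not shown in this diff):

ArbitraryIndexUtils.startCaching(
    Settings.getInstance().getArbitraryIndexingPriority(),
    Settings.getInstance().getArbitraryIndexingFrequency());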
private static void fillCache(IndexCache instance) throws DataException, IOException {
try (final Repository repository = RepositoryManager.getRepository()) {
List<ArbitraryResourceData> indexResources
= repository.getArbitraryRepository().searchArbitraryResources(
Service.JSON,
null,
"idx-",
null,
null,
null,
null,
true,
null,
false,
SearchMode.ALL,
0,
null,
null,
null,
null,
null,
null,
null,
null,
true);
List<ArbitraryDataIndexDetail> indexDetails = new ArrayList<>();
LOGGER.debug("processing index resource data: count = " + indexResources.size());
// process all index resources
for( ArbitraryResourceData indexResource : indexResources ) {
try {
LOGGER.debug("processing index resource: name = " + indexResource.name + ", identifier = " + indexResource.identifier);
String json = ArbitraryIndexUtils.getJson(indexResource.name, indexResource.identifier);
// map the JSON string to a list of Java objects
List<ArbitraryDataIndex> indices = OBJECT_MAPPER.readValue(json, new TypeReference<List<ArbitraryDataIndex>>() {});
LOGGER.debug("processed indices = " + indices);
// rank and create index detail for each index in this index resource
for( int rank = 1; rank <= indices.size(); rank++ ) {
indexDetails.add( new ArbitraryDataIndexDetail(indexResource.name, rank, indices.get(rank - 1), indexResource.identifier ));
}
} catch (InvalidFormatException e) {
LOGGER.debug("invalid format, skipping: " + indexResource);
} catch (UnrecognizedPropertyException e) {
LOGGER.debug("unrecognized property, skipping " + indexResource);
}
}
LOGGER.debug("processing indices by term ...");
Map<String, List<ArbitraryDataIndexDetail>> indicesByTerm
= indexDetails.stream().collect(
Collectors.toMap(
detail -> detail.term, // map by term
detail -> List.of(detail), // create list for term
(list1, list2) // merge lists for same term
-> Stream.of(list1, list2)
.flatMap(List::stream)
.collect(Collectors.toList())
)
);
LOGGER.info("processed indices by term: count = " + indicesByTerm.size());
// lock, clear old, load new
synchronized( IndexCache.getInstance().getIndicesByTerm() ) {
IndexCache.getInstance().getIndicesByTerm().clear();
IndexCache.getInstance().getIndicesByTerm().putAll(indicesByTerm);
}
LOGGER.info("loaded indices by term");
LOGGER.debug("processing indices by issuer ...");
Map<String, List<ArbitraryDataIndexDetail>> indicesByIssuer
= indexDetails.stream().collect(
Collectors.toMap(
detail -> detail.issuer, // map by issuer
detail -> List.of(detail), // create list for issuer
(list1, list2) // merge lists for same issuer
-> Stream.of(list1, list2)
.flatMap(List::stream)
.collect(Collectors.toList())
)
);
LOGGER.info("processed indices by issuer: count = " + indicesByIssuer.size());
// lock, clear old, load new
synchronized( IndexCache.getInstance().getIndicesByIssuer() ) {
IndexCache.getInstance().getIndicesByIssuer().clear();
IndexCache.getInstance().getIndicesByIssuer().putAll(indicesByIssuer);
}
LOGGER.info("loaded indices by issuer");
}
}
private static Timer buildTimer( final String name, int priorityRequested) {
// clamp priority to the valid Java thread priority range (1-10)
final int priority = Math.max(1, Math.min(10, priorityRequested));
// Create a custom Timer with updated priority threads
Timer timer = new Timer(true) { // 'true' to make the Timer daemon
@Override
public void schedule(TimerTask task, long delay) {
Thread thread = new Thread(task, name) {
@Override
public void run() {
this.setPriority(priority);
super.run();
}
};
thread.setPriority(priority);
thread.start();
}
};
return timer;
}
public static String getJsonWithExceptionHandling( String name, String identifier ) {
try {
return getJson(name, identifier);
}
catch( Exception e ) {
LOGGER.error(e.getMessage(), e);
return e.getMessage();
}
}
public static String getJson(String name, String identifier) throws IOException {
try {
ArbitraryDataReader arbitraryDataReader
= new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, Service.JSON, identifier);
int attempts = 0;
int maxAttempts = 5;
while (!Controller.isStopping()) {
attempts++;
if (!arbitraryDataReader.isBuilding()) {
try {
arbitraryDataReader.loadSynchronously(false);
break;
} catch (MissingDataException e) {
if (attempts > maxAttempts) {
// Give up after 5 attempts
throw new IOException("Data unavailable. Please try again later.");
}
}
}
Thread.sleep(3000L);
}
java.nio.file.Path outputPath = arbitraryDataReader.getFilePath();
if (outputPath == null) {
// Assume the resource doesn't exist
throw new IOException( "File not found");
}
// No file path supplied - so check if this is a single file resource
String[] files = ArrayUtils.removeElement(outputPath.toFile().list(), ".qortal");
String filepath = files[0];
java.nio.file.Path path = Paths.get(outputPath.toString(), filepath);
if (!Files.exists(path)) {
String message = String.format("No file exists at filepath: %s", filepath);
throw new IOException( message );
}
String data = Files.readString(path);
return data;
} catch (Exception e) {
throw new IOException(String.format("Unable to load %s %s: %s", Service.JSON, name, e.getMessage()));
}
}
}


@ -24,6 +24,7 @@ import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
@ -72,23 +73,23 @@ public class ArbitraryTransactionUtils {
return latestPut; return latestPut;
} }
- public static boolean hasMoreRecentPutTransaction(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
+ public static Optional<ArbitraryTransactionData> hasMoreRecentPutTransaction(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
byte[] signature = arbitraryTransactionData.getSignature();
if (signature == null) {
// We can't make a sensible decision without a signature
// so it's best to assume there is nothing newer
- return false;
+ return Optional.empty();
}
ArbitraryTransactionData latestPut = ArbitraryTransactionUtils.fetchLatestPut(repository, arbitraryTransactionData);
if (latestPut == null) {
- return false;
+ return Optional.empty();
}
// If the latest PUT transaction has a newer timestamp, it will override the existing transaction
// Any data relating to the older transaction is no longer needed
boolean hasNewerPut = (latestPut.getTimestamp() > arbitraryTransactionData.getTimestamp());
- return hasNewerPut;
+ return hasNewerPut ? Optional.of(latestPut) : Optional.empty();
}
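Callers that previously branched on the boolean can now consume the superseding PUT directly; a minimal sketch (illustrative only):

ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData)
    .ifPresent(latestPut -> { /* e.g. clean up files belonging to the older transaction */ });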
public static boolean completeFileExists(ArbitraryTransactionData transactionData) throws DataException { public static boolean completeFileExists(ArbitraryTransactionData transactionData) throws DataException {
@ -208,7 +209,15 @@ public class ArbitraryTransactionUtils {
return ArbitraryTransactionUtils.isFileRecent(filePath, now, cleanupAfter); return ArbitraryTransactionUtils.isFileRecent(filePath, now, cleanupAfter);
} }
- public static void deleteCompleteFile(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) throws DataException {
/**
*
* @param arbitraryTransactionData
* @param now
* @param cleanupAfter
* @return true if the file was deleted, otherwise false
* @throws DataException
*/
public static boolean deleteCompleteFile(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) throws DataException {
byte[] completeHash = arbitraryTransactionData.getData(); byte[] completeHash = arbitraryTransactionData.getData();
byte[] signature = arbitraryTransactionData.getSignature(); byte[] signature = arbitraryTransactionData.getSignature();
@ -219,6 +228,11 @@ public class ArbitraryTransactionUtils {
"if needed", Base58.encode(completeHash)); "if needed", Base58.encode(completeHash));
arbitraryDataFile.delete(); arbitraryDataFile.delete();
return true;
}
else {
return false;
} }
} }


@ -53,10 +53,10 @@ public class Blocks {
// all minting group member addresses
List<String> mintingGroupAddresses
- = repository.getGroupRepository()
- .getGroupMembers(BlockChain.getInstance().getMintingGroupId()).stream()
- .map(GroupMemberData::getMember)
- .collect(Collectors.toList());
+ = Groups.getAllMembers(
+ repository.getGroupRepository(),
+ Groups.getGroupIdsToMint(BlockChain.getInstance(), blockData.getHeight())
+ );
// all names, indexed by address // all names, indexed by address
Map<String, String> nameByAddress Map<String, String> nameByAddress


@ -0,0 +1,122 @@
package org.qortal.utils;
import org.qortal.block.BlockChain;
import org.qortal.data.group.GroupAdminData;
import org.qortal.data.group.GroupMemberData;
import org.qortal.repository.DataException;
import org.qortal.repository.GroupRepository;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Class Groups
*
* A utility class for group related functionality.
*/
public class Groups {
/**
* Does the member exist in any of these groups?
*
* @param groupRepository the group data repository
* @param groupsIds the group Ids to look for the address
* @param address the address
*
* @return true if the address is in any of the groups listed, otherwise false
* @throws DataException
*/
public static boolean memberExistsInAnyGroup(GroupRepository groupRepository, List<Integer> groupsIds, String address) throws DataException {
// if any of the listed groups have the address as a member, then return true
for( Integer groupIdToMint : groupsIds) {
if( groupRepository.memberExists(groupIdToMint, address) ) {
return true;
}
}
// if none of the listed groups have the address as a member, then return false
return false;
}
/**
* Get All Members
*
* Get all the group members from a list of groups.
*
* @param groupRepository the group data repository
* @param groupIds the list of group Ids to look at
*
* @return the list of all members belonging to any of the groups, no duplicates
* @throws DataException
*/
public static List<String> getAllMembers( GroupRepository groupRepository, List<Integer> groupIds ) throws DataException {
// collect all the members in a set, the set keeps out duplicates
Set<String> allMembers = new HashSet<>();
// add all members from each group to the all members set
for( int groupId : groupIds ) {
allMembers.addAll( groupRepository.getGroupMembers(groupId).stream().map(GroupMemberData::getMember).collect(Collectors.toList()));
}
return new ArrayList<>(allMembers);
}
/**
* Get All Admins
*
* Get all the admins from a list of groups.
*
* @param groupRepository the group data repository
* @param groupIds the list of group Ids to look at
*
* @return the list of all admins belonging to any of the groups, no duplicates
* @throws DataException
*/
public static List<String> getAllAdmins( GroupRepository groupRepository, List<Integer> groupIds ) throws DataException {
// collect all the admins in a set, the set keeps out duplicates
Set<String> allAdmins = new HashSet<>();
// collect admins for each group
for( int groupId : groupIds ) {
allAdmins.addAll( groupRepository.getGroupAdmins(groupId).stream().map(GroupAdminData::getAdmin).collect(Collectors.toList()) );
}
return new ArrayList<>(allAdmins);
}
/**
* Get Group Ids To Mint
*
* @param blockchain the blockchain
* @param blockchainHeight the block height to mint
*
* @return the group Ids for the minting groups at the height given
*/
public static List<Integer> getGroupIdsToMint(BlockChain blockchain, int blockchainHeight) {
// sort heights lowest to highest
Comparator<BlockChain.IdsForHeight> compareByHeight = Comparator.comparingInt(entry -> entry.height);
// sort heights highest to lowest
Comparator<BlockChain.IdsForHeight> compareByHeightReversed = compareByHeight.reversed();
// get highest height that is less than the blockchain height
Optional<BlockChain.IdsForHeight> ids = blockchain.getMintingGroupIds().stream()
.filter(entry -> entry.height < blockchainHeight)
.sorted(compareByHeightReversed)
.findFirst();
if( ids.isPresent()) {
return ids.get().ids;
}
else {
return new ArrayList<>(0);
}
}
}
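A worked example of the height lookup (hypothetical values): with mintingGroupIds entries { height: 0, ids: [694] } and { height: 2000000, ids: [694, 695] }, getGroupIdsToMint(blockchain, 1500000) returns [694] and getGroupIdsToMint(blockchain, 2500000) returns [694, 695]; if no entry has a height below the given height, an empty list is returned.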


@ -38,7 +38,9 @@
"blockRewardBatchStartHeight": 1508000, "blockRewardBatchStartHeight": 1508000,
"blockRewardBatchSize": 1000, "blockRewardBatchSize": 1000,
"blockRewardBatchAccountsBlockCount": 25, "blockRewardBatchAccountsBlockCount": 25,
"mintingGroupId": 694, "mintingGroupIds": [
{ "height": 0, "ids": [ 694 ]}
],
"rewardsByHeight": [ "rewardsByHeight": [
{ "height": 1, "reward": 5.00 }, { "height": 1, "reward": 5.00 },
{ "height": 259201, "reward": 4.75 }, { "height": 259201, "reward": 4.75 },


@ -84,6 +84,7 @@ isDOMContentLoaded: isDOMContentLoaded ? true : false
function handleQDNResourceDisplayed(pathurl, isDOMContentLoaded) { function handleQDNResourceDisplayed(pathurl, isDOMContentLoaded) {
// make sure that an empty string maps to the root path
if(pathurl?.startsWith('/render/hash/')) return;
const path = pathurl || '/' const path = pathurl || '/'
if (!isManualNavigation) { if (!isManualNavigation) {
isManualNavigation = true isManualNavigation = true
@ -284,11 +285,9 @@ window.addEventListener("message", async (event) => {
return; return;
} }
console.log("Core received action: " + JSON.stringify(event.data.action));
let url; let url;
let data = event.data; let data = event.data;
let identifier;
switch (data.action) { switch (data.action) {
case "GET_ACCOUNT_DATA": case "GET_ACCOUNT_DATA":
return httpGetAsyncWithEvent(event, "/addresses/" + data.address); return httpGetAsyncWithEvent(event, "/addresses/" + data.address);
@ -383,6 +382,7 @@ window.addEventListener("message", async (event) => {
if (data.identifier != null) url = url.concat("&identifier=" + data.identifier); if (data.identifier != null) url = url.concat("&identifier=" + data.identifier);
if (data.name != null) url = url.concat("&name=" + data.name); if (data.name != null) url = url.concat("&name=" + data.name);
if (data.names != null) data.names.forEach((x, i) => url = url.concat("&name=" + x)); if (data.names != null) data.names.forEach((x, i) => url = url.concat("&name=" + x));
if (data.keywords != null) data.keywords.forEach((x, i) => url = url.concat("&keywords=" + x));
if (data.title != null) url = url.concat("&title=" + data.title); if (data.title != null) url = url.concat("&title=" + data.title);
if (data.description != null) url = url.concat("&description=" + data.description); if (data.description != null) url = url.concat("&description=" + data.description);
if (data.prefix != null) url = url.concat("&prefix=" + new Boolean(data.prefix).toString()); if (data.prefix != null) url = url.concat("&prefix=" + new Boolean(data.prefix).toString());
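For example (illustrative only), passing keywords: ["qortal", "blog"] in the request data makes the search URL include &keywords=qortal&keywords=blog, i.e. one repeated keywords query parameter per keyword, which presumably matches the new keywords parameter on the arbitrary resource search endpoint.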
@ -419,7 +419,7 @@ window.addEventListener("message", async (event) => {
return httpGetAsyncWithEvent(event, url); return httpGetAsyncWithEvent(event, url);
case "GET_QDN_RESOURCE_PROPERTIES": case "GET_QDN_RESOURCE_PROPERTIES":
- let identifier = (data.identifier != null) ? data.identifier : "default";
+ identifier = (data.identifier != null) ? data.identifier : "default";
url = "/arbitrary/resource/properties/" + data.service + "/" + data.name + "/" + identifier; url = "/arbitrary/resource/properties/" + data.service + "/" + data.name + "/" + identifier;
return httpGetAsyncWithEvent(event, url); return httpGetAsyncWithEvent(event, url);
@ -456,7 +456,7 @@ window.addEventListener("message", async (event) => {
return httpGetAsyncWithEvent(event, url); return httpGetAsyncWithEvent(event, url);
case "GET_AT": case "GET_AT":
url = "/at" + data.atAddress; url = "/at/" + data.atAddress;
return httpGetAsyncWithEvent(event, url); return httpGetAsyncWithEvent(event, url);
case "GET_AT_DATA": case "GET_AT_DATA":
@ -473,7 +473,7 @@ window.addEventListener("message", async (event) => {
case "FETCH_BLOCK": case "FETCH_BLOCK":
if (data.signature != null) { if (data.signature != null) {
url = "/blocks/" + data.signature; url = "/blocks/signature/" + data.signature;
} else if (data.height != null) { } else if (data.height != null) {
url = "/blocks/byheight/" + data.height; url = "/blocks/byheight/" + data.height;
} }
@ -694,6 +694,7 @@ const qortalRequestWithTimeout = (request, timeout) =>
* Send current page details to UI * Send current page details to UI
*/ */
document.addEventListener('DOMContentLoaded', (event) => { document.addEventListener('DOMContentLoaded', (event) => {
resetVariables() resetVariables()
qortalRequest({ qortalRequest({
action: "QDN_RESOURCE_DISPLAYED", action: "QDN_RESOURCE_DISPLAYED",
@ -712,6 +713,7 @@ resetVariables()
* Handle app navigation * Handle app navigation
*/ */
navigation.addEventListener('navigate', (event) => { navigation.addEventListener('navigate', (event) => {
const url = new URL(event.destination.url); const url = new URL(event.destination.url);
let fullpath = url.pathname + url.hash; let fullpath = url.pathname + url.hash;


@ -3,10 +3,15 @@ package org.qortal.test.api;
import org.json.simple.JSONObject; import org.json.simple.JSONObject;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import org.qortal.api.model.CrossChainTradeLedgerEntry;
import org.qortal.api.resource.CrossChainUtils; import org.qortal.api.resource.CrossChainUtils;
import org.qortal.test.common.ApiCommon; import org.qortal.test.common.ApiCommon;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List;
import java.util.Map; import java.util.Map;
public class CrossChainUtilsTests extends ApiCommon { public class CrossChainUtilsTests extends ApiCommon {
@ -137,4 +142,53 @@ public class CrossChainUtilsTests extends ApiCommon {
Assert.assertEquals(5, versionDecimal, 0.001); Assert.assertEquals(5, versionDecimal, 0.001);
Assert.assertFalse(thrown); Assert.assertFalse(thrown);
} }
@Test
public void testWriteToLedgerHeaderOnly() throws IOException {
CrossChainUtils.writeToLedger(new PrintWriter(System.out), new ArrayList<>());
}
@Test
public void testWriteToLedgerOneRow() throws IOException {
CrossChainUtils.writeToLedger(
new PrintWriter(System.out),
List.of(
new CrossChainTradeLedgerEntry(
"QORT",
"LTC",
1000,
0,
"LTC",
1,
System.currentTimeMillis())
)
);
}
@Test
public void testWriteToLedgerTwoRows() throws IOException {
CrossChainUtils.writeToLedger(
new PrintWriter(System.out),
List.of(
new CrossChainTradeLedgerEntry(
"QORT",
"LTC",
1000,
0,
"LTC",
1,
System.currentTimeMillis()
),
new CrossChainTradeLedgerEntry(
"LTC",
"QORT",
1,
0,
"LTC",
1000,
System.currentTimeMillis()
)
)
);
}
} }


@ -145,56 +145,6 @@ public class ArbitraryDataStorageCapacityTests extends Common {
} }
} }
@Test
public void testDeleteRandomFilesForName() throws DataException, IOException, InterruptedException, IllegalAccessException {
try (final Repository repository = RepositoryManager.getRepository()) {
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 100;
int dataLength = 900; // Actual data length will be longer due to encryption
// Set originalCopyIndicatorFileEnabled to false, otherwise nothing will be deleted as it all originates from this node
FieldUtils.writeField(Settings.getInstance(), "originalCopyIndicatorFileEnabled", false, true);
// Alice hosts some data (with 10 chunks)
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String aliceName = "alice";
RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), aliceName, "");
transactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(transactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, transactionData, alice);
Path alicePath = ArbitraryUtils.generateRandomDataPath(dataLength);
ArbitraryDataFile aliceArbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, Base58.encode(alice.getPublicKey()), alicePath, aliceName, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize);
// Bob hosts some data too (also with 10 chunks)
PrivateKeyAccount bob = Common.getTestAccount(repository, "bob");
String bobName = "bob";
transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(bob), bobName, "");
transactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(transactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, transactionData, bob);
Path bobPath = ArbitraryUtils.generateRandomDataPath(dataLength);
ArbitraryDataFile bobArbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, Base58.encode(bob.getPublicKey()), bobPath, bobName, identifier, ArbitraryTransactionData.Method.PUT, service, bob, chunkSize);
// All 20 chunks should exist
assertEquals(10, aliceArbitraryDataFile.chunkCount());
assertTrue(aliceArbitraryDataFile.allChunksExist());
assertEquals(10, bobArbitraryDataFile.chunkCount());
assertTrue(bobArbitraryDataFile.allChunksExist());
// Now pretend that Bob has reached his storage limit - this should delete random files
// Run it 10 times to remove the likelihood of the randomizer always picking Alice's files
for (int i=0; i<10; i++) {
ArbitraryDataCleanupManager.getInstance().storageLimitReachedForName(repository, bobName);
}
// Alice should still have all chunks
assertTrue(aliceArbitraryDataFile.allChunksExist());
// Bob should be missing some chunks
assertFalse(bobArbitraryDataFile.allChunksExist());
}
}
private void deleteListsDirectory() { private void deleteListsDirectory() {
// Delete lists directory if exists // Delete lists directory if exists
Path listsPath = Paths.get(Settings.getInstance().getListsPath()); Path listsPath = Paths.get(Settings.getInstance().getListsPath());


@ -73,14 +73,14 @@ public class ArbitraryDataStoragePolicyTests extends Common {
// We should store and pre-fetch data for this transaction // We should store and pre-fetch data for this transaction
assertEquals(StoragePolicy.FOLLOWED_OR_VIEWED, Settings.getInstance().getStoragePolicy()); assertEquals(StoragePolicy.FOLLOWED_OR_VIEWED, Settings.getInstance().getStoragePolicy());
assertTrue(storageManager.canStoreData(arbitraryTransactionData)); assertTrue(storageManager.canStoreData(arbitraryTransactionData));
assertTrue(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertTrue(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
// Now unfollow the name // Now unfollow the name
assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false));
// We should store but not pre-fetch data for this transaction // We should store but not pre-fetch data for this transaction
assertTrue(storageManager.canStoreData(arbitraryTransactionData)); assertTrue(storageManager.canStoreData(arbitraryTransactionData));
assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
} }
} }
@ -108,14 +108,14 @@ public class ArbitraryDataStoragePolicyTests extends Common {
// We should store and pre-fetch data for this transaction // We should store and pre-fetch data for this transaction
assertEquals(StoragePolicy.FOLLOWED, Settings.getInstance().getStoragePolicy()); assertEquals(StoragePolicy.FOLLOWED, Settings.getInstance().getStoragePolicy());
assertTrue(storageManager.canStoreData(arbitraryTransactionData)); assertTrue(storageManager.canStoreData(arbitraryTransactionData));
assertTrue(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertTrue(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
// Now unfollow the name // Now unfollow the name
assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false));
// We shouldn't store or pre-fetch data for this transaction // We shouldn't store or pre-fetch data for this transaction
assertFalse(storageManager.canStoreData(arbitraryTransactionData)); assertFalse(storageManager.canStoreData(arbitraryTransactionData));
assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
} }
} }
@ -143,14 +143,14 @@ public class ArbitraryDataStoragePolicyTests extends Common {
// We should store but not pre-fetch data for this transaction // We should store but not pre-fetch data for this transaction
assertEquals(StoragePolicy.VIEWED, Settings.getInstance().getStoragePolicy()); assertEquals(StoragePolicy.VIEWED, Settings.getInstance().getStoragePolicy());
assertTrue(storageManager.canStoreData(arbitraryTransactionData)); assertTrue(storageManager.canStoreData(arbitraryTransactionData));
assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
// Now unfollow the name // Now unfollow the name
assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false));
// We should store but not pre-fetch data for this transaction // We should store but not pre-fetch data for this transaction
assertTrue(storageManager.canStoreData(arbitraryTransactionData)); assertTrue(storageManager.canStoreData(arbitraryTransactionData));
assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
} }
} }
@ -178,14 +178,14 @@ public class ArbitraryDataStoragePolicyTests extends Common {
// We should store and pre-fetch data for this transaction // We should store and pre-fetch data for this transaction
assertEquals(StoragePolicy.ALL, Settings.getInstance().getStoragePolicy()); assertEquals(StoragePolicy.ALL, Settings.getInstance().getStoragePolicy());
assertTrue(storageManager.canStoreData(arbitraryTransactionData)); assertTrue(storageManager.canStoreData(arbitraryTransactionData));
assertTrue(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertTrue(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
// Now unfollow the name // Now unfollow the name
assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false));
// We should store and pre-fetch data for this transaction // We should store and pre-fetch data for this transaction
assertTrue(storageManager.canStoreData(arbitraryTransactionData)); assertTrue(storageManager.canStoreData(arbitraryTransactionData));
assertTrue(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertTrue(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
} }
} }
@ -213,14 +213,14 @@ public class ArbitraryDataStoragePolicyTests extends Common {
// We shouldn't store or pre-fetch data for this transaction // We shouldn't store or pre-fetch data for this transaction
assertEquals(StoragePolicy.NONE, Settings.getInstance().getStoragePolicy()); assertEquals(StoragePolicy.NONE, Settings.getInstance().getStoragePolicy());
assertFalse(storageManager.canStoreData(arbitraryTransactionData)); assertFalse(storageManager.canStoreData(arbitraryTransactionData));
assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
// Now unfollow the name // Now unfollow the name
assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false)); assertTrue(ResourceListManager.getInstance().removeFromList("followedNames", name, false));
// We shouldn't store or pre-fetch data for this transaction // We shouldn't store or pre-fetch data for this transaction
assertFalse(storageManager.canStoreData(arbitraryTransactionData)); assertFalse(storageManager.canStoreData(arbitraryTransactionData));
assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData)); assertFalse(storageManager.shouldPreFetchData(repository, arbitraryTransactionData).isPass());
} }
} }
@ -236,7 +236,7 @@ public class ArbitraryDataStoragePolicyTests extends Common {
// We should store but not pre-fetch data for this transaction // We should store but not pre-fetch data for this transaction
assertTrue(storageManager.canStoreData(transactionData)); assertTrue(storageManager.canStoreData(transactionData));
assertFalse(storageManager.shouldPreFetchData(repository, transactionData)); assertFalse(storageManager.shouldPreFetchData(repository, transactionData).isPass());
} }
} }


@ -26,6 +26,7 @@ public class HSQLDBCacheUtilsTests {
private static final String DESCRIPTION = "description"; private static final String DESCRIPTION = "description";
private static final String PREFIX_ONLY = "prefixOnly"; private static final String PREFIX_ONLY = "prefixOnly";
private static final String EXACT_MATCH_NAMES = "exactMatchNames"; private static final String EXACT_MATCH_NAMES = "exactMatchNames";
private static final String KEYWORDS = "keywords";
private static final String DEFAULT_RESOURCE = "defaultResource"; private static final String DEFAULT_RESOURCE = "defaultResource";
private static final String MODE = "mode"; private static final String MODE = "mode";
private static final String MIN_LEVEL = "minLevel"; private static final String MIN_LEVEL = "minLevel";
@ -299,6 +300,19 @@ public class HSQLDBCacheUtilsTests {
); );
} }
@Test
public void testAfterNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.created = 10L;
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(AFTER, 11L)),
0
);
}
@Test @Test
public void testBeforePositive(){ public void testBeforePositive(){
ArbitraryResourceData data = new ArbitraryResourceData(); ArbitraryResourceData data = new ArbitraryResourceData();
@ -312,6 +326,19 @@ public class HSQLDBCacheUtilsTests {
);
}
@Test
public void testBeforeNegative(){
ArbitraryResourceData data = new ArbitraryResourceData();
data.created = 10L;
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(BEFORE, 9L)),
0
);
}
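The new testAfterNegative and testBeforeNegative cases imply that the cache filter drops entries whose created timestamp falls outside the requested before/after window. A rough sketch of that predicate, assuming exclusive bounds (names are illustrative, not taken from HSQLDBCacheUtils):

import java.util.Optional;

// Sketch of the created-timestamp window check implied by the tests above.
final class CreatedWindowFilter {
    static boolean withinWindow(Long created, Optional<Long> before, Optional<Long> after) {
        if (created == null) return false;
        // "before" keeps resources created strictly earlier than the bound
        if (before.isPresent() && created >= before.get()) return false;
        // "after" keeps resources created strictly later than the bound
        if (after.isPresent() && created <= after.get()) return false;
        return true;
    }
}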
@Test
public void testTitlePositive() {
@ -342,6 +369,25 @@ public class HSQLDBCacheUtilsTests {
);
}
@Test
public void testMetadataNullificationBugSolution(){
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("Once upon a time.");
data.name = "Joe";
List<ArbitraryResourceData> list = List.of(data);
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(DESCRIPTION, "Once upon a time.")),
1
);
Assert.assertNotNull(data.metadata);
}
@Test
public void testMinLevelPositive() {
@ -615,6 +661,7 @@ public class HSQLDBCacheUtilsTests {
Optional<String> description = Optional.ofNullable((String) valueByKey.get(DESCRIPTION));
boolean prefixOnly = valueByKey.containsKey(PREFIX_ONLY);
Optional<List<String>> exactMatchNames = Optional.ofNullable((List<String>) valueByKey.get(EXACT_MATCH_NAMES));
Optional<List<String>> keywords = Optional.ofNullable((List<String>) valueByKey.get(KEYWORDS));
boolean defaultResource = valueByKey.containsKey(DEFAULT_RESOURCE);
Optional<SearchMode> mode = Optional.of((SearchMode) valueByKey.getOrDefault(MODE, SearchMode.ALL));
Optional<Integer> minLevel = Optional.ofNullable((Integer) valueByKey.get(MIN_LEVEL));
@ -641,6 +688,7 @@ public class HSQLDBCacheUtilsTests {
description,
prefixOnly,
exactMatchNames,
keywords,
defaultResource,
minLevel,
followedOnly,

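The KEYWORDS parameter threaded through the test helper above suggests the cache search can now filter resources whose metadata matches any supplied keyword. A hedged sketch of that kind of match, assuming case-insensitive matching against the description only (the real logic in HSQLDBCacheUtils may inspect other metadata fields):

import java.util.List;
import java.util.Optional;

final class KeywordFilter {
    // Returns true when no keyword filter was requested, or when the
    // resource's description contains at least one of the keywords.
    static boolean matches(ArbitraryResourceData data, Optional<List<String>> keywords) {
        if (keywords.isEmpty() || keywords.get().isEmpty()) return true;
        if (data.metadata == null || data.metadata.getDescription() == null) return false;
        String description = data.metadata.getDescription().toLowerCase();
        return keywords.get().stream()
                .map(String::toLowerCase)
                .anyMatch(description::contains);
    }
}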
View File

@ -0,0 +1,102 @@
package org.qortal.test.utils;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.data.transaction.CreateGroupTransactionData;
import org.qortal.data.transaction.GroupInviteTransactionData;
import org.qortal.data.transaction.JoinGroupTransactionData;
import org.qortal.data.transaction.LeaveGroupTransactionData;
import org.qortal.group.Group;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.test.common.TransactionUtils;
import org.qortal.test.common.transaction.TestTransaction;
/**
* Class GroupsTestUtils
*
* Utility methods for testing the Groups class.
*/
public class GroupsTestUtils {
/**
* Create Group
*
* @param repository the data repository
* @param owner the group owner
* @param groupName the group name
* @param isOpen true if the group is public, false for private
*
* @return the group Id
* @throws DataException
*/
public static Integer createGroup(Repository repository, PrivateKeyAccount owner, String groupName, boolean isOpen) throws DataException {
String description = groupName + " (description)";
Group.ApprovalThreshold approvalThreshold = Group.ApprovalThreshold.ONE;
int minimumBlockDelay = 10;
int maximumBlockDelay = 1440;
CreateGroupTransactionData transactionData = new CreateGroupTransactionData(TestTransaction.generateBase(owner), groupName, description, isOpen, approvalThreshold, minimumBlockDelay, maximumBlockDelay);
TransactionUtils.signAndMint(repository, transactionData, owner);
return repository.getGroupRepository().fromGroupName(groupName).getGroupId();
}
/**
* Join Group
*
* @param repository the data repository
* @param joiner the account joining the group
* @param groupId the Id for the group to join
*
* @throws DataException
*/
public static void joinGroup(Repository repository, PrivateKeyAccount joiner, int groupId) throws DataException {
JoinGroupTransactionData transactionData = new JoinGroupTransactionData(TestTransaction.generateBase(joiner), groupId);
TransactionUtils.signAndMint(repository, transactionData, joiner);
}
/**
* Group Invite
*
* @param repository the data repository
* @param admin the admin account to sign the invite
* @param groupId the Id of the group to invite to
* @param invitee the recipient address for the invite
* @param timeToLive the lifetime of the invite (0 for a non-expiring invite)
*
* @throws DataException
*/
public static void groupInvite(Repository repository, PrivateKeyAccount admin, int groupId, String invitee, int timeToLive) throws DataException {
GroupInviteTransactionData transactionData = new GroupInviteTransactionData(TestTransaction.generateBase(admin), groupId, invitee, timeToLive);
TransactionUtils.signAndMint(repository, transactionData, admin);
}
/**
* Leave Group
*
* @param repository the data repository
* @param leaver the account leaving
* @param groupId the Id of the group being left
*
* @throws DataException
*/
public static void leaveGroup(Repository repository, PrivateKeyAccount leaver, int groupId) throws DataException {
LeaveGroupTransactionData transactionData = new LeaveGroupTransactionData(TestTransaction.generateBase(leaver), groupId);
TransactionUtils.signAndMint(repository, transactionData, leaver);
}
/**
* Is Member?
*
* @param repository the data repository
* @param address the account address
* @param groupId the group Id
*
* @return true if the account is a member of the group, otherwise false
* @throws DataException
*/
public static boolean isMember(Repository repository, String address, int groupId) throws DataException {
return repository.getGroupRepository().memberExists(groupId, address);
}
}
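The closed-group membership flow these helpers are built around, and which the tests below assert on, is: a join request alone does not grant membership in a closed group; an admin invite matching the join request completes it. A compact usage sketch (account setup as in the Common test fixtures):

try (final Repository repository = RepositoryManager.getRepository()) {
    PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
    PrivateKeyAccount bob = Common.getTestAccount(repository, "bob");

    int groupId = GroupsTestUtils.createGroup(repository, alice, "closed-group", false);
    GroupsTestUtils.joinGroup(repository, bob, groupId);                               // join request only
    GroupsTestUtils.groupInvite(repository, alice, groupId, bob.getAddress(), 0);      // invite completes membership

    assertTrue(GroupsTestUtils.isMember(repository, bob.getAddress(), groupId));
}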

View File

@ -0,0 +1,199 @@
package org.qortal.test.utils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.test.common.BlockUtils;
import org.qortal.test.common.Common;
import org.qortal.utils.Groups;
import java.util.List;
import static org.junit.Assert.*;
public class GroupsTests extends Common {
public static final String ALICE = "alice";
public static final String BOB = "bob";
public static final String CHLOE = "chloe";
public static final String DILBERT = "dilbert";
private static final int HEIGHT_1 = 5;
private static final int HEIGHT_2 = 8;
private static final int HEIGHT_3 = 12;
@Before
public void beforeTest() throws DataException {
Common.useDefaultSettings();
}
@After
public void afterTest() throws DataException {
Common.orphanCheck();
}
@Test
public void testGetGroupIdsToMintSimple() {
List<Integer> ids = Groups.getGroupIdsToMint(BlockChain.getInstance(), 0);
Assert.assertNotNull(ids);
Assert.assertEquals(0, ids.size());
}
@Test
public void testGetGroupIdsToMintComplex() throws DataException {
try (final Repository repository = RepositoryManager.getRepository()) {
Block block1 = BlockUtils.mintBlocks(repository, HEIGHT_1);
int height1 = block1.getBlockData().getHeight().intValue();
assertEquals(HEIGHT_1 + 1, height1);
List<Integer> ids1 = Groups.getGroupIdsToMint(BlockChain.getInstance(), height1);
Assert.assertEquals(1, ids1.size() );
Assert.assertTrue( ids1.contains( 694 ) );
Block block2 = BlockUtils.mintBlocks(repository, HEIGHT_2 - HEIGHT_1);
int height2 = block2.getBlockData().getHeight().intValue();
assertEquals( HEIGHT_2 + 1, height2);
List<Integer> ids2 = Groups.getGroupIdsToMint(BlockChain.getInstance(), height2);
Assert.assertEquals(2, ids2.size() );
Assert.assertTrue( ids2.contains( 694 ) );
Assert.assertTrue( ids2.contains( 800 ) );
Block block3 = BlockUtils.mintBlocks(repository, HEIGHT_3 - HEIGHT_2);
int height3 = block3.getBlockData().getHeight().intValue();
assertEquals( HEIGHT_3 + 1, height3);
List<Integer> ids3 = Groups.getGroupIdsToMint(BlockChain.getInstance(), height3);
Assert.assertEquals( 1, ids3.size() );
Assert.assertTrue( ids3.contains( 800 ) );
}
}
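testGetGroupIdsToMintComplex lines up with the mintingGroupIds entries added to the test chain config at the end of this diff (heights 0, 5, 8 and 12 mapping to group ids 694 and 800): the method appears to return the ids of the highest configured entry whose height does not exceed the queried block height. A sketch of that lookup under that assumption (the holder type is illustrative, not the real BlockChain API):

import java.util.Collections;
import java.util.List;

final class MintingGroupLookup {
    // Hypothetical holder mirroring one entry of "mintingGroupIds" in the test chain config.
    static class HeightGroupIds {
        int height;
        List<Integer> ids;
    }

    // Entries are assumed to be sorted by ascending height.
    static List<Integer> groupIdsToMint(List<HeightGroupIds> entries, int blockHeight) {
        List<Integer> result = Collections.emptyList();
        for (HeightGroupIds entry : entries) {
            if (entry.height > blockHeight)
                break;
            result = entry.ids;
        }
        return result;
    }
}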
@Test
public void testMemberExistsInAnyGroupSimple() throws DataException {
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
PrivateKeyAccount bob = Common.getTestAccount(repository, "bob");
// Create group
int groupId = GroupsTestUtils.createGroup(repository, alice, "closed-group", false);
// Confirm Bob is not a member
Assert.assertFalse( Groups.memberExistsInAnyGroup(repository.getGroupRepository(), List.of(groupId), bob.getAddress()) );
// Bob to join
GroupsTestUtils.joinGroup(repository, bob, groupId);
// Confirm Bob still not a member
assertFalse(GroupsTestUtils.isMember(repository, bob.getAddress(), groupId));
// Have Alice 'invite' Bob to confirm membership
GroupsTestUtils.groupInvite(repository, alice, groupId, bob.getAddress(), 0); // non-expiring invite
// Confirm Bob now a member
Assert.assertTrue( Groups.memberExistsInAnyGroup(repository.getGroupRepository(), List.of(groupId), bob.getAddress()) );
}
}
@Test
public void testGroupsListedFunctionality() throws DataException {
try (final Repository repository = RepositoryManager.getRepository()) {
PrivateKeyAccount alice = Common.getTestAccount(repository, ALICE);
PrivateKeyAccount bob = Common.getTestAccount(repository, BOB);
PrivateKeyAccount chloe = Common.getTestAccount(repository, CHLOE);
PrivateKeyAccount dilbert = Common.getTestAccount(repository, DILBERT);
// Create groups
int group1Id = GroupsTestUtils.createGroup(repository, alice, "group-1", false);
int group2Id = GroupsTestUtils.createGroup(repository, bob, "group-2", false);
// test memberExistsInAnyGroup
Assert.assertTrue(Groups.memberExistsInAnyGroup(repository.getGroupRepository(), List.of(group1Id, group2Id), alice.getAddress()));
Assert.assertFalse(Groups.memberExistsInAnyGroup(repository.getGroupRepository(), List.of(group1Id, group2Id), chloe.getAddress()));
// alice is a member
Assert.assertTrue(GroupsTestUtils.isMember(repository, alice.getAddress(), group1Id));
List<String> allMembersBeforeJoin = Groups.getAllMembers(repository.getGroupRepository(), List.of(group1Id));
// assert one member
Assert.assertNotNull(allMembersBeforeJoin);
Assert.assertEquals(1, allMembersBeforeJoin.size());
List<String> allAdminsBeforeJoin = Groups.getAllAdmins(repository.getGroupRepository(), List.of(group1Id));
// assert one admin
Assert.assertNotNull(allAdminsBeforeJoin);
Assert.assertEquals( 1, allAdminsBeforeJoin.size());
// Bob to join
GroupsTestUtils.joinGroup(repository, bob, group1Id);
// Have Alice 'invite' Bob to confirm membership
GroupsTestUtils.groupInvite(repository, alice, group1Id, bob.getAddress(), 0); // non-expiring invite
List<String> allMembersAfterJoin = Groups.getAllMembers(repository.getGroupRepository(), List.of(group1Id));
// alice and bob are members
Assert.assertNotNull(allMembersAfterJoin);
Assert.assertEquals(2, allMembersAfterJoin.size());
List<String> allAdminsAfterJoin = Groups.getAllAdmins(repository.getGroupRepository(), List.of(group1Id));
// assert still one admin
Assert.assertNotNull(allAdminsAfterJoin);
Assert.assertEquals(1, allAdminsAfterJoin.size());
List<String> allAdminsFor2Groups = Groups.getAllAdmins(repository.getGroupRepository(), List.of(group1Id, group2Id));
// assert 2 admins when including the second group
Assert.assertNotNull(allAdminsFor2Groups);
Assert.assertEquals(2, allAdminsFor2Groups.size());
List<String> allMembersFor2Groups = Groups.getAllMembers(repository.getGroupRepository(), List.of(group1Id, group2Id));
// assert 2 members when including the second group
Assert.assertNotNull(allMembersFor2Groups);
Assert.assertEquals(2, allMembersFor2Groups.size());
GroupsTestUtils.leaveGroup(repository, bob, group1Id);
List<String> allMembersForAfterBobLeavesGroup1InAllGroups = Groups.getAllMembers(repository.getGroupRepository(), List.of(group1Id, group2Id));
// alice and bob are each still a member of one group
Assert.assertNotNull(allMembersForAfterBobLeavesGroup1InAllGroups);
Assert.assertEquals(2, allMembersForAfterBobLeavesGroup1InAllGroups.size());
GroupsTestUtils.groupInvite(repository, alice, group1Id, chloe.getAddress(), 3600);
GroupsTestUtils.groupInvite(repository, bob, group2Id, chloe.getAddress(), 3600);
GroupsTestUtils.joinGroup(repository, chloe, group1Id);
GroupsTestUtils.joinGroup(repository, chloe, group2Id);
List<String> allMembersAfterDilbert = Groups.getAllMembers((repository.getGroupRepository()), List.of(group1Id, group2Id));
// 3 accounts are now members of one group or another
Assert.assertNotNull(allMembersAfterDilbert);
Assert.assertEquals(3, allMembersAfterDilbert.size());
}
}
}

View File

@ -31,6 +31,12 @@
"blockRewardBatchStartHeight": 999999000, "blockRewardBatchStartHeight": 999999000,
"blockRewardBatchSize": 10, "blockRewardBatchSize": 10,
"blockRewardBatchAccountsBlockCount": 3, "blockRewardBatchAccountsBlockCount": 3,
"mintingGroupIds": [
{ "height": 0, "ids": []},
{ "height": 5, "ids": [694]},
{ "height": 8, "ids": [694, 800]},
{ "height": 12, "ids": [800]}
],
"rewardsByHeight": [ "rewardsByHeight": [
{ "height": 1, "reward": 100 }, { "height": 1, "reward": 100 },
{ "height": 11, "reward": 10 }, { "height": 11, "reward": 10 },