mirror of https://github.com/Qortal/qortal.git synced 2025-02-11 17:55:50 +00:00

Merge remote-tracking branch 'kenny/master'

This commit is contained in:
crowetic 2024-11-01 13:49:18 -07:00
commit 209920b2f8
22 changed files with 1701 additions and 248 deletions

View File

@@ -13,6 +13,7 @@ import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.controller.arbitrary.*;
import org.qortal.controller.hsqldb.HSQLDBDataCacheManager;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.controller.repository.PruneManager;
import org.qortal.controller.tradebot.TradeBot;
@@ -35,6 +36,7 @@ import org.qortal.network.Peer;
import org.qortal.network.PeerAddress;
import org.qortal.network.message.*;
import org.qortal.repository.*;
import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
@@ -408,6 +410,15 @@ public class Controller extends Thread {
try (final Repository repository = RepositoryManager.getRepository()) {
// RepositoryManager.rebuildTransactionSequences(repository);
ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, false);
if( Settings.getInstance().isDbCacheEnabled() ) {
LOGGER.info("Db Cache Starting ...");
HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager((HSQLDBRepository) repositoryFactory.getRepository());
hsqldbDataCacheManager.start();
}
else {
LOGGER.info("Db Cache Disabled");
}
}
} catch (DataException e) {
// If exception has no cause or message then repository is in use by some other process.
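The cache startup above is gated on Settings.getInstance().isDbCacheEnabled(), which defaults to false (see the Settings changes further down). As a sketch, assuming the new fields map one-to-one onto settings.json keys as is usual for this class, enabling the cache would look like:

{
  "dbCacheEnabled": true,
  "dbCacheThreadPriority": 1,
  "dbCacheFrequency": 120
}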

View File

@@ -80,7 +80,7 @@ public class OnlineAccountsManager {
// one for the transition period.
private static long[] POW_VERIFY_WORK_BUFFER = new long[getPoWBufferSize() / 8];
private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts"));
private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts", Thread.NORM_PRIORITY));
private volatile boolean isStopping = false;
private final Set<OnlineAccountData> onlineAccountsImportQueue = ConcurrentHashMap.newKeySet();

View File

@@ -118,8 +118,12 @@ public class Synchronizer extends Thread {
}
public static Synchronizer getInstance() {
if (instance == null)
if (instance == null) {
instance = new Synchronizer();
instance.setPriority(Settings.getInstance().getSynchronizerThreadPriority());
LOGGER.info("thread priority = " + instance.getPriority());
}
return instance;
}

View File

@@ -0,0 +1,29 @@
package org.qortal.controller.hsqldb;
import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.repository.RepositoryManager;
import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.settings.Settings;
public class HSQLDBDataCacheManager extends Thread {
private ArbitraryResourceCache cache = ArbitraryResourceCache.getInstance();
private HSQLDBRepository repository;
public HSQLDBDataCacheManager(HSQLDBRepository repository) {
this.repository = repository;
}
@Override
public void run() {
Thread.currentThread().setName("HSQLDB Data Cache Manager");
this.cache
= HSQLDBCacheUtils.startCaching(
Settings.getInstance().getDbCacheThreadPriority(),
Settings.getInstance().getDbCacheFrequency(),
this.repository
);
}
}

View File

@@ -49,72 +49,81 @@ public class AtStatesPruner implements Runnable {
repository.saveChanges();
while (!Controller.isStopping()) {
repository.discardChanges();
try {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getAtStatesPruneInterval());
Thread.sleep(Settings.getInstance().getAtStatesPruneInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Prune AT states for all blocks up until our latest minus pruneBlockLimit
final int ourLatestHeight = chainTip.getHeight();
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
// Prune AT states for all blocks up until our latest minus pruneBlockLimit
final int ourLatestHeight = chainTip.getHeight();
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
// In archive mode we are only allowed to trim blocks that have already been archived
if (archiveMode) {
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
// In archive mode we are only allowed to trim blocks that have already been archived
if (archiveMode) {
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
// TODO: validate that the actual archived data exists before pruning it?
}
// TODO: validate that the actual archived data exists before pruning it?
}
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getAtStatesPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getAtStatesPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
if (pruneStartHeight >= upperPruneHeight)
continue;
if (pruneStartHeight >= upperPruneHeight)
continue;
LOGGER.debug(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
LOGGER.info(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
int numAtStateDataRowsTrimmed = repository.getATRepository().trimAtStates(
pruneStartHeight, upperPruneHeight, Settings.getInstance().getAtStatesTrimLimit());
repository.saveChanges();
if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) {
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.debug(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""),
finalPruneStartHeight, upperPruneHeight));
} else {
// Can we move onto next batch?
if (upperPrunableHeight > upperBatchHeight) {
pruneStartHeight = upperBatchHeight;
repository.getATRepository().setAtPruneHeight(pruneStartHeight);
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
int numAtStateDataRowsTrimmed = repository.getATRepository().trimAtStates(
pruneStartHeight, upperPruneHeight, Settings.getInstance().getAtStatesTrimLimit());
repository.saveChanges();
if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) {
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.debug(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
LOGGER.info(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""),
finalPruneStartHeight, upperPruneHeight));
} else {
// Can we move onto next batch?
if (upperPrunableHeight > upperBatchHeight) {
pruneStartHeight = upperBatchHeight;
repository.getATRepository().setAtPruneHeight(pruneStartHeight);
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.info(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
}
else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
Thread.sleep(5*60*1000L);
}
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("AT States Pruning Shutting Down");
}
else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
Thread.sleep(5*60*1000L);
LOGGER.warn("AT States Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("AT States Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue trying to prune AT states: %s", e.getMessage()));
} catch (InterruptedException e) {
// Time to exit
} catch (Exception e) {
LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}
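The same restructuring recurs in each maintenance task in this commit: the try/catch moves inside the while loop, so an interrupt or unexpected exception now aborts only the current iteration rather than the whole task. A minimal sketch of the pattern, reusing the surrounding Controller and LOGGER (doOneBatch is a hypothetical stand-in for the per-task prune/trim work):

while (!Controller.isStopping()) {
    try {
        doOneBatch(); // hypothetical: one prune/trim pass
    } catch (InterruptedException e) {
        if (Controller.isStopping())
            LOGGER.info("Shutting down");
        else
            LOGGER.warn("Interrupted. Trying again.", e);
    } catch (Exception e) {
        LOGGER.warn("Stopped working. Trying again.", e);
    }
}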

View File

@@ -36,57 +36,66 @@ public class AtStatesTrimmer implements Runnable {
repository.saveChanges();
while (!Controller.isStopping()) {
repository.discardChanges();
try {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getAtStatesTrimInterval());
Thread.sleep(Settings.getInstance().getAtStatesTrimInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
// We want to keep AT states near the tip of our copy of blockchain so we can process/orphan nearby blocks
long chainTrimmableTimestamp = chainTip.getTimestamp() - Settings.getInstance().getAtStatesMaxLifetime();
long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
// We want to keep AT states near the tip of our copy of blockchain so we can process/orphan nearby blocks
long chainTrimmableTimestamp = chainTip.getTimestamp() - Settings.getInstance().getAtStatesMaxLifetime();
long upperTrimmableTimestamp = Math.min(currentTrimmableTimestamp, chainTrimmableTimestamp);
int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
long upperTrimmableTimestamp = Math.min(currentTrimmableTimestamp, chainTrimmableTimestamp);
int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
int upperBatchHeight = trimStartHeight + Settings.getInstance().getAtStatesTrimBatchSize();
int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
int upperBatchHeight = trimStartHeight + Settings.getInstance().getAtStatesTrimBatchSize();
int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
if (trimStartHeight >= upperTrimHeight)
continue;
if (trimStartHeight >= upperTrimHeight)
continue;
int numAtStatesTrimmed = repository.getATRepository().trimAtStates(trimStartHeight, upperTrimHeight, Settings.getInstance().getAtStatesTrimLimit());
repository.saveChanges();
if (numAtStatesTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
LOGGER.debug(() -> String.format("Trimmed %d AT state%s between blocks %d and %d",
numAtStatesTrimmed, (numAtStatesTrimmed != 1 ? "s" : ""),
finalTrimStartHeight, upperTrimHeight));
} else {
// Can we move onto next batch?
if (upperTrimmableHeight > upperBatchHeight) {
trimStartHeight = upperBatchHeight;
repository.getATRepository().setAtTrimHeight(trimStartHeight);
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
int numAtStatesTrimmed = repository.getATRepository().trimAtStates(trimStartHeight, upperTrimHeight, Settings.getInstance().getAtStatesTrimLimit());
repository.saveChanges();
if (numAtStatesTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
LOGGER.debug(() -> String.format("Bumping AT state base trim height to %d", finalTrimStartHeight));
LOGGER.info(() -> String.format("Trimmed %d AT state%s between blocks %d and %d",
numAtStatesTrimmed, (numAtStatesTrimmed != 1 ? "s" : ""),
finalTrimStartHeight, upperTrimHeight));
} else {
// Can we move onto next batch?
if (upperTrimmableHeight > upperBatchHeight) {
trimStartHeight = upperBatchHeight;
repository.getATRepository().setAtTrimHeight(trimStartHeight);
maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
repository.saveChanges();
final int finalTrimStartHeight = trimStartHeight;
LOGGER.info(() -> String.format("Bumping AT state base trim height to %d", finalTrimStartHeight));
}
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("AT States Trimming Shutting Down");
}
else {
LOGGER.warn("AT States Trimming interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("AT States Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue trying to trim AT states: %s", e.getMessage()));
} catch (InterruptedException e) {
// Time to exit
} catch (Exception e) {
LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}

View File

@@ -48,71 +48,79 @@ public class BlockArchiver implements Runnable {
LOGGER.info("Starting block archiver from height {}...", startHeight);
while (!Controller.isStopping()) {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getArchiveInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null) {
continue;
}
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Don't attempt to archive if we're not synced yet
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
continue;
}
// Build cache of blocks
try {
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
switch (result) {
case OK:
// Increment block archive height
startHeight += writer.getWrittenCount();
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
repository.saveChanges();
break;
repository.discardChanges();
case STOPPING:
return;
Thread.sleep(Settings.getInstance().getArchiveInterval());
// We've reached the limit of the blocks we can archive
// Sleep for a while to allow more to become available
case NOT_ENOUGH_BLOCKS:
// We didn't reach our file size target, so that must mean that we don't have enough blocks
// yet or something went wrong. Sleep for a while and then try again.
repository.discardChanges();
Thread.sleep(60 * 60 * 1000L); // 1 hour
break;
case BLOCK_NOT_FOUND:
// We tried to archive a block that didn't exist. This is a major failure and likely means
// that a bootstrap or re-sync is needed. Try again every minute until then.
LOGGER.info("Error: block not found when building archive. If this error persists, " +
"a bootstrap or re-sync may be needed.");
repository.discardChanges();
Thread.sleep( 60 * 1000L); // 1 minute
break;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null) {
continue;
}
} catch (IOException | TransformationException e) {
LOGGER.info("Caught exception when creating block cache", e);
}
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Don't attempt to archive if we're not synced yet
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
continue;
}
// Build cache of blocks
try {
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
switch (result) {
case OK:
// Increment block archive height
startHeight += writer.getWrittenCount();
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
repository.saveChanges();
break;
case STOPPING:
return;
// We've reached the limit of the blocks we can archive
// Sleep for a while to allow more to become available
case NOT_ENOUGH_BLOCKS:
// We didn't reach our file size target, so that must mean that we don't have enough blocks
// yet or something went wrong. Sleep for a while and then try again.
repository.discardChanges();
Thread.sleep(60 * 60 * 1000L); // 1 hour
break;
case BLOCK_NOT_FOUND:
// We tried to archive a block that didn't exist. This is a major failure and likely means
// that a bootstrap or re-sync is needed. Try again every minute until then.
LOGGER.info("Error: block not found when building archive. If this error persists, " +
"a bootstrap or re-sync may be needed.");
repository.discardChanges();
Thread.sleep( 60 * 1000L); // 1 minute
break;
}
} catch (IOException | TransformationException e) {
LOGGER.info("Caught exception when creating block cache", e);
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("Block Archiving Shutting Down");
}
else {
LOGGER.warn("Block Archiving interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("Block Archiving stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.info("Caught exception when creating block cache", e);
} catch (InterruptedException e) {
// Do nothing
} catch (Exception e) {
LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}

View File

@@ -51,72 +51,81 @@ public class BlockPruner implements Runnable {
}
while (!Controller.isStopping()) {
repository.discardChanges();
try {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getBlockPruneInterval());
Thread.sleep(Settings.getInstance().getBlockPruneInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Don't attempt to prune if we're not synced yet
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
continue;
}
// Don't attempt to prune if we're not synced yet
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp == null || chainTip.getTimestamp() < minLatestBlockTimestamp) {
continue;
}
// Prune all blocks up until our latest minus pruneBlockLimit
final int ourLatestHeight = chainTip.getHeight();
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
// Prune all blocks up until our latest minus pruneBlockLimit
final int ourLatestHeight = chainTip.getHeight();
int upperPrunableHeight = ourLatestHeight - Settings.getInstance().getPruneBlockLimit();
// In archive mode we are only allowed to trim blocks that have already been archived
if (archiveMode) {
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
}
// In archive mode we are only allowed to trim blocks that have already been archived
if (archiveMode) {
upperPrunableHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1;
}
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
if (pruneStartHeight >= upperPruneHeight) {
continue;
}
if (pruneStartHeight >= upperPruneHeight) {
continue;
}
LOGGER.debug(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
if (numBlocksPruned > 0) {
LOGGER.debug(String.format("Pruned %d block%s between %d and %d",
numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
pruneStartHeight, upperPruneHeight));
} else {
final int nextPruneHeight = upperPruneHeight + 1;
repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
LOGGER.debug(String.format("Bumping block base prune height to %d", pruneStartHeight));
// Can we move onto next batch?
if (upperPrunableHeight > nextPruneHeight) {
pruneStartHeight = nextPruneHeight;
if (numBlocksPruned > 0) {
LOGGER.info(String.format("Pruned %d block%s between %d and %d",
numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
pruneStartHeight, upperPruneHeight));
} else {
final int nextPruneHeight = upperPruneHeight + 1;
repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
repository.saveChanges();
LOGGER.info(String.format("Bumping block base prune height to %d", pruneStartHeight));
// Can we move onto next batch?
if (upperPrunableHeight > nextPruneHeight) {
pruneStartHeight = nextPruneHeight;
}
else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
Thread.sleep(10*60*1000L);
}
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("Block Pruning Shutting Down");
}
else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing
repository.discardChanges();
Thread.sleep(10*60*1000L);
LOGGER.warn("Block Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("Block Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue trying to prune blocks: %s", e.getMessage()));
} catch (InterruptedException e) {
// Time to exit
} catch (Exception e) {
LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}

View File

@@ -36,53 +36,62 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
int trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
while (!Controller.isStopping()) {
repository.discardChanges();
try {
repository.discardChanges();
Thread.sleep(Settings.getInstance().getOnlineSignaturesTrimInterval());
Thread.sleep(Settings.getInstance().getOnlineSignaturesTrimInterval());
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
BlockData chainTip = Controller.getInstance().getChainTip();
if (chainTip == null || NTP.getTime() == null)
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing())
continue;
// Trim blockchain by removing 'old' online accounts signatures
long upperTrimmableTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
// Trim blockchain by removing 'old' online accounts signatures
long upperTrimmableTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
int upperTrimmableHeight = repository.getBlockRepository().getHeightFromTimestamp(upperTrimmableTimestamp);
int upperBatchHeight = trimStartHeight + Settings.getInstance().getOnlineSignaturesTrimBatchSize();
int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
int upperBatchHeight = trimStartHeight + Settings.getInstance().getOnlineSignaturesTrimBatchSize();
int upperTrimHeight = Math.min(upperBatchHeight, upperTrimmableHeight);
if (trimStartHeight >= upperTrimHeight)
continue;
if (trimStartHeight >= upperTrimHeight)
continue;
int numSigsTrimmed = repository.getBlockRepository().trimOldOnlineAccountsSignatures(trimStartHeight, upperTrimHeight);
repository.saveChanges();
if (numSigsTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
LOGGER.debug(() -> String.format("Trimmed %d online accounts signature%s between blocks %d and %d",
numSigsTrimmed, (numSigsTrimmed != 1 ? "s" : ""),
finalTrimStartHeight, upperTrimHeight));
} else {
// Can we move onto next batch?
if (upperTrimmableHeight > upperBatchHeight) {
trimStartHeight = upperBatchHeight;
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(trimStartHeight);
repository.saveChanges();
int numSigsTrimmed = repository.getBlockRepository().trimOldOnlineAccountsSignatures(trimStartHeight, upperTrimHeight);
repository.saveChanges();
if (numSigsTrimmed > 0) {
final int finalTrimStartHeight = trimStartHeight;
LOGGER.debug(() -> String.format("Bumping online accounts signatures base trim height to %d", finalTrimStartHeight));
LOGGER.info(() -> String.format("Trimmed %d online accounts signature%s between blocks %d and %d",
numSigsTrimmed, (numSigsTrimmed != 1 ? "s" : ""),
finalTrimStartHeight, upperTrimHeight));
} else {
// Can we move onto next batch?
if (upperTrimmableHeight > upperBatchHeight) {
trimStartHeight = upperBatchHeight;
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(trimStartHeight);
repository.saveChanges();
final int finalTrimStartHeight = trimStartHeight;
LOGGER.info(() -> String.format("Bumping online accounts signatures base trim height to %d", finalTrimStartHeight));
}
}
} catch (InterruptedException e) {
if(Controller.isStopping()) {
LOGGER.info("Online Accounts Signatures Trimming Shutting Down");
}
else {
LOGGER.warn("Online Accounts Signatures Trimming interrupted. Trying again. Report this error immediately to the developers.", e);
}
} catch (Exception e) {
LOGGER.warn("Online Accounts Signatures Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
}
}
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue trying to trim online accounts signatures: %s", e.getMessage()));
} catch (InterruptedException e) {
// Time to exit
} catch (Exception e) {
LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
}
}

View File

@@ -40,7 +40,7 @@ public class PruneManager {
}
public void start() {
this.executorService = Executors.newCachedThreadPool(new DaemonThreadFactory());
this.executorService = Executors.newCachedThreadPool(new DaemonThreadFactory(Settings.getInstance().getPruningThreadPriority()));
if (Settings.getInstance().isTopOnly()) {
// Top-only-sync

View File

@@ -33,9 +33,10 @@ public class AddressLevelPairing {
public int getLevel() {
return level;
}
@Override
public String toString() {
return "SponsorshipReport{" +
return "AddressLevelPairing{" +
"address='" + address + '\'' +
", level=" + level +
'}';

View File

@@ -0,0 +1,26 @@
package org.qortal.data.arbitrary;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
public class ArbitraryResourceCache {
private ConcurrentHashMap<Integer, List<ArbitraryResourceData>> dataByService = new ConcurrentHashMap<>();
private ConcurrentHashMap<String, Integer> levelByName = new ConcurrentHashMap<>();
private ArbitraryResourceCache() {}
private static ArbitraryResourceCache SINGLETON = new ArbitraryResourceCache();
public static ArbitraryResourceCache getInstance(){
return SINGLETON;
}
public ConcurrentHashMap<String, Integer> getLevelByName() {
return levelByName;
}
public ConcurrentHashMap<Integer, List<ArbitraryResourceData>> getDataByService() {
return this.dataByService;
}
}
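Readers consult the singleton directly. For illustration only (Service.WEBSITE is an assumed example), fetching the cached resources for one service mirrors the getOrDefault lookup used by HSQLDBCacheUtils.callCache below:

List<ArbitraryResourceData> cached
    = ArbitraryResourceCache.getInstance()
        .getDataByService()
        .getOrDefault(Service.WEBSITE.value, new ArrayList<>(0));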

View File

@@ -269,7 +269,7 @@ public enum Handshake {
private static final int POW_DIFFICULTY_POST_131 = 2; // leading zero bits
private static final ExecutorService responseExecutor = Executors.newFixedThreadPool(Settings.getInstance().getNetworkPoWComputePoolSize(), new DaemonThreadFactory("Network-PoW"));
private static final ExecutorService responseExecutor = Executors.newFixedThreadPool(Settings.getInstance().getNetworkPoWComputePoolSize(), new DaemonThreadFactory("Network-PoW", Settings.getInstance().getHandshakeThreadPriority()));
private static final byte[] ZERO_CHALLENGE = new byte[ChallengeMessage.CHALLENGE_LENGTH];

View File

@@ -168,7 +168,7 @@ public class Network {
Settings.getInstance().getMaxNetworkThreadPoolSize(),
NETWORK_EPC_KEEPALIVE, TimeUnit.SECONDS,
new SynchronousQueue<Runnable>(),
new NamedThreadFactory("Network-EPC"));
new NamedThreadFactory("Network-EPC", Settings.getInstance().getNetworkThreadPriority()));
networkEPC = new NetworkProcessor(networkExecutor);
}

View File

@@ -153,13 +153,16 @@ public class BlockArchiveWriter {
int i = 0;
while (headerBytes.size() + bytes.size() < this.fileSizeTarget) {
// pause, since this can be a long process and other processes need to execute
Thread.sleep(Settings.getInstance().getArchivingPause());
if (Controller.isStopping()) {
return BlockArchiveWriteResult.STOPPING;
}
if (Synchronizer.getInstance().isSynchronizing()) {
Thread.sleep(1000L);
// wait until the Synchronizer stops
if( Synchronizer.getInstance().isSynchronizing() )
continue;
}
int currentHeight = startHeight + i;
if (currentHeight > endHeight) {

View File

@@ -1215,7 +1215,7 @@ public class HSQLDBAccountRepository implements AccountRepository {
sponseeSql.append(")");
// Create a new array to hold both
String[] combinedArray = new String[realRewardShareRecipients.length + 1];
Object[] combinedArray = new Object[realRewardShareRecipients.length + 1];
// Add the single string to the first position
combinedArray[0] = account;
@@ -1439,7 +1439,7 @@ public class HSQLDBAccountRepository implements AccountRepository {
sql.append(String.join(", ", Collections.nCopies(addressCount, "?")));
sql.append(") ");
sql.append("AND a.account = tx.recipient AND a.public_key != ats.creator AND asset_id = 0 ");
String[] sponsees = addresses.toArray(new String[addressCount]);
Object[] sponsees = addresses.toArray(new Object[addressCount]);
ResultSet buySellResultSet = this.repository.checkedExecute(sql.toString(), sponsees);
return buySellResultSet;
@@ -1456,7 +1456,7 @@ public class HSQLDBAccountRepository implements AccountRepository {
sql.append(String.join(", ", Collections.nCopies(addressCount, "?")));
sql.append(") ");
sql.append("AND a.account != tx.recipient AND asset_id = 0 ");
String[] sponsees = addresses.toArray(new String[addressCount]);
Object[] sponsees = addresses.toArray(new Object[addressCount]);
return this.repository.checkedExecute(sql.toString(), sponsees);
}
@@ -1490,7 +1490,7 @@ public class HSQLDBAccountRepository implements AccountRepository {
txTypeTotalsSql.append(") and type in (10, 12, 40) ");
txTypeTotalsSql.append("group by type order by type");
String[] sponsees = sponseeAddresses.toArray(new String[sponseeCount]);
Object[] sponsees = sponseeAddresses.toArray(new Object[sponseeCount]);
ResultSet txTypeResultSet = this.repository.checkedExecute(txTypeTotalsSql.toString(), sponsees);
return txTypeResultSet;
}
@@ -1502,7 +1502,7 @@ public class HSQLDBAccountRepository implements AccountRepository {
avgBalanceSql.append(String.join(", ", Collections.nCopies(sponseeCount, "?")));
avgBalanceSql.append(") and ASSET_ID = 0");
String[] sponsees = sponseeAddresses.toArray(new String[sponseeCount]);
Object[] sponsees = sponseeAddresses.toArray(new Object[sponseeCount]);
return this.repository.checkedExecute(avgBalanceSql.toString(), sponsees);
}
@@ -1538,7 +1538,7 @@ public class HSQLDBAccountRepository implements AccountRepository {
namesSql.append(String.join(", ", Collections.nCopies(sponseeCount, "?")));
namesSql.append(")");
String[] sponsees = sponseeAddresses.toArray(new String[sponseeCount]);
Object[] sponsees = sponseeAddresses.toArray(new Object[sponseeCount]);
ResultSet namesResultSet = this.repository.checkedExecute(namesSql.toString(), sponsees);
return namesResultSet;
}

View File

@@ -7,6 +7,8 @@ import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
@@ -18,6 +20,7 @@ import org.qortal.data.transaction.BaseTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.ArbitraryRepository;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction.ApprovalStatus;
import org.qortal.utils.Base58;
@@ -28,6 +31,7 @@ import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
public class HSQLDBArbitraryRepository implements ArbitraryRepository {
@@ -723,6 +727,50 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
public List<ArbitraryResourceData> searchArbitraryResources(Service service, String query, String identifier, List<String> names, String title, String description, boolean prefixOnly,
List<String> exactMatchNames, boolean defaultResource, SearchMode mode, Integer minLevel, Boolean followedOnly, Boolean excludeBlocked,
Boolean includeMetadata, Boolean includeStatus, Long before, Long after, Integer limit, Integer offset, Boolean reverse) throws DataException {
if(Settings.getInstance().isDbCacheEnabled()) {
List<ArbitraryResourceData> list
= HSQLDBCacheUtils.callCache(
ArbitraryResourceCache.getInstance(),
service, query, identifier, names, title, description, prefixOnly, exactMatchNames,
defaultResource, mode, minLevel, followedOnly, excludeBlocked, includeMetadata, includeStatus,
before, after, limit, offset, reverse);
if( !list.isEmpty() ) {
List<ArbitraryResourceData> results
= HSQLDBCacheUtils.filterList(
list,
ArbitraryResourceCache.getInstance().getLevelByName(),
Optional.ofNullable(mode),
Optional.ofNullable(service),
Optional.ofNullable(query),
Optional.ofNullable(identifier),
Optional.ofNullable(names),
Optional.ofNullable(title),
Optional.ofNullable(description),
prefixOnly,
Optional.ofNullable(exactMatchNames),
defaultResource,
Optional.ofNullable(minLevel),
Optional.ofNullable(ListUtils::followedNames),
Optional.ofNullable(ListUtils::blockedNames),
Optional.ofNullable(includeMetadata),
Optional.ofNullable(includeStatus),
Optional.ofNullable(before),
Optional.ofNullable(after),
Optional.ofNullable(limit),
Optional.ofNullable(offset),
Optional.ofNullable(reverse)
);
return results;
}
else {
LOGGER.info("Db Enabled Cache has zero candidates.");
}
}
StringBuilder sql = new StringBuilder(512);
List<Object> bindParams = new ArrayList<>();

View File

@@ -0,0 +1,544 @@
package org.qortal.repository.hsqldb;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.SearchMode;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.time.format.DateTimeFormatter;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.qortal.api.SearchMode.LATEST;
public class HSQLDBCacheUtils {
private static final Logger LOGGER = LogManager.getLogger(HSQLDBCacheUtils.class);
private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm");
private static final Comparator<? super ArbitraryResourceData> CREATED_WHEN_COMPARATOR = new Comparator<ArbitraryResourceData>() {
@Override
public int compare(ArbitraryResourceData data1, ArbitraryResourceData data2) {
Long a = data1.created;
Long b = data2.created;
return Long.compare(a != null ? a : Long.MIN_VALUE, b != null ? b : Long.MIN_VALUE);
}
};
private static final String DEFAULT_IDENTIFIER = "default";
/**
*
* @param cache the arbitrary resource cache to search
* @param service the service to filter
* @param query query for name, identifier, title or description match
* @param identifier the identifier to match
* @param names the names to match, ignored if there are exact names
* @param title the title to match for
* @param description the description to match for
* @param prefixOnly true to match on prefix only, false for match anywhere in string
* @param exactMatchNames names to match exactly, overrides names
* @param defaultResource true to filter identifiers on the default identifier and match the query terms against candidate names only
* @param mode LATEST or ALL
* @param minLevel the minimum account level for resource creators
* @param followedOnly true to restrict results to followed names only
* @param excludeBlocked true to exclude results for blocked names
* @param includeMetadata true to include resource metadata in the results, false to exclude metadata
* @param includeStatus true to include resource status in the results, false to exclude status
* @param before the latest creation timestamp for any candidate
* @param after the earliest creation timestamp for any candidate
* @param limit the maximum number of resource results to return
* @param offset the number of resource results to skip after the results have been retained, filtered and sorted
* @param reverse true to reverse the sort order, false to order in chronological order
*
* @return the resource results
*/
public static List<ArbitraryResourceData> callCache(
ArbitraryResourceCache cache,
Service service,
String query,
String identifier,
List<String> names,
String title,
String description,
boolean prefixOnly,
List<String> exactMatchNames,
boolean defaultResource,
SearchMode mode,
Integer minLevel,
Boolean followedOnly,
Boolean excludeBlocked,
Boolean includeMetadata,
Boolean includeStatus,
Long before,
Long after,
Integer limit,
Integer offset,
Boolean reverse) {
List<ArbitraryResourceData> candidates = new ArrayList<>();
// gather all cached results for the requested service
if( service != null ) {
candidates.addAll(cache.getDataByService().getOrDefault(service.value, new ArrayList<>(0)));
}
// if no service was requested, the returned list is empty
return candidates;
}
/**
* Filter candidates
*
* @param candidates the candidates, they may be preprocessed
* @param levelByName name -> level map
* @param mode LATEST or ALL
* @param service the service to filter
* @param query query for name, identifier, title or description match
* @param identifier the identifier to match
* @param names the names to match, ignored if there are exact names
* @param title the title to match for
* @param description the description to match for
* @param prefixOnly true to match on prefix only, false for match anywhere in string
* @param exactMatchNames names to match exactly, overrides names
* @param defaultResource true to filter identifiers on the default identifier and match the query terms against candidate names only
* @param minLevel the minimum account level for resource creators
* @param includeOnly names to retain, exclude all others
* @param exclude names to exclude, retain all others
* @param includeMetadata true to include resource metadata in the results, false to exclude metadata
* @param includeStatus true to include resource status in the results, false to exclude status
* @param before the latest creation timestamp for any candidate
* @param after the earliest creation timestamp for any candidate
* @param limit the maximum number of resource results to return
* @param offset the number of resource results to skip after the results have been retained, filtered and sorted
* @param reverse true to reverse the sort order, false to order in chronological order
*
* @return the resource results
*/
public static List<ArbitraryResourceData> filterList(
List<ArbitraryResourceData> candidates,
Map<String, Integer> levelByName,
Optional<SearchMode> mode,
Optional<Service> service,
Optional<String> query,
Optional<String> identifier,
Optional<List<String>> names,
Optional<String> title,
Optional<String> description,
boolean prefixOnly,
Optional<List<String>> exactMatchNames,
boolean defaultResource,
Optional<Integer> minLevel,
Optional<Supplier<List<String>>> includeOnly,
Optional<Supplier<List<String>>> exclude,
Optional<Boolean> includeMetadata,
Optional<Boolean> includeStatus,
Optional<Long> before,
Optional<Long> after,
Optional<Integer> limit,
Optional<Integer> offset,
Optional<Boolean> reverse) {
// retain only candidates with names
Stream<ArbitraryResourceData> stream = candidates.stream().filter(candidate -> candidate.name != null);
// filter by service
if( service.isPresent() )
stream = stream.filter(candidate -> candidate.service.equals(service.get()));
// filter by query (either identifier, name, title or description)
if (query.isPresent()) {
Predicate<String> predicate
= prefixOnly ? getPrefixPredicate(query.get()) : getContainsPredicate(query.get());
if (defaultResource) {
stream = stream.filter( candidate -> DEFAULT_IDENTIFIER.equals( candidate.identifier ) && predicate.test(candidate.name));
} else {
stream = stream.filter( candidate -> passQuery(predicate, candidate));
}
}
// filter for identifier, title and description
stream = filterTerm(identifier, data -> data.identifier, prefixOnly, stream);
stream = filterTerm(title, data -> data.metadata != null ? data.metadata.getTitle() : null, prefixOnly, stream);
stream = filterTerm(description, data -> data.metadata != null ? data.metadata.getDescription() : null, prefixOnly, stream);
// if exact names is set, retain resources with exact names
if( exactMatchNames.isPresent() && !exactMatchNames.get().isEmpty()) {
// key the data by lower case name
Map<String, List<ArbitraryResourceData>> dataByName
= stream.collect(Collectors.groupingBy(data -> data.name.toLowerCase()));
// lower the case of the exact names
// retain the lower case names of the data above
List<String> exactNamesToSearch
= exactMatchNames.get().stream()
.map(String::toLowerCase)
.collect(Collectors.toList());
exactNamesToSearch.retainAll(dataByName.keySet());
// get the data for the names retained and
// set them to the stream
stream
= dataByName.entrySet().stream()
.filter(entry -> exactNamesToSearch.contains(entry.getKey())).flatMap(entry -> entry.getValue().stream());
}
// if exact names is not set, retain resources that match
else if( names.isPresent() && !names.get().isEmpty() ) {
stream = retainTerms(names.get(), data -> data.name, prefixOnly, stream);
}
// filter for minimum account level
if(minLevel.isPresent())
stream = stream.filter( candidate -> levelByName.getOrDefault(candidate.name, 0) >= minLevel.get() );
// if latest mode or empty
if( LATEST.equals( mode.orElse( LATEST ) ) ) {
// Include latest item only for a name/service combination
stream
= stream.filter(candidate -> candidate.service != null && candidate.created != null ).collect(
Collectors.groupingBy(
data -> new AbstractMap.SimpleEntry<>(data.name, data.service), // name, service combination
Collectors.maxBy(Comparator.comparingLong(data -> data.created)) // latest data item
)).values().stream().filter(Optional::isPresent).map(Optional::get); // if there is a value for the group, then retain it
}
// sort
if( reverse.isPresent() && reverse.get())
stream = stream.sorted(CREATED_WHEN_COMPARATOR.reversed());
else
stream = stream.sorted(CREATED_WHEN_COMPARATOR);
// skip to offset
if( offset.isPresent() ) stream = stream.skip(offset.get());
// truncate to limit
if( limit.isPresent() && limit.get() > 0 ) stream = stream.limit(limit.get());
// include metadata
if( includeMetadata.isEmpty() || !includeMetadata.get() )
stream = stream.peek( candidate -> candidate.metadata = null );
// include status
if( includeStatus.isEmpty() || !includeStatus.get() )
stream = stream.peek( candidate -> candidate.status = null);
return stream.collect(Collectors.toList());
}
/**
* Filter Terms
*
* @param term the term to filter
* @param stringSupplier the string of interest from the resource candidates
* @param prefixOnly true if prefix only, false for contains
* @param stream the stream of candidates
*
* @return the stream that filtered the term
*/
private static Stream<ArbitraryResourceData> filterTerm(
Optional<String> term,
Function<ArbitraryResourceData,String> stringSupplier,
boolean prefixOnly,
Stream<ArbitraryResourceData> stream) {
if(term.isPresent()){
Predicate<String> predicate
= prefixOnly ? getPrefixPredicate(term.get()): getContainsPredicate(term.get());
stream = stream.filter(candidate -> predicate.test(stringSupplier.apply(candidate)));
}
return stream;
}
/**
* Retain Terms
*
* Retain resources that satisfy terms given.
*
* @param terms the terms to retain
* @param stringSupplier the string of interest from the resource candidates
* @param prefixOnly true if prefix only, false for contains
* @param stream the stream of candidates
*
* @return the stream that retained the terms
*/
private static Stream<ArbitraryResourceData> retainTerms(
List<String> terms,
Function<ArbitraryResourceData,String> stringSupplier,
boolean prefixOnly,
Stream<ArbitraryResourceData> stream) {
// collect the data to process and initialize the data to retain
List<ArbitraryResourceData> toProcess = stream.collect(Collectors.toList());
List<ArbitraryResourceData> toRetain = new ArrayList<>();
// for each term, get the predicate, get a new stream process and
// apply the predicate to each data item in the stream
for( String term : terms ) {
Predicate<String> predicate
= prefixOnly ? getPrefixPredicate(term) : getContainsPredicate(term);
toRetain.addAll(
toProcess.stream()
.filter(candidate -> predicate.test(stringSupplier.apply(candidate)))
.collect(Collectors.toList())
);
}
return toRetain.stream();
}
private static Predicate<String> getContainsPredicate(String term) {
return value -> value != null && value.toLowerCase().contains(term.toLowerCase());
}
private static Predicate<String> getPrefixPredicate(String term) {
return value -> value != null && value.toLowerCase().startsWith(term.toLowerCase());
}
/**
* Pass Query
*
* Compare name, identifier, title and description
*
* @param predicate the string comparison predicate
* @param candidate the candidate to compare
*
* @return true if there is a match, otherwise false
*/
private static boolean passQuery(Predicate<String> predicate, ArbitraryResourceData candidate) {
if( predicate.test(candidate.name) ) return true;
if( predicate.test(candidate.identifier) ) return true;
if( candidate.metadata != null ) {
if( predicate.test(candidate.metadata.getTitle() )) return true;
if( predicate.test(candidate.metadata.getDescription())) return true;
}
return false;
}
/**
* Start Caching
*
* @param priorityRequested the thread priority to fill cache in
* @param frequency the frequency to fill the cache (in seconds)
* @param repository the data source
*
* @return the data cache
*/
public static ArbitraryResourceCache startCaching(int priorityRequested, int frequency, HSQLDBRepository repository) {
final ArbitraryResourceCache cache = ArbitraryResourceCache.getInstance();
// clamp priority into the valid 1-10 range (thread priorities below 1 are illegal)
final int priority = Math.max(1, Math.min(10, priorityRequested));
// Create a custom Timer with updated priority threads
Timer timer = new Timer(true) { // 'true' to make the Timer daemon
@Override
public void schedule(TimerTask task, long delay) {
Thread thread = new Thread(task) {
@Override
public void run() {
this.setPriority(priority);
super.run();
}
};
thread.setPriority(priority);
thread.start();
}
};
TimerTask task = new TimerTask() {
@Override
public void run() {
fillCache(ArbitraryResourceCache.getInstance(), repository);
}
};
// delay 1 second
timer.scheduleAtFixedRate(task, 1000, frequency * 1000);
return cache;
}
/**
* Fill Cache
*
* @param cache the cache to fill
* @param repository the data source to fill the cache with
*/
public static void fillCache(ArbitraryResourceCache cache, HSQLDBRepository repository) {
try {
// ensure all data is committed in, before we query it
repository.saveChanges();
List<ArbitraryResourceData> resources = getResources(repository);
Map<Integer, List<ArbitraryResourceData>> dataByService
= resources.stream()
.collect(Collectors.groupingBy(data -> data.service.value));
// lock, clear and refill
synchronized (cache.getDataByService()) {
cache.getDataByService().clear();
cache.getDataByService().putAll(dataByService);
}
fillNameMap(cache.getLevelByName(), repository);
}
catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}
/**
* Fill Name Map
*
* Name -> Level
*
* @param levelByName the map to fill
* @param repository the data source
*
* @throws SQLException
*/
private static void fillNameMap(ConcurrentHashMap<String, Integer> levelByName, HSQLDBRepository repository) throws SQLException {
StringBuilder sql = new StringBuilder(512);
sql.append("SELECT name, level ");
sql.append("FROM NAMES ");
sql.append("INNER JOIN ACCOUNTS on owner = account ");
Statement statement = repository.connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql.toString());
if (resultSet == null)
return;
if (!resultSet.next())
return;
do {
levelByName.put(resultSet.getString(1), resultSet.getInt(2));
} while(resultSet.next());
}
/**
* Get Resource
*
* @param repository source data
*
* @return the resources
* @throws SQLException
*/
private static List<ArbitraryResourceData> getResources( HSQLDBRepository repository) throws SQLException {
List<ArbitraryResourceData> resources = new ArrayList<>();
StringBuilder sql = new StringBuilder(512);
sql.append("SELECT name, service, identifier, size, status, created_when, updated_when, ");
sql.append("title, description, category, tag1, tag2, tag3, tag4, tag5 ");
sql.append("FROM ArbitraryResourcesCache ");
sql.append("LEFT JOIN ArbitraryMetadataCache USING (service, name, identifier) WHERE name IS NOT NULL");
List<ArbitraryResourceData> arbitraryResources = new ArrayList<>();
Statement statement = repository.connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql.toString());
if (resultSet == null)
return resources;
if (!resultSet.next())
return resources;
do {
String nameResult = resultSet.getString(1);
int serviceResult = resultSet.getInt(2);
String identifierResult = resultSet.getString(3);
Integer sizeResult = resultSet.getInt(4);
Integer status = resultSet.getInt(5);
Long created = resultSet.getLong(6);
Long updated = resultSet.getLong(7);
String titleResult = resultSet.getString(8);
String descriptionResult = resultSet.getString(9);
String category = resultSet.getString(10);
String tag1 = resultSet.getString(11);
String tag2 = resultSet.getString(12);
String tag3 = resultSet.getString(13);
String tag4 = resultSet.getString(14);
String tag5 = resultSet.getString(15);
if (Objects.equals(identifierResult, "default")) {
// Map "default" back to null. This is optional but probably less confusing than returning "default".
identifierResult = null;
}
ArbitraryResourceData arbitraryResourceData = new ArbitraryResourceData();
arbitraryResourceData.name = nameResult;
arbitraryResourceData.service = Service.valueOf(serviceResult);
arbitraryResourceData.identifier = identifierResult;
arbitraryResourceData.size = sizeResult;
arbitraryResourceData.created = created;
arbitraryResourceData.updated = (updated == 0) ? null : updated;
arbitraryResourceData.setStatus(ArbitraryResourceStatus.Status.valueOf(status));
ArbitraryResourceMetadata metadata = new ArbitraryResourceMetadata();
metadata.setTitle(titleResult);
metadata.setDescription(descriptionResult);
metadata.setCategory(Category.uncategorizedValueOf(category));
List<String> tags = new ArrayList<>();
if (tag1 != null) tags.add(tag1);
if (tag2 != null) tags.add(tag2);
if (tag3 != null) tags.add(tag3);
if (tag4 != null) tags.add(tag4);
if (tag5 != null) tags.add(tag5);
metadata.setTags(!tags.isEmpty() ? tags : null);
if (metadata.hasMetadata()) {
arbitraryResourceData.metadata = metadata;
}
resources.add( arbitraryResourceData );
} while (resultSet.next());
return resources;
}
}
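A note on startCaching: Timer.scheduleAtFixedRate runs tasks on the Timer's own internal thread, so the overridden schedule(TimerTask, long) above is never invoked and the requested priority never reaches the timer thread itself. A hedged alternative sketch, not what this commit does, reusing the commit's own priority-aware DaemonThreadFactory (java.util.concurrent imports assumed):

ScheduledExecutorService cacheExecutor = Executors.newSingleThreadScheduledExecutor(
    new DaemonThreadFactory("HSQLDB-Cache", priority));
cacheExecutor.scheduleAtFixedRate(
    () -> fillCache(ArbitraryResourceCache.getInstance(), repository),
    1, frequency, TimeUnit.SECONDS);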

View File

@@ -378,6 +378,66 @@ public class Settings {
* Exclude from settings.json to disable this warning. */
private Integer threadCountPerMessageTypeWarningThreshold = null;
/**
* DB Cache Enabled?
*/
private boolean dbCacheEnabled = false;
/**
* DB Cache Thread Priority
*
* If DB Cache is disabled, then this is ignored. If the value is lower than 1, then 1 is used. If the value is
* higher than 10, then 10 is used.
*/
private int dbCacheThreadPriority = 1;
/**
* DB Cache Frequency
*
* The number of seconds in between DB cache updates. If DB Cache is disabled, then this is ignored.
*/
private int dbCacheFrequency = 120;
/**
* Network Thread Priority
*
* The thread priority (1 is lowest, 10 is highest) of the threads used for network peer connections. This is the
* main thread connecting to a peer in the network.
*/
private int networkThreadPriority = 5;
/**
* The Handshake Thread Priority
*
* The thread priority (1 is lowest, 10 is highest) of the threads used for peer handshake messaging. This is a
* secondary thread that exchanges status messages with a peer in the network.
*/
private int handshakeThreadPriority = 5;
/**
* Pruning Thread Priority
*
* The thread priority (1 is lowest, 10 is highest) of the threads used for database pruning and trimming.
*/
private int pruningThreadPriority = 1;
/**
* Synchronizer Thread Priority
*
* The thread priority (1 is lowest, 10 is highest) of the threads used for synchronizing with other peers.
*/
private int synchronizerThreadPriority = 10;
/**
* Archiving Pause
*
* In milliseconds
*
* The pause in between archiving blocks to allow other processes to execute.
*/
private long archivingPause = 3000;
// Domain mapping
public static class ThreadLimit {
@@ -1132,4 +1192,36 @@ public class Settings {
public Integer getThreadCountPerMessageTypeWarningThreshold() {
return this.threadCountPerMessageTypeWarningThreshold;
}
public boolean isDbCacheEnabled() {
return dbCacheEnabled;
}
public int getDbCacheThreadPriority() {
return dbCacheThreadPriority;
}
public int getDbCacheFrequency() {
return dbCacheFrequency;
}
public int getNetworkThreadPriority() {
return networkThreadPriority;
}
public int getHandshakeThreadPriority() {
return handshakeThreadPriority;
}
public int getPruningThreadPriority() {
return pruningThreadPriority;
}
public int getSynchronizerThreadPriority() {
return synchronizerThreadPriority;
}
public long getArchivingPause() {
return archivingPause;
}
}
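
Taken together, these getters let each subsystem pick up its thread priority from settings.json at startup. A minimal usage sketch, assuming a caller spawns its own worker thread (the wrapper class below is illustrative, not part of this commit):

import org.qortal.settings.Settings;

public class ThreadPrioritySketch {
    public static void main(String[] args) {
        // Read the configured synchronizer priority (defaults to 10).
        int priority = Settings.getInstance().getSynchronizerThreadPriority();

        Thread worker = new Thread(() -> System.out.println("synchronizing..."));
        // Clamp to java.lang.Thread's legal 1..10 range, matching the Javadoc above.
        worker.setPriority(Math.max(Thread.MIN_PRIORITY, Math.min(Thread.MAX_PRIORITY, priority)));
        worker.start();
    }
}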

View File

@ -8,19 +8,22 @@ public class DaemonThreadFactory implements ThreadFactory {
private final String name;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private int priority = Thread.NORM_PRIORITY;
public DaemonThreadFactory(String name) {
public DaemonThreadFactory(String name, int priority) {
this.name = name;
this.priority = priority;
}
public DaemonThreadFactory() {
this(null);
public DaemonThreadFactory(int priority) {
this(null, priority);
}
@Override
public Thread newThread(Runnable runnable) {
Thread thread = Executors.defaultThreadFactory().newThread(runnable);
thread.setDaemon(true);
thread.setPriority(this.priority);
if (this.name != null)
thread.setName(this.name + "-" + this.threadNumber.getAndIncrement());
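
With the new priority parameter, callers choose both naming and scheduling when building executors. A hedged usage sketch, assuming the factory lives in org.qortal.utils (the wrapper class is illustrative only):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.qortal.utils.DaemonThreadFactory;

public class DaemonFactorySketch {
    public static void main(String[] args) {
        // Daemon threads named "Cleanup-1", "Cleanup-2", ... at minimum priority,
        // so background work yields to consensus-critical threads.
        ExecutorService pool = Executors.newFixedThreadPool(
                2, new DaemonThreadFactory("Cleanup", Thread.MIN_PRIORITY));

        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}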

View File

@ -8,15 +8,18 @@ public class NamedThreadFactory implements ThreadFactory {
private final String name;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private final int priority;
public NamedThreadFactory(String name) {
public NamedThreadFactory(String name, int priority) {
this.name = name;
this.priority = priority;
}
@Override
public Thread newThread(Runnable runnable) {
Thread thread = Executors.defaultThreadFactory().newThread(runnable);
thread.setName(this.name + "-" + this.threadNumber.getAndIncrement());
thread.setPriority(this.priority);
return thread;
}

View File

@ -0,0 +1,645 @@
package org.qortal.test.repository;
import org.junit.Assert;
import org.junit.Test;
import org.qortal.api.SearchMode;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
public class HSQLDBCacheUtilsTests {
private static final Map<String, Integer> NAME_LEVEL = Map.of("Joe", 4);
private static final String SERVICE = "service";
private static final String QUERY = "query";
private static final String IDENTIFIER = "identifier";
private static final String NAMES = "names";
private static final String TITLE = "title";
private static final String DESCRIPTION = "description";
private static final String PREFIX_ONLY = "prefixOnly";
private static final String EXACT_MATCH_NAMES = "exactMatchNames";
private static final String DEFAULT_RESOURCE = "defaultResource";
private static final String MODE = "mode";
private static final String MIN_LEVEL = "minLevel";
private static final String FOLLOWED_ONLY = "followedOnly";
private static final String EXCLUDE_BLOCKED = "excludeBlocked";
private static final String INCLUDE_METADATA = "includeMetadata";
private static final String INCLUDE_STATUS = "includeStatus";
private static final String BEFORE = "before";
private static final String AFTER = "after";
private static final String LIMIT = "limit";
private static final String OFFSET = "offset";
private static final String REVERSE = "reverse";
@Test
public void test000EmptyQuery() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "joe";
List<ArbitraryResourceData> candidates = List.of(data);
filterListByMap(candidates, NAME_LEVEL, new HashMap<>(Map.of()), 1);
}
@Test
public void testLatestModeNoService() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(MODE, SearchMode.LATEST )),
0);
}
@Test
public void testLatestModeNoCreated() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "joe";
data.service = Service.FILE;
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(MODE, SearchMode.LATEST )),
0);
}
@Test
public void testLatestModeReturnFirst() {
ArbitraryResourceData first = new ArbitraryResourceData();
first.name = "joe";
first.service = Service.FILE;
first.created = 1L;
ArbitraryResourceData last = new ArbitraryResourceData();
last.name = "joe";
last.service = Service.FILE;
last.created = 2L;
List<ArbitraryResourceData>
results = filterListByMap(
List.of(first, last),
NAME_LEVEL,
new HashMap<>(Map.of(MODE, SearchMode.LATEST)),
1
);
ArbitraryResourceData singleResult = results.get(0);
Assert.assertTrue( singleResult.created == 2 );
}
@Test
public void testLatestModeReturn2From4() {
ArbitraryResourceData firstFile = new ArbitraryResourceData();
firstFile.name = "joe";
firstFile.service = Service.FILE;
firstFile.created = 1L;
ArbitraryResourceData lastFile = new ArbitraryResourceData();
lastFile.name = "joe";
lastFile.service = Service.FILE;
lastFile.created = 2L;
ArbitraryResourceData firstAudio = new ArbitraryResourceData();
firstAudio.name = "joe";
firstAudio.service = Service.AUDIO;
firstAudio.created = 3L;
ArbitraryResourceData lastAudio = new ArbitraryResourceData();
lastAudio.name = "joe";
lastAudio.service = Service.AUDIO;
lastAudio.created = 4L;
// LATEST mode keeps only the newest entry per name/service pair, so the
// four candidates above collapse to one FILE result and one AUDIO result.
filterListByMap(
List.of(firstFile, lastFile, firstAudio, lastAudio),
NAME_LEVEL,
new HashMap<>(Map.of(MODE, SearchMode.LATEST)),
2
);
}
@Test
public void testServicePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.service = Service.AUDIO;
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(SERVICE, Service.AUDIO)),
1
);
}
@Test
public void testQueryPositiveDescription() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("has keyword");
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(QUERY, "keyword")),
1
);
}
@Test
public void testQueryNegativeDescriptionPrefix() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("has keyword");
data.name = "Joe";
filterListByMap(List.of(data),
NAME_LEVEL, new HashMap<>((Map.of(QUERY, "keyword", PREFIX_ONLY, true))),
0);
}
@Test
public void testQueryPositiveDescriptionPrefix() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("keyword starts sentence");
data.name = "Joe";
filterListByMap(List.of(data),
NAME_LEVEL, new HashMap<>((Map.of(QUERY, "keyword", PREFIX_ONLY, true))),
1);
}
@Test
public void testQueryNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "admin";
data.identifier = "id-0";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(QUERY, "keyword")),
0
);
}
@Test
public void testExactNamePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(EXACT_MATCH_NAMES,List.of("Joe"))),
1
);
}
@Test
public void testExactNameNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(EXACT_MATCH_NAMES,List.of("Joey"))),
0
);
}
@Test
public void testNamePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Mr Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(NAMES,List.of("Joe"))),
1
);
}
@Test
public void testNameNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Jay";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(NAMES,List.of("Joe"))),
0
);
}
@Test
public void testNamePrefixOnlyPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joey";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(NAMES,List.of("Joe"), PREFIX_ONLY, true)),
1
);
}
@Test
public void testNamePrefixOnlyNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(NAMES,List.of("Joey"), PREFIX_ONLY, true)),
0
);
}
@Test
public void testIdentifierPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.identifier = "007";
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(IDENTIFIER, "007")),
1
);
}
@Test
public void testAfterPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.created = 10L;
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(AFTER, 9L)),
1
);
}
@Test
public void testBeforePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.created = 10L;
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(BEFORE, 11L)),
1
);
}
@Test
public void testTitlePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setTitle("Sunday");
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(TITLE, "Sunday")),
1
);
}
@Test
public void testDescriptionPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.metadata.setDescription("Once upon a time.");
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(DESCRIPTION, "Once upon a time.")),
1
);
}
@Test
public void testMinLevelPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(MIN_LEVEL, 4)),
1
);
}
@Test
public void testMinLevelNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(MIN_LEVEL, 5)),
0
);
}
@Test
public void testDefaultResourcePositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(DEFAULT_RESOURCE, true)),
1
);
}
@Test
public void testFollowedNamesPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
Supplier<List<String>> supplier = () -> List.of("admin");
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(FOLLOWED_ONLY, supplier)),
1
);
}
@Test
public void testExcludeBlockedPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
Supplier<List<String>> supplier = () -> List.of("admin");
filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(EXCLUDE_BLOCKED, supplier)),
1
);
}
@Test
public void testIncludeMetadataPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.name = "Joe";
ArbitraryResourceData result
= filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(INCLUDE_METADATA, true)),
1
).get(0);
Assert.assertNotNull(result);
Assert.assertNotNull(result.metadata);
}
@Test
public void testIncludeMetadataNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.metadata = new ArbitraryResourceMetadata();
data.name = "Joe";
ArbitraryResourceData result
= filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(INCLUDE_METADATA, false)),
1
).get(0);
Assert.assertNotNull(result);
Assert.assertNull(result.metadata);
}
@Test
public void testIncludeStatusPositive() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.status = new ArbitraryResourceStatus();
data.name = "Joe";
ArbitraryResourceData result
= filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(INCLUDE_STATUS, true)),
1
).get(0);
Assert.assertNotNull(result);
Assert.assertNotNull(result.status);
}
@Test
public void testIncludeStatusNegative() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.status = new ArbitraryResourceStatus();
data.name = "Joe";
ArbitraryResourceData result
= filterListByMap(
List.of(data),
NAME_LEVEL, new HashMap<>(Map.of(INCLUDE_STATUS, false)),
1
).get(0);
Assert.assertNotNull(result);
Assert.assertNull(result.status);
}
@Test
public void testLimit() {
ArbitraryResourceData data1 = new ArbitraryResourceData();
data1.name = "Joe";
ArbitraryResourceData data2 = new ArbitraryResourceData();
data2.name = "Joe";
ArbitraryResourceData data3 = new ArbitraryResourceData();
data3.name = "Joe";
filterListByMap(
List.of(data1, data2, data3),
NAME_LEVEL, new HashMap<>(Map.of(LIMIT, 2)),
2
);
}
@Test
public void testLimitZero() {
ArbitraryResourceData data = new ArbitraryResourceData();
data.name = "Joe";
filterListByMap(
List.of(data),
NAME_LEVEL,
new HashMap<>(Map.of(LIMIT, 0)),
1
);
}
@Test
public void testOffset() {
ArbitraryResourceData data1 = new ArbitraryResourceData();
data1.created = 1L;
data1.name = "Joe";
ArbitraryResourceData data2 = new ArbitraryResourceData();
data2.created = 2L;
data2.name = "Joe";
ArbitraryResourceData data3 = new ArbitraryResourceData();
data3.created = 3L;
data3.name = "Joe";
List<ArbitraryResourceData> result
= filterListByMap(
List.of(data1, data2, data3),
NAME_LEVEL, new HashMap<>(Map.of(OFFSET, 1)),
2
);
Assert.assertNotNull(result.get(0));
Assert.assertTrue(2L == result.get(0).created);
}
@Test
public void testOrder() {
ArbitraryResourceData data2 = new ArbitraryResourceData();
data2.created = 2L;
data2.name = "Joe";
ArbitraryResourceData data1 = new ArbitraryResourceData();
data1.created = 1L;
data1.name = "Joe";
List<ArbitraryResourceData> result
= filterListByMap(
List.of(data2, data1),
NAME_LEVEL, new HashMap<>(),
2
);
Assert.assertNotNull(result.get(0));
Assert.assertTrue( result.get(0).created == 1L );
}
@Test
public void testReverseOrder() {
ArbitraryResourceData data1 = new ArbitraryResourceData();
data1.created = 1L;
data1.name = "Joe";
ArbitraryResourceData data2 = new ArbitraryResourceData();
data2.created = 2L;
data2.name = "Joe";
List<ArbitraryResourceData> result
= filterListByMap(
List.of(data1, data2),
NAME_LEVEL, new HashMap<>(Map.of(REVERSE, true)),
2);
Assert.assertNotNull( result.get(0));
Assert.assertTrue( result.get(0).created == 2L);
}
public static List<ArbitraryResourceData> filterListByMap(
List<ArbitraryResourceData> candidates,
Map<String, Integer> levelByName,
HashMap<String, Object> valueByKey,
int sizeToAssert) {
Optional<Service> service = Optional.ofNullable((Service) valueByKey.get(SERVICE));
Optional<String> query = Optional.ofNullable( (String) valueByKey.get(QUERY));
Optional<String> identifier = Optional.ofNullable((String) valueByKey.get(IDENTIFIER));
Optional<List<String>> names = Optional.ofNullable((List<String>) valueByKey.get(NAMES));
Optional<String> title = Optional.ofNullable((String) valueByKey.get(TITLE));
Optional<String> description = Optional.ofNullable((String) valueByKey.get(DESCRIPTION));
boolean prefixOnly = valueByKey.containsKey(PREFIX_ONLY);
Optional<List<String>> exactMatchNames = Optional.ofNullable((List<String>) valueByKey.get(EXACT_MATCH_NAMES));
boolean defaultResource = valueByKey.containsKey(DEFAULT_RESOURCE);
Optional<SearchMode> mode = Optional.of((SearchMode) valueByKey.getOrDefault(MODE, SearchMode.ALL));
Optional<Integer> minLevel = Optional.ofNullable((Integer) valueByKey.get(MIN_LEVEL));
Optional<Supplier<List<String>>> followedOnly = Optional.ofNullable((Supplier<List<String>>) valueByKey.get(FOLLOWED_ONLY));
Optional<Supplier<List<String>>> excludeBlocked = Optional.ofNullable((Supplier<List<String>>) valueByKey.get(EXCLUDE_BLOCKED));
Optional<Boolean> includeMetadata = Optional.ofNullable((Boolean) valueByKey.get(INCLUDE_METADATA));
Optional<Boolean> includeStatus = Optional.ofNullable((Boolean) valueByKey.get(INCLUDE_STATUS));
Optional<Long> before = Optional.ofNullable((Long) valueByKey.get(BEFORE));
Optional<Long> after = Optional.ofNullable((Long) valueByKey.get(AFTER));
Optional<Integer> limit = Optional.ofNullable((Integer) valueByKey.get(LIMIT));
Optional<Integer> offset = Optional.ofNullable((Integer) valueByKey.get(OFFSET));
Optional<Boolean> reverse = Optional.ofNullable((Boolean) valueByKey.get(REVERSE));
List<ArbitraryResourceData> filteredList
= HSQLDBCacheUtils.filterList(
candidates,
levelByName,
mode,
service,
query,
identifier,
names,
title,
description,
prefixOnly,
exactMatchNames,
defaultResource,
minLevel,
followedOnly,
excludeBlocked,
includeMetadata,
includeStatus,
before,
after,
limit,
offset,
reverse);
Assert.assertEquals(sizeToAssert, filteredList.size());
return filteredList;
}
}
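
For reference, a sketch of calling HSQLDBCacheUtils.filterList directly, outside the test harness; the argument order mirrors filterListByMap above, and the wrapper class is illustrative only:

import java.util.List;
import java.util.Map;
import java.util.Optional;

import org.qortal.api.SearchMode;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.repository.hsqldb.HSQLDBCacheUtils;

public class FilterListSketch {
    // Return the latest AUDIO resource per name, given a candidate list and level map.
    public static List<ArbitraryResourceData> latestAudio(List<ArbitraryResourceData> candidates) {
        return HSQLDBCacheUtils.filterList(
                candidates,
                Map.of("Joe", 4),                // levelByName
                Optional.of(SearchMode.LATEST),  // mode
                Optional.of(Service.AUDIO),      // service
                Optional.empty(),                // query
                Optional.empty(),                // identifier
                Optional.empty(),                // names
                Optional.empty(),                // title
                Optional.empty(),                // description
                false,                           // prefixOnly
                Optional.empty(),                // exactMatchNames
                false,                           // defaultResource
                Optional.empty(),                // minLevel
                Optional.empty(),                // followedOnly
                Optional.empty(),                // excludeBlocked
                Optional.empty(),                // includeMetadata
                Optional.empty(),                // includeStatus
                Optional.empty(),                // before
                Optional.empty(),                // after
                Optional.empty(),                // limit
                Optional.empty(),                // offset
                Optional.empty());               // reverse
    }
}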