3
0
mirror of https://github.com/Qortal/qortal.git synced 2025-02-14 11:15:49 +00:00

Regularly clean up old and unused files/folders in the temp directory

Also added code to purge built resource caches, but it is currently disabled. This will become more useful once we implement local storage limits.
This commit is contained in:
CalDescent 2021-08-20 19:27:42 +01:00
parent 6cb39795a9
commit 4ba72f7eeb
2 changed files with 99 additions and 23 deletions

View File

@ -3,10 +3,7 @@ package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.controller.Controller;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
@ -14,8 +11,13 @@ import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction.TransactionType;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@ -35,12 +37,20 @@ public class ArbitraryDataCleanupManager extends Thread {
* rebuilding them. The main purpose of this is to avoid deleting files that are currently
* being used by other parts of the system.
*/
private static long STALE_FILE_TIMEOUT = 60*60*1000; // 1 hour
private static long STALE_FILE_TIMEOUT = 60*60*1000L; // 1 hour
/**
* The amount of time that must pass before a built resource is cleaned up. This should be
* considerably longer than STALE_FILE_TIMEOUT because building a resource is costly. Longer
* term we could consider tracking when each resource is requested, and only delete those
* that haven't been requested for a large amount of time. We could also consider only purging
* built resources when the disk space is getting low.
*/
private static long PURGE_BUILT_RESOURCES_TIMEOUT = 30*24*60*60*1000L; // 30 days
/*
TODO:
- Delete old files from _temp
- Delete old files not associated with transactions
*/
@ -72,11 +82,17 @@ public class ArbitraryDataCleanupManager extends Thread {
while (!isStopping) {
Thread.sleep(30000);
if (NTP.getTime() == null) {
Long now = NTP.getTime();
if (now == null) {
// Don't attempt to make decisions if we haven't synced our time yet
continue;
}
// Periodically delete any unnecessary files from the temp directory
if (offset == 0 || offset % (limit * 10) == 0) {
this.cleanupTempDirectory(now);
}
// Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, ConfirmationStatus.BOTH, limit, offset, true);
@ -86,7 +102,7 @@ public class ArbitraryDataCleanupManager extends Thread {
continue;
}
offset += limit;
Long now = NTP.getTime();
now = NTP.getTime();
// Loop through the signatures in this batch
for (int i=0; i<signatures.size(); i++) {
@ -167,6 +183,62 @@ public class ArbitraryDataCleanupManager extends Thread {
}
}
/**
 * Deletes stale entries from a single subdirectory of the temp data path,
 * then removes the (now empty) subdirectory itself if it contained nothing.
 *
 * @param folder name of the subdirectory under the temp data path (e.g. "diff", "join")
 * @param now    current NTP time, in milliseconds
 * @param minAge minimum age in milliseconds before an entry is considered stale
 */
private void cleanupTempDirectory(String folder, long now, long minAge) {
    String baseDir = Settings.getInstance().getTempDataPath();
    Path tempDir = Paths.get(baseDir, folder);
    int contentsCount = 0;

    // Loop through the contents and check each one
    final File[] directories = tempDir.toFile().listFiles();
    if (directories != null) {
        for (final File directory : directories) {
            contentsCount++;

            // We're expecting the contents of each subfolder to be a directory
            if (directory.isDirectory()) {
                if (!ArbitraryTransactionUtils.isFileRecent(directory.toPath(), now, minAge)) {
                    // File isn't recent, so can be deleted
                    LOGGER.info("Deleting directory {}", directory);
                    try {
                        FilesystemUtils.safeDeleteDirectory(directory.toPath(), true);
                    } catch (IOException e) {
                        // Pass the exception to the logger so the cause isn't lost
                        LOGGER.info("Unable to delete directory: {}", directory, e);
                    }
                }
            }
        }
    }

    // If the directory is empty, we still need to delete its parent folder
    if (contentsCount == 0 && tempDir.toFile().exists()) {
        try {
            LOGGER.info("Parent directory {} is empty, so deleting it", tempDir);
            FilesystemUtils.safeDeleteDirectory(tempDir, false);
        } catch (IOException e) {
            // Pass the exception to the logger so the cause isn't lost
            LOGGER.info("Unable to delete parent directory: {}", tempDir, e);
        }
    }
}
private void cleanupTempDirectory(long now) {
    // The intermediate build directories aren't used for serving content -
    // only for building it - so the "stale file timeout" is long enough:
    // once files there have gone stale, it's safe to delete them.
    final String[] intermediateFolders = { "diff", "join", "merge", "writer" };
    for (String intermediateFolder : intermediateFolders) {
        this.cleanupTempDirectory(intermediateFolder, now, STALE_FILE_TIMEOUT);
    }

    // Built resources are served out of the "reader" directory so these
    // need to be kept around for much longer.
    // Purging currently disabled, as it's not very helpful. Will revisit
    // once we implement local storage limits.
    // this.cleanupTempDirectory("reader", now, PURGE_BUILT_RESOURCES_TIMEOUT);
}
public void shutdown() {
isStopping = true;
this.interrupt();

View File

@ -3,7 +3,6 @@ package org.qortal.utils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.controller.arbitrary.ArbitraryDataCleanupManager;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.DataException;
@ -152,31 +151,36 @@ public class ArbitraryTransactionUtils {
return arbitraryDataFile.chunkCount();
}
public static boolean isFileHashRecent(byte[] hash, long now, long cleanupAfter) {
public static boolean isFileRecent(Path filePath, long now, long cleanupAfter) {
try {
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash);
if (arbitraryDataFile == null || !arbitraryDataFile.exists()) {
// No hash, or file doesn't exist, so it's not recent
return false;
}
Path filePath = arbitraryDataFile.getFilePath();
BasicFileAttributes attr = Files.readAttributes(filePath, BasicFileAttributes.class);
long timeSinceCreated = now - attr.creationTime().toMillis();
long timeSinceModified = now - attr.lastModifiedTime().toMillis();
//LOGGER.info(String.format("timeSinceCreated for path %s is %d. cleanupAfter: %d", filePath, timeSinceCreated, cleanupAfter));
// Check if the file has been created or modified recently
if (timeSinceCreated < cleanupAfter) {
return true;
if (timeSinceCreated > cleanupAfter) {
return false;
}
if (timeSinceModified < cleanupAfter) {
return true;
if (timeSinceModified > cleanupAfter) {
return false;
}
} catch (IOException e) {
// Can't read file attributes, so assume it's not recent
// Can't read file attributes, so assume it's recent so that we don't delete something accidentally
}
return false;
return true;
}
/**
 * Returns true if the file on disk corresponding to {@code hash} exists and
 * was created or modified within the last {@code cleanupAfter} milliseconds.
 */
public static boolean isFileHashRecent(byte[] hash, long now, long cleanupAfter) {
    ArbitraryDataFile dataFile = ArbitraryDataFile.fromHash(hash);
    boolean fileMissing = (dataFile == null || !dataFile.exists());
    if (fileMissing) {
        // No hash, or file doesn't exist, so it's not recent
        return false;
    }
    // Delegate the age check to the path-based helper
    return ArbitraryTransactionUtils.isFileRecent(dataFile.getFilePath(), now, cleanupAfter);
}
public static void deleteCompleteFile(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) {