Mirror of https://github.com/Qortal/qortal.git (synced 2025-03-16 12:12:32 +00:00)
Major rework of chunk hashes
Chunk hashes are now stored off chain in a metadata file, and the metadata file's hash is then included in the transaction instead. The main benefits of this approach are:

1. We no longer need to limit the total file size, because adding more chunks doesn't increase the transaction size.

2. It increases chain capacity by a huge amount: a 512MB file would previously have added 16kB of chunk hashes to the transaction, whereas it now requires only an additional 32 bytes (see the worked example below).

3. We no longer need to use variable difficulty; every transaction is the same size, so the difficulty can be constant no matter how large the files are.

4. Additional metadata (such as title, description, and tags) can ultimately be stored in the metadata file, as opposed to using a separate transaction & resource.

5. There is also scope for adding hashes of individual files into the metadata file, if we ever wanted to allow single files to be requested without having to download and build the entire resource. Although this is unlikely to be available in the short term.

The only real negative is that we now have to fetch the metadata file before we know anything about the chunks for a transaction. This seems to be quite a small trade-off by comparison.

Since we're not live yet, there is no backwards support for on-chain hashes, so a new data testchain will be required. This hasn't been tested outside of unit tests yet, so there will likely be several fixes needed before it is stable.
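
To make the capacity figures in point 2 concrete, here is a back-of-envelope sketch. It assumes 1 MiB chunks and 32-byte SHA-256 chunk hashes; the chunk size itself isn't stated in this diff, but those assumptions reproduce the 16kB and 32-byte numbers quoted above:

    public class ChunkOverheadSketch {
        static final long CHUNK_SIZE = 1024L * 1024; // assumed 1 MiB per chunk (not stated in this diff)
        static final int SHA256_LENGTH = 32;         // bytes per chunk hash

        public static void main(String[] args) {
            long fileSize = 512L * 1024 * 1024;                         // 512 MB file
            long chunkCount = (fileSize + CHUNK_SIZE - 1) / CHUNK_SIZE; // 512 chunks
            long onChainBefore = chunkCount * SHA256_LENGTH;            // 16,384 bytes of on-chain chunk hashes
            long onChainAfter = SHA256_LENGTH;                          // one 32-byte metadata-file hash
            System.out.printf("before: %d bytes, after: %d bytes%n", onChainBefore, onChainAfter);
        }
    }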
This commit is contained in:
parent 7c16a90221
commit a2cac003a4
pom.xml
@@ -16,6 +16,7 @@
        <commons-text.version>1.8</commons-text.version>
        <commons-io.version>2.6</commons-io.version>
        <commons-compress.version>1.21</commons-compress.version>
        <commons-lang3.version>3.12.0</commons-lang3.version>
        <xz.version>1.9</xz.version>
        <dagger.version>1.2.2</dagger.version>
        <guava.version>28.1-jre</guava.version>
@@ -464,6 +465,11 @@
            <artifactId>commons-compress</artifactId>
            <version>${commons-compress.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
            <version>${commons-lang3.version}</version>
        </dependency>
        <dependency>
            <groupId>org.tukaani</groupId>
            <artifactId>xz</artifactId>
ArbitraryDataFile.java

@@ -2,15 +2,14 @@ package org.qortal.arbitrary;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.crypto.Crypto;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;

import java.io.*;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

@@ -63,6 +62,12 @@ public class ArbitraryDataFile {
    private ArrayList<ArbitraryDataFileChunk> chunks;
    private byte[] secret;

    // Metadata
    private byte[] metadataHash;
    private ArbitraryDataFile metadataFile;
    private ArbitraryDataTransactionMetadata metadata;

    public ArbitraryDataFile() {
    }

@@ -220,19 +225,16 @@ public class ArbitraryDataFile {
        return ValidationResult.OK;
    }

    public void addChunk(ArbitraryDataFileChunk chunk) {
    private void addChunk(ArbitraryDataFileChunk chunk) {
        this.chunks.add(chunk);
    }

    public void addChunkHashes(byte[] chunks) throws DataException {
        if (chunks == null || chunks.length == 0) {
    private void addChunkHashes(List<byte[]> chunkHashes) throws DataException {
        if (chunkHashes == null || chunkHashes.isEmpty()) {
            return;
        }
        ByteBuffer byteBuffer = ByteBuffer.wrap(chunks);
        while (byteBuffer.remaining() >= TransactionTransformer.SHA256_LENGTH) {
            byte[] chunkDigest = new byte[TransactionTransformer.SHA256_LENGTH];
            byteBuffer.get(chunkDigest);
            ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkDigest, this.signature);
        for (byte[] chunkHash : chunkHashes) {
            ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
            this.addChunk(chunk);
        }
    }

@@ -364,14 +366,24 @@ public class ArbitraryDataFile {
        return success;
    }

    public boolean deleteMetadata() {
        if (this.metadataFile != null && this.metadataFile.exists()) {
            return this.metadataFile.delete();
        }
        return false;
    }

    public boolean deleteAll() {
        // Delete the complete file
        boolean fileDeleted = this.delete();

        // Delete the metadata file
        boolean metadataDeleted = this.deleteMetadata();

        // Delete the individual chunks
        boolean chunksDeleted = this.deleteAllChunks();

        return fileDeleted && chunksDeleted;
        return fileDeleted || metadataDeleted || chunksDeleted;
    }

    protected void cleanupFilesystem() {

@@ -432,35 +444,98 @@ public class ArbitraryDataFile {
        return false;
    }

    public boolean allChunksExist(byte[] chunks) throws DataException {
        if (chunks == null) {
            return true;
        }
        ByteBuffer byteBuffer = ByteBuffer.wrap(chunks);
        while (byteBuffer.remaining() >= TransactionTransformer.SHA256_LENGTH) {
            byte[] chunkHash = new byte[TransactionTransformer.SHA256_LENGTH];
            byteBuffer.get(chunkHash);
            ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
            if (!chunk.exists()) {
    public boolean allChunksExist() {
        try {
            if (this.metadataHash == null) {
                // We don't have any metadata so can't check if we have the chunks
                // Even if this transaction has no chunks, we don't have the file either (already checked above)
                return false;
            }
        }
        return true;
    }

    public boolean anyChunksExist(byte[] chunks) throws DataException {
        if (chunks == null) {
            if (this.metadataFile == null) {
                this.metadataFile = ArbitraryDataFile.fromHash(this.metadataHash, this.signature);
                if (!metadataFile.exists()) {
                    return false;
                }
            }

            // If the metadata file doesn't exist, we can't check if we have the chunks
            if (!metadataFile.getFilePath().toFile().exists()) {
                return false;
            }

            if (this.metadata == null) {
                this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath()));
            }

            // Read the metadata
            List<byte[]> chunks = metadata.getChunks();
            for (byte[] chunkHash : chunks) {
                ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
                if (!chunk.exists()) {
                    return false;
                }
            }

            return true;

        } catch (DataException e) {
            // Something went wrong, so assume we don't have all the chunks
            return false;
        }
        ByteBuffer byteBuffer = ByteBuffer.wrap(chunks);
        while (byteBuffer.remaining() >= TransactionTransformer.SHA256_LENGTH) {
            byte[] chunkHash = new byte[TransactionTransformer.SHA256_LENGTH];
            byteBuffer.get(chunkHash);
            ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
            if (chunk.exists()) {
                return true;
            }

    public boolean anyChunksExist() throws DataException {
        try {
            if (this.metadataHash == null) {
                // We don't have any metadata so can't check if we have the chunks
                // Even if this transaction has no chunks, we don't have the file either (already checked above)
                return false;
            }

            if (this.metadataFile == null) {
                this.metadataFile = ArbitraryDataFile.fromHash(this.metadataHash, this.signature);
                if (!metadataFile.exists()) {
                    return false;
                }
            }

            // If the metadata file doesn't exist, we can't check if we have any chunks
            if (!metadataFile.getFilePath().toFile().exists()) {
                return false;
            }

            if (this.metadata == null) {
                this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath()));
            }

            // Read the metadata
            List<byte[]> chunks = metadata.getChunks();
            for (byte[] chunkHash : chunks) {
                ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
                if (chunk.exists()) {
                    return true;
                }
            }

            return false;

        } catch (DataException e) {
            // Something went wrong, so assume we don't have all the chunks
            return false;
        }
    }

    public boolean allFilesExist() {
        if (this.exists()) {
            return true;
        }

        // Complete file doesn't exist, so check the chunks
        if (this.allChunksExist()) {
            return true;
        }

        return false;
    }

@@ -514,6 +589,43 @@ public class ArbitraryDataFile {
        return null;
    }

    public List<byte[]> chunkHashList() {
        List<byte[]> chunks = new ArrayList<>();

        if (this.chunks != null && this.chunks.size() > 0) {
            // Return null if we only have one chunk, with the same hash as the parent
            if (Arrays.equals(this.digest(), this.chunks.get(0).digest())) {
                return null;
            }

            try {
                ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                for (ArbitraryDataFileChunk chunk : this.chunks) {
                    byte[] chunkHash = chunk.digest();
                    if (chunkHash.length != 32) {
                        LOGGER.info("Invalid chunk hash length: {}", chunkHash.length);
                        throw new DataException("Invalid chunk hash length");
                    }
                    chunks.add(chunkHash);
                }
                return chunks;

            } catch (DataException e) {
                return null;
            }
        }
        return null;
    }

    private void loadMetadata() throws DataException {
        try {
            this.metadata.read();

        } catch (DataException | IOException e) {
            throw new DataException(e);
        }
    }

    private File getFile() {
        File file = this.filePath.toFile();
        if (file.exists()) {

@@ -582,6 +694,36 @@ public class ArbitraryDataFile {
        return this.secret;
    }

    public void setMetadataFile(ArbitraryDataFile metadataFile) {
        this.metadataFile = metadataFile;
    }

    public ArbitraryDataFile getMetadataFile() {
        return this.metadataFile;
    }

    public void setMetadataHash(byte[] hash) throws DataException {
        this.metadataHash = hash;

        if (hash == null) {
            return;
        }
        this.metadataFile = ArbitraryDataFile.fromHash(hash, this.signature);
        if (metadataFile.exists()) {
            this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath()));
            this.addChunkHashes(this.metadata.getChunks());
        }
    }

    public byte[] getMetadataHash() {
        return this.metadataHash;
    }

    public void setMetadata(ArbitraryDataTransactionMetadata metadata) throws DataException {
        this.metadata = metadata;
        this.loadMetadata();
    }

    @Override
    public String toString() {
        return this.shortHash58();
ArbitraryDataReader.java

@@ -303,7 +303,7 @@ public class ArbitraryDataReader {

    // Load hashes
    byte[] digest = transactionData.getData();
    byte[] chunkHashes = transactionData.getChunkHashes();
    byte[] metadataHash = transactionData.getMetadataHash();
    byte[] signature = transactionData.getSignature();

    // Load secret

@@ -315,36 +315,37 @@
    // Load data file(s)
    ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
    ArbitraryTransactionUtils.checkAndRelocateMiscFiles(transactionData);
    if (!arbitraryDataFile.exists()) {
        if (!arbitraryDataFile.allChunksExist(chunkHashes) || chunkHashes == null) {
            if (ArbitraryDataStorageManager.getInstance().isNameInBlacklist(transactionData.getName())) {
                throw new DataException(
                        String.format("Unable to request missing data for file %s due to blacklist", arbitraryDataFile));
    arbitraryDataFile.setMetadataHash(metadataHash);

    if (!arbitraryDataFile.allFilesExist()) {
        if (ArbitraryDataStorageManager.getInstance().isNameInBlacklist(transactionData.getName())) {
            throw new DataException(
                    String.format("Unable to request missing data for file %s due to blacklist", arbitraryDataFile));
        }
        else {
            // Ask the arbitrary data manager to fetch data for this transaction
            String message;
            if (this.canRequestMissingFiles) {
                boolean requested = ArbitraryDataManager.getInstance().fetchData(transactionData);

                if (requested) {
                    message = String.format("Requested missing data for file %s", arbitraryDataFile);
                } else {
                    message = String.format("Unable to reissue request for missing file %s for signature %s due to rate limit. Please try again later.", arbitraryDataFile, Base58.encode(transactionData.getSignature()));
                }
            }
            else {
                // Ask the arbitrary data manager to fetch data for this transaction
                String message;
                if (this.canRequestMissingFiles) {
                    boolean requested = ArbitraryDataManager.getInstance().fetchDataForSignature(transactionData.getSignature());

                    if (requested) {
                        message = String.format("Requested missing data for file %s", arbitraryDataFile);
                    } else {
                        message = String.format("Unable to reissue request for missing file %s for signature %s due to rate limit. Please try again later.", arbitraryDataFile, Base58.encode(transactionData.getSignature()));
                    }
                }
                else {
                    message = String.format("Missing data for file %s", arbitraryDataFile);
                }

                // Throw a missing data exception, which allows subsequent layers to fetch data
                LOGGER.info(message);
                throw new MissingDataException(message);
                message = String.format("Missing data for file %s", arbitraryDataFile);
            }
        }

        // Throw a missing data exception, which allows subsequent layers to fetch data
        LOGGER.info(message);
        throw new MissingDataException(message);
    }
    }

    if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) {
        // We have all the chunks but not the complete file, so join them
        arbitraryDataFile.addChunkHashes(chunkHashes);
        arbitraryDataFile.join();
    }
ArbitraryDataTransactionBuilder.java

@@ -51,6 +51,8 @@ public class ArbitraryDataTransactionBuilder {
    private final String identifier;
    private final Repository repository;

    private int chunkSize = ArbitraryDataFile.CHUNK_SIZE;

    private ArbitraryTransactionData arbitraryTransactionData;
    private ArbitraryDataFile arbitraryDataFile;

@@ -189,17 +191,25 @@ public class ArbitraryDataTransactionBuilder {

    ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(path, name, service, identifier, method, compression);
    try {
        arbitraryDataWriter.setChunkSize(this.chunkSize);
        arbitraryDataWriter.save();
    } catch (IOException | DataException | InterruptedException | RuntimeException | MissingDataException e) {
        LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
        throw new DataException(e.getMessage());
    }

    // Get main file
    arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile();
    if (arbitraryDataFile == null) {
        throw new DataException("Arbitrary data file is null");
    }

    // Get chunks metadata file
    ArbitraryDataFile metadataFile = arbitraryDataFile.getMetadataFile();
    if (metadataFile == null && arbitraryDataFile.chunkCount() > 1) {
        throw new DataException(String.format("Chunks metadata file is null but there are %d chunks", arbitraryDataFile.chunkCount()));
    }

    String digest58 = arbitraryDataFile.digest58();
    if (digest58 == null) {
        LOGGER.error("Unable to calculate file digest");

@@ -214,12 +224,12 @@
    byte[] secret = arbitraryDataFile.getSecret();
    final ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
    final byte[] digest = arbitraryDataFile.digest();
    final byte[] chunkHashes = arbitraryDataFile.chunkHashes();
    final byte[] metadataHash = (metadataFile != null) ? metadataFile.getHash() : null;
    final List<PaymentData> payments = new ArrayList<>();

    ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
            version, service, nonce, size, name, identifier, method,
            secret, compression, digest, dataType, chunkHashes, payments);
            secret, compression, digest, dataType, metadataHash, payments);

    this.arbitraryTransactionData = transactionData;

@@ -253,4 +263,12 @@
        return this.arbitraryTransactionData;
    }

    public ArbitraryDataFile getArbitraryDataFile() {
        return this.arbitraryDataFile;
    }

    public void setChunkSize(int chunkSize) {
        this.chunkSize = chunkSize;
    }

}
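
Putting the builder changes together, a hedged usage sketch: the method names below all appear in this diff, but the surrounding setup (path, name, service, and so on) is hypothetical.

    ArbitraryDataWriter writer = new ArbitraryDataWriter(path, name, service, identifier, method, compression);
    writer.setChunkSize(ArbitraryDataFile.CHUNK_SIZE);
    writer.save(); // compress -> encrypt -> split -> createMetadataFile -> validate

    ArbitraryDataFile dataFile = writer.getArbitraryDataFile();
    ArbitraryDataFile metadataFile = dataFile.getMetadataFile();                  // null when there is <= 1 chunk
    byte[] metadataHash = (metadataFile != null) ? metadataFile.getHash() : null; // this is what goes on chain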
ArbitraryDataWriter.java

@@ -4,6 +4,7 @@ import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Service;
import org.qortal.crypto.Crypto;
import org.qortal.data.transaction.ArbitraryTransactionData.*;

@@ -39,6 +40,8 @@ public class ArbitraryDataWriter {
    private final Method method;
    private final Compression compression;

    private int chunkSize = ArbitraryDataFile.CHUNK_SIZE;

    private SecretKey aesKey;
    private ArbitraryDataFile arbitraryDataFile;

@@ -64,6 +67,7 @@ public class ArbitraryDataWriter {
    this.compress();
    this.encrypt();
    this.split();
    this.createMetadataFile();
    this.validate();

} finally {

@@ -184,7 +188,8 @@

    if (this.compression == Compression.ZIP) {
        LOGGER.info("Compressing...");
        ZipUtils.zip(this.filePath.toString(), this.compressedPath.toString(), "data");
        String fileName = "data"; //isSingleFile ? singleFileName : null;
        ZipUtils.zip(this.filePath.toString(), this.compressedPath.toString(), fileName);
    }
    else {
        throw new DataException(String.format("Unknown compression type specified: %s", compression.toString()));

@@ -226,6 +231,37 @@
    }
}

private void split() throws IOException, DataException {
    // We don't have a signature yet, so use null to put the file in a generic folder
    this.arbitraryDataFile = ArbitraryDataFile.fromPath(this.filePath, null);
    if (this.arbitraryDataFile == null) {
        throw new IOException("No file available when trying to split");
    }

    int chunkCount = this.arbitraryDataFile.split(this.chunkSize);
    if (chunkCount > 0) {
        LOGGER.info(String.format("Successfully split into %d chunk%s", chunkCount, (chunkCount == 1 ? "" : "s")));
    }
    else {
        throw new DataException("Unable to split file into chunks");
    }
}

private void createMetadataFile() throws IOException, DataException {
    // If we have at least one chunk, we need to create an index file containing their hashes
    if (this.arbitraryDataFile.chunkCount() > 1) {
        // Create the JSON file
        Path chunkFilePath = Paths.get(this.workingPath.toString(), "metadata.json");
        ArbitraryDataTransactionMetadata chunkMetadata = new ArbitraryDataTransactionMetadata(chunkFilePath);
        chunkMetadata.setChunks(this.arbitraryDataFile.chunkHashList());
        chunkMetadata.write();

        // Create an ArbitraryDataFile from the JSON file (we don't have a signature yet)
        ArbitraryDataFile metadataFile = ArbitraryDataFile.fromPath(chunkFilePath, null);
        this.arbitraryDataFile.setMetadataFile(metadataFile);
    }
}

private void validate() throws IOException, DataException {
    if (this.arbitraryDataFile == null) {
        throw new IOException("No file available when validating");

@@ -248,21 +284,21 @@
    }
    LOGGER.info("Chunk hashes are valid");

}

private void split() throws IOException, DataException {
    // We don't have a signature yet, so use null to put the file in a generic folder
    this.arbitraryDataFile = ArbitraryDataFile.fromPath(this.filePath, null);
    if (this.arbitraryDataFile == null) {
        throw new IOException("No file available when trying to split");
    }

    int chunkCount = this.arbitraryDataFile.split(ArbitraryDataFile.CHUNK_SIZE);
    if (chunkCount > 0) {
        LOGGER.info(String.format("Successfully split into %d chunk%s", chunkCount, (chunkCount == 1 ? "" : "s")));
    }
    else {
        throw new DataException("Unable to split file into chunks");
// Validate chunks metadata file
if (this.arbitraryDataFile.chunkCount() > 1) {
    ArbitraryDataFile metadataFile = this.arbitraryDataFile.getMetadataFile();
    if (metadataFile == null || !metadataFile.exists()) {
        throw new IOException("No metadata file available, but there are multiple chunks");
    }
    // Read the file
    ArbitraryDataTransactionMetadata metadata = new ArbitraryDataTransactionMetadata(metadataFile.getFilePath());
    metadata.read();
    // Check all chunks exist
    for (byte[] chunk : this.arbitraryDataFile.chunkHashList()) {
        if (!metadata.containsChunk(chunk)) {
            throw new IOException(String.format("Missing chunk %s in metadata file", Base58.encode(chunk)));
        }
    }
}
}

@@ -290,4 +326,8 @@
        return this.arbitraryDataFile;
    }

    public void setChunkSize(int chunkSize) {
        this.chunkSize = chunkSize;
    }

}
ArbitraryDataMetadata.java

@@ -10,25 +10,28 @@ import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

/**
 * ArbitraryDataMetadata
 *
 * This is a base class to handle reading and writing JSON to the supplied filePath.
 *
 * It is not usable on its own; it must be subclassed, with two methods overridden:
 *
 * readJson() - code to unserialize the JSON file
 * buildJson() - code to serialize the JSON file
 *
 */
public class ArbitraryDataMetadata {

    protected static final Logger LOGGER = LogManager.getLogger(ArbitraryDataMetadata.class);

    protected Path filePath;
    protected Path qortalDirectoryPath;

    protected String jsonString;

    public ArbitraryDataMetadata(Path filePath) {
        this.filePath = filePath;
        this.qortalDirectoryPath = Paths.get(filePath.toString(), ".qortal");
    }

    protected String fileName() {
        // To be overridden
        return null;
    }

    protected void readJson() throws DataException {

@@ -47,36 +50,32 @@ public class ArbitraryDataMetadata {

    public void write() throws IOException, DataException {
        this.buildJson();
        this.createQortalDirectory();
        this.writeToQortalPath();
        this.createParentDirectories();

        BufferedWriter writer = new BufferedWriter(new FileWriter(this.filePath.toString()));
        writer.write(this.jsonString);
        writer.close();
    }

    protected void loadJson() throws IOException {
        Path path = Paths.get(this.qortalDirectoryPath.toString(), this.fileName());
        File patchFile = new File(path.toString());
        if (!patchFile.exists()) {
            throw new IOException(String.format("Patch file doesn't exist: %s", path.toString()));
        File metadataFile = new File(this.filePath.toString());
        if (!metadataFile.exists()) {
            throw new IOException(String.format("Metadata file doesn't exist: %s", this.filePath.toString()));
        }

        this.jsonString = new String(Files.readAllBytes(path));
        this.jsonString = new String(Files.readAllBytes(this.filePath));
    }

    protected void createQortalDirectory() throws DataException {

    protected void createParentDirectories() throws DataException {
        try {
            Files.createDirectories(this.qortalDirectoryPath);
            Files.createDirectories(this.filePath.getParent());
        } catch (IOException e) {
            throw new DataException("Unable to create .qortal directory");
            throw new DataException("Unable to create parent directories");
        }
    }

    protected void writeToQortalPath() throws IOException {
        Path patchPath = Paths.get(this.qortalDirectoryPath.toString(), this.fileName());
        BufferedWriter writer = new BufferedWriter(new FileWriter(patchPath.toString()));
        writer.write(this.jsonString);
        writer.close();
    }

    public String getJsonString() {
        return this.jsonString;
ArbitraryDataMetadataCache.java

@@ -6,7 +6,7 @@ import org.qortal.utils.Base58;

import java.nio.file.Path;

public class ArbitraryDataMetadataCache extends ArbitraryDataMetadata {
public class ArbitraryDataMetadataCache extends ArbitraryDataQortalMetadata {

    private byte[] signature;
    private long timestamp;
ArbitraryDataMetadataPatch.java

@@ -15,7 +15,7 @@ import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;

public class ArbitraryDataMetadataPatch extends ArbitraryDataMetadata {
public class ArbitraryDataMetadataPatch extends ArbitraryDataQortalMetadata {

    private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataMetadataPatch.class);
ArbitraryDataQortalMetadata.java (new file)

@@ -0,0 +1,101 @@
package org.qortal.arbitrary.metadata;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.repository.DataException;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

/**
 * ArbitraryDataQortalMetadata
 *
 * This is a base class to handle reading and writing JSON to a .qortal folder
 * within the supplied filePath. This is used when storing data against an existing
 * arbitrary data file structure.
 *
 * It is not usable on its own; it must be subclassed, with three methods overridden:
 *
 * fileName() - the file name to use within the .qortal folder
 * readJson() - code to unserialize the JSON file
 * buildJson() - code to serialize the JSON file
 *
 */
public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata {

    protected static final Logger LOGGER = LogManager.getLogger(ArbitraryDataQortalMetadata.class);

    protected Path filePath;
    protected Path qortalDirectoryPath;

    protected String jsonString;

    public ArbitraryDataQortalMetadata(Path filePath) {
        super(filePath);

        this.qortalDirectoryPath = Paths.get(filePath.toString(), ".qortal");
    }

    protected String fileName() {
        // To be overridden
        return null;
    }

    protected void readJson() throws DataException {
        // To be overridden
    }

    protected void buildJson() {
        // To be overridden
    }

    @Override
    public void read() throws IOException, DataException {
        this.loadJson();
        this.readJson();
    }

    @Override
    public void write() throws IOException, DataException {
        this.buildJson();
        this.createParentDirectories();
        this.createQortalDirectory();

        Path patchPath = Paths.get(this.qortalDirectoryPath.toString(), this.fileName());
        BufferedWriter writer = new BufferedWriter(new FileWriter(patchPath.toString()));
        writer.write(this.jsonString);
        writer.close();
    }

    @Override
    protected void loadJson() throws IOException {
        Path path = Paths.get(this.qortalDirectoryPath.toString(), this.fileName());
        File patchFile = new File(path.toString());
        if (!patchFile.exists()) {
            throw new IOException(String.format("Patch file doesn't exist: %s", path.toString()));
        }

        this.jsonString = new String(Files.readAllBytes(path));
    }

    protected void createQortalDirectory() throws DataException {
        try {
            Files.createDirectories(this.qortalDirectoryPath);
        } catch (IOException e) {
            throw new DataException("Unable to create .qortal directory");
        }
    }

    public String getJsonString() {
        return this.jsonString;
    }

}
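
A minimal hypothetical subclass, to illustrate the three-method contract described in the javadoc above; the class name and "status" field here are illustrative only and do not appear in this commit.

    package org.qortal.arbitrary.metadata;

    import org.json.JSONObject;
    import org.qortal.repository.DataException;

    import java.nio.file.Path;

    public class ExampleQortalMetadata extends ArbitraryDataQortalMetadata {

        private String status;

        public ExampleQortalMetadata(Path filePath) {
            super(filePath);
        }

        @Override
        protected String fileName() {
            return "example"; // written to <filePath>/.qortal/example
        }

        @Override
        protected void readJson() throws DataException {
            this.status = new JSONObject(this.jsonString).optString("status", null);
        }

        @Override
        protected void buildJson() {
            this.jsonString = new JSONObject().put("status", this.status).toString(2);
        }
    }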
ArbitraryDataTransactionMetadata.java (new file)

@@ -0,0 +1,78 @@
package org.qortal.arbitrary.metadata;

import org.json.JSONArray;
import org.json.JSONObject;
import org.qortal.repository.DataException;
import org.qortal.utils.Base58;

import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {

    private List<byte[]> chunks;

    public ArbitraryDataTransactionMetadata(Path filePath) {
        super(filePath);

    }

    @Override
    protected void readJson() throws DataException {
        if (this.jsonString == null) {
            throw new DataException("Transaction metadata JSON string is null");
        }

        List<byte[]> chunksList = new ArrayList<>();
        JSONObject cache = new JSONObject(this.jsonString);
        if (cache.has("chunks")) {
            JSONArray chunks = cache.getJSONArray("chunks");
            if (chunks != null) {
                for (int i=0; i<chunks.length(); i++) {
                    String chunk = chunks.getString(i);
                    if (chunk != null) {
                        chunksList.add(Base58.decode(chunk));
                    }
                }
            }
            this.chunks = chunksList;
        }
    }

    @Override
    protected void buildJson() {
        JSONObject outer = new JSONObject();

        JSONArray chunks = new JSONArray();
        if (this.chunks != null) {
            for (byte[] chunk : this.chunks) {
                chunks.put(Base58.encode(chunk));
            }
        }
        outer.put("chunks", chunks);

        this.jsonString = outer.toString(2);
        LOGGER.trace("Transaction metadata: {}", this.jsonString);
    }

    public void setChunks(List<byte[]> chunks) {
        this.chunks = chunks;
    }

    public List<byte[]> getChunks() {
        return this.chunks;
    }

    public boolean containsChunk(byte[] chunk) {
        for (byte[] c : this.chunks) {
            if (Arrays.equals(c, chunk)) {
                return true;
            }
        }
        return false;
    }

}
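
From buildJson() above, the metadata file on disk is simply a JSON object holding Base58-encoded chunk hashes, i.e. {"chunks": ["<base58 hash>", ...]}. A hedged round-trip sketch, assuming the class above is on the classpath; the path is hypothetical and exception handling is omitted:

    Path metadataPath = Paths.get("/tmp/metadata.json");  // hypothetical location
    byte[] chunkHash = new byte[32];                      // stand-in for a real SHA-256 chunk digest

    ArbitraryDataTransactionMetadata metadata = new ArbitraryDataTransactionMetadata(metadataPath);
    metadata.setChunks(Arrays.asList(chunkHash));
    metadata.write();                                     // serializes the "chunks" array to disk

    ArbitraryDataTransactionMetadata loaded = new ArbitraryDataTransactionMetadata(metadataPath);
    loaded.read();                                        // loadJson() + readJson()
    System.out.println(loaded.containsChunk(chunkHash));  // true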
ArbitraryDataCleanupManager.java

@@ -130,7 +130,7 @@ public class ArbitraryDataCleanupManager extends Thread {

    // Check if we have any of the chunks
    boolean anyChunksExist = ArbitraryTransactionUtils.anyChunksExist(arbitraryTransactionData);
    boolean transactionHasChunks = (arbitraryTransactionData.getChunkHashes() != null);
    boolean transactionHasChunks = (arbitraryTransactionData.getMetadataHash() != null);

    if (!completeFileExists && !anyChunksExist) {
        // We don't have any files at all for this transaction - nothing to do
ArbitraryDataManager.java

@@ -227,7 +227,7 @@ public class ArbitraryDataManager extends Thread {

    // Ask our connected peers if they have files for this signature
    // This process automatically then fetches the files themselves if a peer is found
    fetchDataForSignature(signature);
    fetchData(arbitraryTransactionData);

} catch (DataException e) {
    LOGGER.error("Repository issue when fetching arbitrary transaction data", e);

@@ -258,6 +258,34 @@
    }
}

private boolean hasLocalMetadata(ArbitraryTransactionData transactionData) {
    if (transactionData == null) {
        return false;
    }

    // Load hashes
    byte[] hash = transactionData.getData();
    byte[] metadataHash = transactionData.getMetadataHash();

    if (metadataHash == null) {
        // This transaction has no metadata, so we can treat it as local
        return true;
    }

    // Load data file(s)
    byte[] signature = transactionData.getSignature();
    try {
        ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
        arbitraryDataFile.setMetadataHash(metadataHash);

        return arbitraryDataFile.getMetadataFile().exists();
    }
    catch (DataException e) {
        // Assume not local
        return false;
    }
}

// Track file list lookups by signature

@@ -397,11 +425,12 @@

    // Lookup file lists by signature

    public boolean fetchDataForSignature(byte[] signature) {
        return this.fetchArbitraryDataFileList(signature);
    public boolean fetchData(ArbitraryTransactionData arbitraryTransactionData) {
        return this.fetchArbitraryDataFileList(arbitraryTransactionData);
    }

    private boolean fetchArbitraryDataFileList(byte[] signature) {
    private boolean fetchArbitraryDataFileList(ArbitraryTransactionData arbitraryTransactionData) {
        byte[] signature = arbitraryTransactionData.getSignature();
        String signature58 = Base58.encode(signature);

        // If we've already tried too many times in a short space of time, make sure to give up

@@ -625,14 +654,19 @@

    // Load data file(s)
    ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
    arbitraryDataFile.addChunkHashes(arbitraryTransactionData.getChunkHashes());
    byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
    arbitraryDataFile.setMetadataHash(metadataHash);

    // If hashes are null, we will treat this to mean all data hashes associated with this file
    if (hashes == null) {
        if (arbitraryTransactionData.getChunkHashes() == null) {
            // This transaction has no chunks, so use the main file hash
        if (metadataHash == null) {
            // This transaction has no metadata/chunks, so use the main file hash
            hashes = Arrays.asList(arbitraryDataFile.getHash());
        }
        else if (!arbitraryDataFile.getMetadataFile().exists()) {
            // We don't have the metadata file yet, so request it
            hashes = Arrays.asList(arbitraryDataFile.getMetadataFile().getHash());
        }
        else {
            // Add the chunk hashes
            hashes = arbitraryDataFile.getChunkHashes();

@@ -671,8 +705,8 @@
        repository.saveChanges();
    }

    // Check if we have all the chunks for this transaction
    if (arbitraryDataFile.exists() || arbitraryDataFile.allChunksExist(arbitraryTransactionData.getChunkHashes())) {
    // Check if we have all the files we need for this transaction
    if (arbitraryDataFile.allFilesExist()) {

        // We have all the chunks for this transaction, so we should invalidate the transaction's name's
        // data cache so that it is rebuilt the next time we serve it

@@ -770,19 +804,19 @@

    // Load data file(s)
    ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
    arbitraryDataFile.addChunkHashes(arbitraryTransactionData.getChunkHashes());
    arbitraryDataFile.setMetadataHash(arbitraryTransactionData.getMetadataHash());

    // Check all hashes exist
    for (byte[] hash : hashes) {
        //LOGGER.info("Received hash {}", Base58.encode(hash));
        if (!arbitraryDataFile.containsChunk(hash)) {
            // Check the hash against the complete file
            if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) {
                LOGGER.info("Received non-matching chunk hash {} for signature {}", Base58.encode(hash), signature58);
                return;
            }
        }
    }
//    // Check all hashes exist
//    for (byte[] hash : hashes) {
//        //LOGGER.info("Received hash {}", Base58.encode(hash));
//        if (!arbitraryDataFile.containsChunk(hash)) {
//            // Check the hash against the complete file
//            if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) {
//                LOGGER.info("Received non-matching chunk hash {} for signature {}. This could happen if we haven't obtained the metadata file yet.", Base58.encode(hash), signature58);
//                return;
//            }
//        }
//    }

    // Update requests map to reflect that we've received it
    Triple<String, Peer, Long> newEntry = new Triple<>(null, null, request.getC());

@@ -867,12 +901,18 @@
    if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {

        byte[] hash = transactionData.getData();
        byte[] chunkHashes = transactionData.getChunkHashes();
        byte[] metadataHash = transactionData.getMetadataHash();

        // Load file(s) and add any that exist to the list of hashes
        ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
        if (chunkHashes != null && chunkHashes.length > 0) {
            arbitraryDataFile.addChunkHashes(chunkHashes);
        if (metadataHash != null) {
            arbitraryDataFile.setMetadataHash(metadataHash);

            // If we have the metadata file, add its hash
            if (arbitraryDataFile.getMetadataFile().exists()) {
                hashes.add(arbitraryDataFile.getMetadataHash());
            }

            for (ArbitraryDataFileChunk chunk : arbitraryDataFile.getChunks()) {
                if (chunk.exists()) {
                    hashes.add(chunk.getHash());
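
The request-selection logic in the hunk above (choosing between the main file hash, the metadata file hash, and the chunk hashes) can be restated as a small helper. This is a hedged restatement for clarity, not a method that exists in the commit, which inlines the logic directly:

    // Hypothetical helper mirroring the inline logic above.
    private List<byte[]> hashesToRequest(ArbitraryDataFile arbitraryDataFile, byte[] metadataHash) {
        if (metadataHash == null) {
            // No metadata means no chunks: request the complete file
            return Arrays.asList(arbitraryDataFile.getHash());
        }
        if (!arbitraryDataFile.getMetadataFile().exists()) {
            // Fetch the metadata file first; the chunk hashes live inside it
            return Arrays.asList(arbitraryDataFile.getMetadataFile().getHash());
        }
        // Metadata is local, so request the individual chunk hashes
        return arbitraryDataFile.getChunkHashes();
    }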
ArbitraryTransactionData.java

@@ -86,8 +86,8 @@ public class ArbitraryTransactionData extends TransactionData {
    @Schema(example = "raw_data_in_base58")
    private byte[] data;
    private DataType dataType;
    @Schema(example = "chunk_hashes_in_base58")
    private byte[] chunkHashes;
    @Schema(example = "metadata_file_hash_in_base58")
    private byte[] metadataHash;

    private List<PaymentData> payments;

@@ -103,9 +103,9 @@ public class ArbitraryTransactionData extends TransactionData {
    }

    public ArbitraryTransactionData(BaseTransactionData baseTransactionData,
            int version, Service service, int nonce, int size,
            String name, String identifier, Method method, byte[] secret, Compression compression,
            byte[] data, DataType dataType, byte[] chunkHashes, List<PaymentData> payments) {
            int version, Service service, int nonce, int size,
            String name, String identifier, Method method, byte[] secret, Compression compression,
            byte[] data, DataType dataType, byte[] metadataHash, List<PaymentData> payments) {
        super(TransactionType.ARBITRARY, baseTransactionData);

        this.senderPublicKey = baseTransactionData.creatorPublicKey;

@@ -120,7 +120,7 @@ public class ArbitraryTransactionData extends TransactionData {
        this.compression = compression;
        this.data = data;
        this.dataType = dataType;
        this.chunkHashes = chunkHashes;
        this.metadataHash = metadataHash;
        this.payments = payments;
    }

@@ -186,12 +186,12 @@ public class ArbitraryTransactionData extends TransactionData {
        this.dataType = dataType;
    }

    public byte[] getChunkHashes() {
        return this.chunkHashes;
    public byte[] getMetadataHash() {
        return this.metadataHash;
    }

    public void setChunkHashes(byte[] chunkHashes) {
        this.chunkHashes = chunkHashes;
    public void setMetadataHash(byte[] metadataHash) {
        this.metadataHash = metadataHash;
    }

    public List<PaymentData> getPayments() {
HSQLDBArbitraryRepository.java

@@ -12,7 +12,6 @@ import org.qortal.repository.ArbitraryRepository;
import org.qortal.repository.DataException;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.transaction.Transaction.ApprovalStatus;
import org.qortal.utils.ArbitraryTransactionUtils;

import java.sql.ResultSet;
import java.sql.SQLException;

@@ -50,17 +49,15 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
    }

    // Load hashes
    byte[] digest = transactionData.getData();
    byte[] chunkHashes = transactionData.getChunkHashes();
    byte[] hash = transactionData.getData();
    byte[] metadataHash = transactionData.getMetadataHash();

    // Load data file(s)
    ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
    if (chunkHashes != null && chunkHashes.length > 0) {
        arbitraryDataFile.addChunkHashes(chunkHashes);
    }
    ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
    arbitraryDataFile.setMetadataHash(metadataHash);

    // Check if we already have the complete data file or all chunks
    if (arbitraryDataFile.exists() || arbitraryDataFile.allChunksExist(chunkHashes)) {
    if (arbitraryDataFile.allFilesExist()) {
        return true;
    }

@@ -81,13 +78,11 @@

    // Load hashes
    byte[] digest = transactionData.getData();
    byte[] chunkHashes = transactionData.getChunkHashes();
    byte[] metadataHash = transactionData.getMetadataHash();

    // Load data file(s)
    ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
    if (chunkHashes != null && chunkHashes.length > 0) {
        arbitraryDataFile.addChunkHashes(chunkHashes);
    }
    arbitraryDataFile.setMetadataHash(metadataHash);

    // If we have the complete data file, return it
    if (arbitraryDataFile.exists()) {

@@ -95,7 +90,7 @@
    }

    // Alternatively, if we have all the chunks, combine them into a single file
    if (arbitraryDataFile.allChunksExist(chunkHashes)) {
    if (arbitraryDataFile.allChunksExist()) {
        arbitraryDataFile.join();

        // Verify that the combined hash matches the expected hash

@@ -130,15 +125,13 @@
    }

    // Load hashes
    byte[] digest = arbitraryTransactionData.getData();
    byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
    byte[] hash = arbitraryTransactionData.getData();
    byte[] metadataHash = arbitraryTransactionData.getMetadataHash();

    // Load data file(s)
    byte[] signature = arbitraryTransactionData.getSignature();
    ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
    if (chunkHashes != null && chunkHashes.length > 0) {
        arbitraryDataFile.addChunkHashes(chunkHashes);
    }
    ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
    arbitraryDataFile.setMetadataHash(metadataHash);

    // Delete file and chunks
    arbitraryDataFile.deleteAll();

@@ -148,7 +141,7 @@
    public List<ArbitraryTransactionData> getArbitraryTransactions(String name, Service service, String identifier, long since) throws DataException {
        String sql = "SELECT type, reference, signature, creator, created_when, fee, " +
                "tx_group_id, block_height, approval_status, approval_height, " +
                "version, nonce, service, size, is_data_raw, data, chunk_hashes, " +
                "version, nonce, service, size, is_data_raw, data, metadata_hash, " +
                "name, identifier, update_method, secret, compression FROM ArbitraryTransactions " +
                "JOIN Transactions USING (signature) " +
                "WHERE lower(name) = ? AND service = ?" +

@@ -192,7 +185,7 @@
    boolean isDataRaw = resultSet.getBoolean(15); // NOT NULL, so no null to false
    DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH;
    byte[] data = resultSet.getBytes(16);
    byte[] chunkHashes = resultSet.getBytes(17);
    byte[] metadataHash = resultSet.getBytes(17);
    String nameResult = resultSet.getString(18);
    String identifierResult = resultSet.getString(19);
    Method method = Method.valueOf(resultSet.getInt(20));

@@ -202,7 +195,7 @@

    ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
            version, serviceResult, nonce, size, nameResult, identifierResult, method, secret,
            compression, data, dataType, chunkHashes, null);
            compression, data, dataType, metadataHash, null);

    arbitraryTransactionData.add(transactionData);
} while (resultSet.next());

@@ -219,7 +212,7 @@

    sql.append("SELECT type, reference, signature, creator, created_when, fee, " +
            "tx_group_id, block_height, approval_status, approval_height, " +
            "version, nonce, service, size, is_data_raw, data, chunk_hashes, " +
            "version, nonce, service, size, is_data_raw, data, metadata_hash, " +
            "name, identifier, update_method, secret, compression FROM ArbitraryTransactions " +
            "JOIN Transactions USING (signature) " +
            "WHERE lower(name) = ? AND service = ? " +

@@ -267,7 +260,7 @@
    boolean isDataRaw = resultSet.getBoolean(15); // NOT NULL, so no null to false
    DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH;
    byte[] data = resultSet.getBytes(16);
    byte[] chunkHashes = resultSet.getBytes(17);
    byte[] metadataHash = resultSet.getBytes(17);
    String nameResult = resultSet.getString(18);
    String identifierResult = resultSet.getString(19);
    Method methodResult = Method.valueOf(resultSet.getInt(20));

@@ -277,7 +270,7 @@

    ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
            version, serviceResult, nonce, size, nameResult, identifierResult, methodResult, secret,
            compression, data, dataType, chunkHashes, null);
            compression, data, dataType, metadataHash, null);

    return transactionData;
} catch (SQLException e) {
HSQLDBDatabaseUpdates.java

@@ -900,13 +900,14 @@ public class HSQLDBDatabaseUpdates {

    case 37:
        // ARBITRARY transaction updates for off-chain data storage
        stmt.execute("CREATE TYPE ArbitraryDataHashes AS VARBINARY(8000)");

        // We may want to use a nonce rather than a transaction fee on the data chain
        stmt.execute("ALTER TABLE ArbitraryTransactions ADD nonce INT NOT NULL DEFAULT 0");
        // We need to know the total size of the data file(s) associated with each transaction
        stmt.execute("ALTER TABLE ArbitraryTransactions ADD size INT NOT NULL DEFAULT 0");
        // Larger data files need to be split into chunks, for easier transmission and greater decentralization
        stmt.execute("ALTER TABLE ArbitraryTransactions ADD chunk_hashes ArbitraryDataHashes");
        // We store their hashes (and possibly other things) in a metadata file
        stmt.execute("ALTER TABLE ArbitraryTransactions ADD metadata_hash VARBINARY(32)");
        // For finding transactions by file hash
        stmt.execute("CREATE INDEX ArbitraryDataIndex ON ArbitraryTransactions (is_data_raw, data)");
        break;
HSQLDBArbitraryTransactionRepository.java

@@ -21,7 +21,7 @@ public class HSQLDBArbitraryTransactionRepository extends HSQLDBTransactionRepos
    }

    TransactionData fromBase(BaseTransactionData baseTransactionData) throws DataException {
        String sql = "SELECT version, nonce, service, size, is_data_raw, data, chunk_hashes, " +
        String sql = "SELECT version, nonce, service, size, is_data_raw, data, metadata_hash, " +
                "name, identifier, update_method, secret, compression from ArbitraryTransactions " +
                "WHERE signature = ?";

@@ -36,7 +36,7 @@ public class HSQLDBArbitraryTransactionRepository extends HSQLDBTransactionRepos
    boolean isDataRaw = resultSet.getBoolean(5); // NOT NULL, so no null to false
    DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH;
    byte[] data = resultSet.getBytes(6);
    byte[] chunkHashes = resultSet.getBytes(7);
    byte[] metadataHash = resultSet.getBytes(7);
    String name = resultSet.getString(8);
    String identifier = resultSet.getString(9);
    ArbitraryTransactionData.Method method = ArbitraryTransactionData.Method.valueOf(resultSet.getInt(10));

@@ -45,7 +45,7 @@ public class HSQLDBArbitraryTransactionRepository extends HSQLDBTransactionRepos

    List<PaymentData> payments = this.getPaymentsFromSignature(baseTransactionData.getSignature());
    return new ArbitraryTransactionData(baseTransactionData, version, service, nonce, size, name,
            identifier, method, secret, compression, data, dataType, chunkHashes, payments);
            identifier, method, secret, compression, data, dataType, metadataHash, payments);
} catch (SQLException e) {
    throw new DataException("Unable to fetch arbitrary transaction from repository", e);
}

@@ -65,7 +65,7 @@ public class HSQLDBArbitraryTransactionRepository extends HSQLDBTransactionRepos
    .bind("version", arbitraryTransactionData.getVersion()).bind("service", arbitraryTransactionData.getService().value)
    .bind("nonce", arbitraryTransactionData.getNonce()).bind("size", arbitraryTransactionData.getSize())
    .bind("is_data_raw", arbitraryTransactionData.getDataType() == DataType.RAW_DATA).bind("data", arbitraryTransactionData.getData())
    .bind("chunk_hashes", arbitraryTransactionData.getChunkHashes()).bind("name", arbitraryTransactionData.getName())
    .bind("metadata_hash", arbitraryTransactionData.getMetadataHash()).bind("name", arbitraryTransactionData.getName())
    .bind("identifier", arbitraryTransactionData.getIdentifier()).bind("update_method", arbitraryTransactionData.getMethod().value)
    .bind("secret", arbitraryTransactionData.getSecret()).bind("compression", arbitraryTransactionData.getCompression().value);
@ -1,6 +1,5 @@
|
||||
package org.qortal.transaction;
|
||||
|
||||
import java.math.BigInteger;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
@ -17,7 +16,6 @@ import org.qortal.payment.Payment;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile;
|
||||
import org.qortal.arbitrary.ArbitraryDataFileChunk;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.transaction.ArbitraryTransactionTransformer;
|
||||
import org.qortal.transform.transaction.TransactionTransformer;
|
||||
@ -30,12 +28,10 @@ public class ArbitraryTransaction extends Transaction {
|
||||
|
||||
// Other useful constants
|
||||
public static final int MAX_DATA_SIZE = 4000;
|
||||
public static final int MAX_CHUNK_HASHES_LENGTH = 8000;
|
||||
public static final int MAX_METADATA_LENGTH = 32;
|
||||
public static final int HASH_LENGTH = TransactionTransformer.SHA256_LENGTH;
|
||||
public static final int POW_BUFFER_SIZE = 8 * 1024 * 1024; // bytes
|
||||
public static final int POW_MIN_DIFFICULTY = 12; // leading zero bits
|
||||
public static final int POW_MAX_DIFFICULTY = 19; // leading zero bits
|
||||
public static final long MAX_FILE_SIZE = ArbitraryDataFile.MAX_FILE_SIZE;
|
||||
public static final int POW_DIFFICULTY = 12; // leading zero bits
|
||||
public static final int MAX_IDENTIFIER_LENGTH = 64;
|
||||
|
||||
// Constructors
|
||||
@ -73,10 +69,8 @@ public class ArbitraryTransaction extends Transaction {
|
||||
// Clear nonce from transactionBytes
|
||||
ArbitraryTransactionTransformer.clearNonce(transactionBytes);
|
||||
|
||||
int difficulty = difficultyForFileSize(arbitraryTransactionData.getSize());
|
||||
|
||||
// Calculate nonce
|
||||
this.arbitraryTransactionData.setNonce(MemoryPoW.compute2(transactionBytes, POW_BUFFER_SIZE, difficulty));
|
||||
this.arbitraryTransactionData.setNonce(MemoryPoW.compute2(transactionBytes, POW_BUFFER_SIZE, POW_DIFFICULTY));
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -111,7 +105,7 @@ public class ArbitraryTransaction extends Transaction {
|
||||
return ValidationResult.INVALID_DATA_LENGTH;
|
||||
}
|
||||
|
||||
// Check hashes
|
||||
// Check hashes and metadata
|
||||
if (arbitraryTransactionData.getDataType() == ArbitraryTransactionData.DataType.DATA_HASH) {
|
||||
// Check length of data hash
|
||||
if (arbitraryTransactionData.getData().length != HASH_LENGTH) {
|
||||
@ -120,21 +114,10 @@ public class ArbitraryTransaction extends Transaction {
|
||||
|
||||
// Version 5+
|
||||
if (arbitraryTransactionData.getVersion() >= 5) {
|
||||
byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
|
||||
byte[] metadata = arbitraryTransactionData.getMetadataHash();
|
||||
|
||||
// Check maximum length of chunk hashes
|
||||
if (chunkHashes != null && chunkHashes.length > MAX_CHUNK_HASHES_LENGTH) {
|
||||
return ValidationResult.INVALID_DATA_LENGTH;
|
||||
}
|
||||
|
||||
// Check expected length of chunk hashes
|
||||
int chunkCount = (int)Math.ceil((double)arbitraryTransactionData.getSize() / (double) ArbitraryDataFileChunk.CHUNK_SIZE);
|
||||
int expectedChunkHashesSize = (chunkCount > 1) ? chunkCount * HASH_LENGTH : 0;
|
||||
if (chunkHashes == null && expectedChunkHashesSize > 0) {
|
||||
return ValidationResult.INVALID_DATA_LENGTH;
|
||||
}
|
||||
int chunkHashesLength = chunkHashes != null ? chunkHashes.length : 0;
|
||||
if (chunkHashesLength != expectedChunkHashesSize) {
|
||||
// Check maximum length of metadata hash
|
||||
if (metadata != null && metadata.length > MAX_METADATA_LENGTH) {
|
||||
return ValidationResult.INVALID_DATA_LENGTH;
|
||||
}
|
||||
}
|
||||
@ -199,10 +182,8 @@ public class ArbitraryTransaction extends Transaction {
			// Clear nonce from transactionBytes
			ArbitraryTransactionTransformer.clearNonce(transactionBytes);

			int difficulty = difficultyForFileSize(arbitraryTransactionData.getSize());

			// Check nonce
			return MemoryPoW.verify2(transactionBytes, POW_BUFFER_SIZE, difficulty, nonce);
			return MemoryPoW.verify2(transactionBytes, POW_BUFFER_SIZE, POW_DIFFICULTY, nonce);
		}

		return true;
@ -274,13 +255,4 @@ public class ArbitraryTransaction extends Transaction {
		return null;
	}

	// Helper methods

	public int difficultyForFileSize(long size) {
		final BigInteger powRange = BigInteger.valueOf(POW_MAX_DIFFICULTY - POW_MIN_DIFFICULTY);
		final BigInteger multiplier = BigInteger.valueOf(100);
		final BigInteger percentage = BigInteger.valueOf(size).multiply(multiplier).divide(BigInteger.valueOf(MAX_FILE_SIZE));
		return POW_MIN_DIFFICULTY + powRange.multiply(percentage).divide(multiplier).intValue();
	}

}
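For context, the deleted helper mapped file size linearly onto the 12-19 leading-zero-bit range. A worked sketch of the old formula, assuming MAX_FILE_SIZE was 500 MiB (consistent with the difficulty values in the test deleted further below):

	import java.math.BigInteger;

	// Sketch of the removed variable-difficulty calculation, for reference only
	public class OldDifficultyExample {
		static final int POW_MIN_DIFFICULTY = 12; // leading zero bits
		static final int POW_MAX_DIFFICULTY = 19; // leading zero bits
		static final long MAX_FILE_SIZE = 500L * 1024 * 1024; // assumption: 500 MiB

		static int difficultyForFileSize(long size) {
			// Integer percentage of the maximum file size, scaled onto the 12-19 range
			final BigInteger powRange = BigInteger.valueOf(POW_MAX_DIFFICULTY - POW_MIN_DIFFICULTY);
			final BigInteger multiplier = BigInteger.valueOf(100);
			final BigInteger percentage = BigInteger.valueOf(size).multiply(multiplier).divide(BigInteger.valueOf(MAX_FILE_SIZE));
			return POW_MIN_DIFFICULTY + powRange.multiply(percentage).divide(multiplier).intValue();
		}

		public static void main(String[] args) {
			System.out.println(difficultyForFileSize(5123456));            // 12
			System.out.println(difficultyForFileSize(290L * 1024 * 1024)); // 16
			System.out.println(difficultyForFileSize(500L * 1024 * 1024)); // 19
		}
	}

Since every transaction is now the same size regardless of file size, the constant POW_DIFFICULTY of 12 replaces this entire range.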
@ -33,18 +33,18 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
	private static final int DATA_TYPE_LENGTH = BYTE_LENGTH;
	private static final int DATA_SIZE_LENGTH = INT_LENGTH;
	private static final int RAW_DATA_SIZE_LENGTH = INT_LENGTH;
	private static final int CHUNKS_SIZE_LENGTH = INT_LENGTH;
	private static final int METADATA_HASH_SIZE_LENGTH = INT_LENGTH;
	private static final int NUMBER_PAYMENTS_LENGTH = INT_LENGTH;
	private static final int NAME_SIZE_LENGTH = INT_LENGTH;
	private static final int IDENTIFIER_SIZE_LENGTH = INT_LENGTH;
	private static final int COMPRESSION_LENGTH = INT_LENGTH;
	private static final int METHOD_LENGTH = INT_LENGTH;
	private static final int SECRET_LENGTH = INT_LENGTH;
	private static final int SECRET_LENGTH = INT_LENGTH; // TODO: rename to SECRET_SIZE_LENGTH? This is the length prefix for the secret, not the secret itself

	private static final int EXTRAS_LENGTH = SERVICE_LENGTH + DATA_TYPE_LENGTH + DATA_SIZE_LENGTH;

	private static final int EXTRAS_V5_LENGTH = NONCE_LENGTH + NAME_SIZE_LENGTH + IDENTIFIER_SIZE_LENGTH +
			METHOD_LENGTH + SECRET_LENGTH + COMPRESSION_LENGTH + RAW_DATA_SIZE_LENGTH + CHUNKS_SIZE_LENGTH;
			METHOD_LENGTH + SECRET_LENGTH + COMPRESSION_LENGTH + RAW_DATA_SIZE_LENGTH + METADATA_HASH_SIZE_LENGTH;

	protected static final TransactionLayout layout;

@ -77,8 +77,8 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
		layout.add("data", TransformationType.DATA);

		layout.add("raw data size", TransformationType.INT); // Version 5+
		layout.add("chunk hashes length", TransformationType.INT); // Version 5+
		layout.add("chunk hashes", TransformationType.DATA); // Version 5+
		layout.add("metadata hash length", TransformationType.INT); // Version 5+
		layout.add("metadata hash", TransformationType.DATA); // Version 5+

		layout.add("fee", TransformationType.AMOUNT);
		layout.add("signature", TransformationType.SIGNATURE);
@ -147,16 +147,16 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
		byteBuffer.get(data);

		int size = 0;
		byte[] chunkHashes = null;
		byte[] metadataHash = null;

		if (version >= 5) {
			size = byteBuffer.getInt();

			int chunkHashesLength = byteBuffer.getInt();
			int metadataHashLength = byteBuffer.getInt();

			if (chunkHashesLength > 0) {
				chunkHashes = new byte[chunkHashesLength];
				byteBuffer.get(chunkHashes);
			if (metadataHashLength > 0) {
				metadataHash = new byte[metadataHashLength];
				byteBuffer.get(metadataHash);
			}
		}

@ -168,7 +168,7 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
		BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, senderPublicKey, fee, signature);

		return new ArbitraryTransactionData(baseTransactionData, version, service, nonce, size, name, identifier,
				method, secret, compression, data, dataType, chunkHashes, payments);
				method, secret, compression, data, dataType, metadataHash, payments);
	}

	public static int getDataLength(TransactionData transactionData) throws TransformationException {
@ -178,9 +178,9 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
		int identifierLength = (arbitraryTransactionData.getIdentifier() != null) ? Utf8.encodedLength(arbitraryTransactionData.getIdentifier()) : 0;
		int secretLength = (arbitraryTransactionData.getSecret() != null) ? arbitraryTransactionData.getSecret().length : 0;
		int dataLength = (arbitraryTransactionData.getData() != null) ? arbitraryTransactionData.getData().length : 0;
		int chunkHashesLength = (arbitraryTransactionData.getChunkHashes() != null) ? arbitraryTransactionData.getChunkHashes().length : 0;
		int metadataHashLength = (arbitraryTransactionData.getMetadataHash() != null) ? arbitraryTransactionData.getMetadataHash().length : 0;

		int length = getBaseLength(transactionData) + EXTRAS_LENGTH + nameLength + identifierLength + secretLength + dataLength + chunkHashesLength;
		int length = getBaseLength(transactionData) + EXTRAS_LENGTH + nameLength + identifierLength + secretLength + dataLength + metadataHashLength;

		if (arbitraryTransactionData.getVersion() >= 5) {
			length += EXTRAS_V5_LENGTH;
@ -236,12 +236,12 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
		if (arbitraryTransactionData.getVersion() >= 5) {
			bytes.write(Ints.toByteArray(arbitraryTransactionData.getSize()));

			byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
			int chunkHashesLength = (chunkHashes != null) ? chunkHashes.length : 0;
			bytes.write(Ints.toByteArray(chunkHashesLength));
			byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
			int metadataHashLength = (metadataHash != null) ? metadataHash.length : 0;
			bytes.write(Ints.toByteArray(metadataHashLength));

			if (chunkHashesLength > 0) {
				bytes.write(arbitraryTransactionData.getChunkHashes());
			if (metadataHashLength > 0) {
				bytes.write(metadataHash);
			}
		}

@ -317,12 +317,12 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
		if (arbitraryTransactionData.getVersion() >= 5) {
			bytes.write(Ints.toByteArray(arbitraryTransactionData.getSize()));

			byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
			int chunkHashesLength = (chunkHashes != null) ? chunkHashes.length : 0;
			bytes.write(Ints.toByteArray(chunkHashesLength));
			byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
			int metadataHashLength = (metadataHash != null) ? metadataHash.length : 0;
			bytes.write(Ints.toByteArray(metadataHashLength));

			if (chunkHashesLength > 0) {
				bytes.write(arbitraryTransactionData.getChunkHashes());
			if (metadataHashLength > 0) {
				bytes.write(metadataHash);
			}
		}

@ -101,20 +101,14 @@ public class ArbitraryTransactionUtils {
		}

		byte[] digest = transactionData.getData();
		byte[] chunkHashes = transactionData.getChunkHashes();
		byte[] metadataHash = transactionData.getMetadataHash();
		byte[] signature = transactionData.getSignature();

		if (chunkHashes == null) {
			// This file doesn't have any chunks, which is the same as us having them all
			return true;
		}

		// Load complete file and chunks
		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
		if (chunkHashes != null && chunkHashes.length > 0) {
			arbitraryDataFile.addChunkHashes(chunkHashes);
		}
		return arbitraryDataFile.allChunksExist(chunkHashes);
		arbitraryDataFile.setMetadataHash(metadataHash);

		return arbitraryDataFile.allChunksExist();
	}

	public static boolean anyChunksExist(ArbitraryTransactionData transactionData) throws DataException {
@ -123,20 +117,19 @@ public class ArbitraryTransactionUtils {
		}

		byte[] digest = transactionData.getData();
		byte[] chunkHashes = transactionData.getChunkHashes();
		byte[] metadataHash = transactionData.getMetadataHash();
		byte[] signature = transactionData.getSignature();

		if (chunkHashes == null) {
			// This file doesn't have any chunks, which means none exist
		if (metadataHash == null) {
			// This file doesn't have any metadata/chunks, which means none exist
			return false;
		}

		// Load complete file and chunks
		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
		if (chunkHashes != null && chunkHashes.length > 0) {
			arbitraryDataFile.addChunkHashes(chunkHashes);
		}
		return arbitraryDataFile.anyChunksExist(chunkHashes);
		arbitraryDataFile.setMetadataHash(metadataHash);

		return arbitraryDataFile.anyChunksExist();
	}

	public static int ourChunkCount(ArbitraryTransactionData transactionData) throws DataException {
@ -145,19 +138,18 @@ public class ArbitraryTransactionUtils {
		}

		byte[] digest = transactionData.getData();
		byte[] chunkHashes = transactionData.getChunkHashes();
		byte[] metadataHash = transactionData.getMetadataHash();
		byte[] signature = transactionData.getSignature();

		if (chunkHashes == null) {
			// This file doesn't have any chunks
		if (metadataHash == null) {
			// This file doesn't have any metadata, therefore it has no chunks
			return 0;
		}

		// Load complete file and chunks
		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
		if (chunkHashes != null && chunkHashes.length > 0) {
			arbitraryDataFile.addChunkHashes(chunkHashes);
		}
		arbitraryDataFile.setMetadataHash(metadataHash);

		return arbitraryDataFile.chunkCount();
	}
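All three helpers above now share one shape: resolve the complete file from its on-chain hash, attach the metadata hash, and let ArbitraryDataFile derive the chunk list from the metadata file. A condensed sketch of that shared pattern (resolveDataFile is an illustrative name, not an actual method of this class):

	import org.qortal.arbitrary.ArbitraryDataFile;
	import org.qortal.data.transaction.ArbitraryTransactionData;
	import org.qortal.repository.DataException;

	// Sketch: the lookup pattern shared by allChunksExist, anyChunksExist and ourChunkCount
	static ArbitraryDataFile resolveDataFile(ArbitraryTransactionData transactionData) throws DataException {
		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(
				transactionData.getData(), transactionData.getSignature());
		// Chunk hashes are no longer stored on chain; the metadata file supplies them
		arbitraryDataFile.setMetadataHash(transactionData.getMetadataHash());
		return arbitraryDataFile;
	}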
@ -195,11 +187,9 @@ public class ArbitraryTransactionUtils {

	public static void deleteCompleteFile(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) throws DataException {
		byte[] completeHash = arbitraryTransactionData.getData();
		byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
		byte[] signature = arbitraryTransactionData.getSignature();

		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash, signature);
		arbitraryDataFile.addChunkHashes(chunkHashes);

		if (!ArbitraryTransactionUtils.isFileHashRecent(completeHash, signature, now, cleanupAfter)) {
			LOGGER.info("Deleting file {} because it can be rebuilt from chunks " +
@ -211,19 +201,28 @@ public class ArbitraryTransactionUtils {

	public static void deleteCompleteFileAndChunks(ArbitraryTransactionData arbitraryTransactionData) throws DataException {
		byte[] completeHash = arbitraryTransactionData.getData();
		byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
		byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
		byte[] signature = arbitraryTransactionData.getSignature();

		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash, signature);
		arbitraryDataFile.addChunkHashes(chunkHashes);
		arbitraryDataFile.setMetadataHash(metadataHash);
		arbitraryDataFile.deleteAll();
	}

	public static void convertFileToChunks(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) throws DataException {
		byte[] completeHash = arbitraryTransactionData.getData();
		byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
		byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
		byte[] signature = arbitraryTransactionData.getSignature();

		// Find the expected chunk hashes
		ArbitraryDataFile expectedDataFile = ArbitraryDataFile.fromHash(completeHash, signature);
		expectedDataFile.setMetadataHash(metadataHash);

		if (metadataHash == null || !expectedDataFile.getMetadataFile().exists()) {
			// We don't have the metadata file, or this transaction doesn't have one - nothing to do
			return;
		}

		// Split the file into chunks
		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash, signature);
		int chunkCount = arbitraryDataFile.split(ArbitraryDataFile.CHUNK_SIZE);
@ -232,9 +231,10 @@ public class ArbitraryTransactionUtils {
				Base58.encode(completeHash), chunkCount, (chunkCount == 1 ? "" : "s")));

		// Verify that the chunk hashes match those in the transaction
		byte[] chunkHashes = expectedDataFile.chunkHashes();
		if (chunkHashes != null && Arrays.equals(chunkHashes, arbitraryDataFile.chunkHashes())) {
			// Ensure they exist on disk
			if (arbitraryDataFile.allChunksExist(chunkHashes)) {
			if (arbitraryDataFile.allChunksExist()) {

				// Now delete the original file if it's not recent
				if (!ArbitraryTransactionUtils.isFileHashRecent(completeHash, signature, now, cleanupAfter)) {
@ -265,17 +265,16 @@ public class ArbitraryTransactionUtils {
		try {
			// Load hashes
			byte[] digest = arbitraryTransactionData.getData();
			byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
			byte[] metadataHash = arbitraryTransactionData.getMetadataHash();

			// Load signature
			byte[] signature = arbitraryTransactionData.getSignature();

			// Check if any files for this transaction exist in the misc folder
			ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, null);
			if (chunkHashes != null && chunkHashes.length > 0) {
				arbitraryDataFile.addChunkHashes(chunkHashes);
			}
			if (arbitraryDataFile.anyChunksExist(chunkHashes)) {
			arbitraryDataFile.setMetadataHash(metadataHash);

			if (arbitraryDataFile.anyChunksExist()) {
				// At least one chunk exists in the misc folder - move them
				for (ArbitraryDataFileChunk chunk : arbitraryDataFile.getChunks()) {
					if (chunk.exists()) {
@ -311,6 +310,23 @@ public class ArbitraryTransactionUtils {
					// Delete empty parent directories
					FilesystemUtils.safeDeleteEmptyParentDirectories(oldPath);
				}

				// Also move the metadata file if it exists
				if (arbitraryDataFile.getMetadataFile() != null && arbitraryDataFile.getMetadataFile().exists()) {
					// Determine the correct path by initializing a new ArbitraryDataFile instance with the signature
					ArbitraryDataFile newCompleteFile = ArbitraryDataFile.fromHash(arbitraryDataFile.getMetadataHash(), signature);
					Path oldPath = arbitraryDataFile.getMetadataFile().getFilePath();
					Path newPath = newCompleteFile.getFilePath();

					// Ensure parent directories exist, then copy the file
					LOGGER.info("Relocating metadata file from {} to {}...", oldPath, newPath);
					Files.createDirectories(newPath.getParent());
					Files.move(oldPath, newPath, REPLACE_EXISTING);
					filesRelocatedCount++;

					// Delete empty parent directories
					FilesystemUtils.safeDeleteEmptyParentDirectories(oldPath);
				}
			}
			catch (DataException | IOException e) {
				LOGGER.info("Unable to check and relocate all files for signature {}: {}",
@ -0,0 +1,136 @@
package org.qortal.test.arbitrary;

import org.junit.Before;
import org.junit.Test;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.arbitrary.ArbitraryDataDigest;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.arbitrary.ArbitraryDataReader;
import org.qortal.arbitrary.ArbitraryDataTransactionBuilder;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.RegisterNameTransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.test.common.BlockUtils;
import org.qortal.test.common.Common;
import org.qortal.test.common.TransactionUtils;
import org.qortal.test.common.transaction.TestTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Random;

import static org.junit.Assert.*;

public class ArbitraryTransactionMetadataTests extends Common {

	@Before
	public void beforeTest() throws DataException {
		Common.useDefaultSettings();
	}

	@Test
	public void testMultipleChunks() throws DataException, IOException, MissingDataException {
		try (final Repository repository = RepositoryManager.getRepository()) {
			PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
			String publicKey58 = Base58.encode(alice.getPublicKey());
			String name = "TEST"; // Can be anything for this test
			String identifier = null; // Not used for this test
			Service service = Service.WEBSITE; // Can be anything for this test
			int chunkSize = 100;
			int dataLength = 900; // Actual data length will be longer due to encryption

			// Register the name to Alice
			RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
			TransactionUtils.signAndMint(repository, transactionData, alice);

			// Create PUT transaction
			Path path1 = generateRandomDataPath(dataLength);
			ArbitraryDataFile arbitraryDataFile = this.createAndMintTxn(repository, publicKey58, path1, name, identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize);

			// Check the chunk count is correct
			assertEquals(10, arbitraryDataFile.chunkCount());

			// Now build the latest data state for this name
			ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ResourceIdType.NAME, service, identifier);
			arbitraryDataReader.loadSynchronously(true);
			Path initialLayerPath = arbitraryDataReader.getFilePath();
			ArbitraryDataDigest initialLayerDigest = new ArbitraryDataDigest(initialLayerPath);
			initialLayerDigest.compute();

			// Its directory hash should match the original directory hash
			ArbitraryDataDigest path1Digest = new ArbitraryDataDigest(path1);
			path1Digest.compute();
			assertEquals(path1Digest.getHash58(), initialLayerDigest.getHash58());
		}
	}

	private Path generateRandomDataPath(int length) throws IOException {
		// Create a file in a random temp directory
		Path tempDir = Files.createTempDirectory("generateRandomDataPath");
		File file = new File(Paths.get(tempDir.toString(), "file.txt").toString());
		file.deleteOnExit();

		// Write a random string to the file
		BufferedWriter file1Writer = new BufferedWriter(new FileWriter(file));
		String initialString = this.generateRandomString(length - 1); // -1 due to newline at EOF

		// Add a newline every 50 chars
		// initialString = initialString.replaceAll("(.{50})", "$1\n");

		file1Writer.write(initialString);
		file1Writer.newLine();
		file1Writer.close();

		return tempDir;
	}

	private String generateRandomString(int length) {
		int leftLimit = 48; // numeral '0'
		int rightLimit = 122; // letter 'z'
		Random random = new Random();

		return random.ints(leftLimit, rightLimit + 1)
				.filter(i -> (i <= 57 || i >= 65) && (i <= 90 || i >= 97))
				.limit(length)
				.collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append)
				.toString();
	}

	private ArbitraryDataFile createAndMintTxn(Repository repository, String publicKey58, Path path, String name, String identifier,
											   ArbitraryTransactionData.Method method, Service service, PrivateKeyAccount account,
											   int chunkSize) throws DataException {

		ArbitraryDataTransactionBuilder txnBuilder = new ArbitraryDataTransactionBuilder(
				repository, publicKey58, path, name, method, service, identifier);

		txnBuilder.setChunkSize(chunkSize);
		txnBuilder.build();
		txnBuilder.computeNonce();
		ArbitraryTransactionData transactionData = txnBuilder.getArbitraryTransactionData();
		Transaction.ValidationResult result = TransactionUtils.signAndImport(repository, transactionData, account);
		assertEquals(Transaction.ValidationResult.OK, result);
		BlockUtils.mintBlock(repository);

		// We need a new ArbitraryDataFile instance because the files will have been moved to the signature's folder
		byte[] hash = txnBuilder.getArbitraryDataFile().getHash();
		byte[] signature = transactionData.getSignature();
		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
		arbitraryDataFile.setMetadataHash(transactionData.getMetadataHash());

		return arbitraryDataFile;
	}

}
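The expected count of 10 in the test above is a ceiling division: the 900 raw bytes grow past 900 once encrypted, and any encrypted length in the 901-1000 byte range splits into 10 chunks of 100 bytes. A minimal sketch of that arithmetic (the encrypted length of 944 is a hypothetical illustrative value):

	// Sketch: chunk count as a ceiling division of encrypted size by chunk size
	public class ChunkCountSketch {
		public static void main(String[] args) {
			int chunkSize = 100;
			int encryptedLength = 944; // hypothetical: 900 raw bytes plus encryption overhead
			int chunkCount = (int) Math.ceil((double) encryptedLength / (double) chunkSize);
			System.out.println(chunkCount); // 10
		}
	}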
@ -1,70 +0,0 @@
package org.qortal.test.arbitrary;

import org.junit.Before;
import org.junit.Test;

import org.qortal.arbitrary.misc.Service;
import org.qortal.data.PaymentData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.test.common.*;
import org.qortal.test.common.transaction.TestTransaction;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction;

import java.util.ArrayList;
import java.util.List;

import static org.junit.Assert.*;

public class ArbitraryTransactionTests extends Common {

	private static final int version = 4;
	private static final String recipient = Common.getTestAccount(null, "bob").getAddress();

	@Before
	public void beforeTest() throws DataException {
		Common.useDefaultSettings();
	}

	@Test
	public void testDifficultyCalculation() throws DataException {

		try (final Repository repository = RepositoryManager.getRepository()) {

			TestAccount alice = Common.getTestAccount(repository, "alice");
			ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
			Service service = Service.ARBITRARY_DATA;
			ArbitraryTransactionData.Method method = ArbitraryTransactionData.Method.PUT;
			ArbitraryTransactionData.Compression compression = ArbitraryTransactionData.Compression.NONE;
			List<PaymentData> payments = new ArrayList<>();

			ArbitraryTransactionData transactionData = new ArbitraryTransactionData(TestTransaction.generateBase(alice),
					5, service, 0, 0, null, null, method,
					null, compression, null, dataType, null, payments);

			ArbitraryTransaction transaction = (ArbitraryTransaction) Transaction.fromData(repository, transactionData);
			assertEquals(12, transaction.difficultyForFileSize(1));
			assertEquals(12, transaction.difficultyForFileSize(5123456));
			assertEquals(12, transaction.difficultyForFileSize(74 * 1024 * 1024));
			assertEquals(13, transaction.difficultyForFileSize(75 * 1024 * 1024));
			assertEquals(13, transaction.difficultyForFileSize(144 * 1024 * 1024));
			assertEquals(14, transaction.difficultyForFileSize(145 * 1024 * 1024));
			assertEquals(14, transaction.difficultyForFileSize(214 * 1024 * 1024));
			assertEquals(15, transaction.difficultyForFileSize(215 * 1024 * 1024));
			assertEquals(15, transaction.difficultyForFileSize(289 * 1024 * 1024));
			assertEquals(16, transaction.difficultyForFileSize(290 * 1024 * 1024));
			assertEquals(16, transaction.difficultyForFileSize(359 * 1024 * 1024));
			assertEquals(17, transaction.difficultyForFileSize(360 * 1024 * 1024));
			assertEquals(17, transaction.difficultyForFileSize(429 * 1024 * 1024));
			assertEquals(18, transaction.difficultyForFileSize(430 * 1024 * 1024));
			assertEquals(18, transaction.difficultyForFileSize(499 * 1024 * 1024));
			assertEquals(19, transaction.difficultyForFileSize(500 * 1024 * 1024));

		}
	}

}
@ -30,8 +30,8 @@ public class ArbitraryTestTransaction extends TestTransaction {

		final ArbitraryTransactionData.Compression compression = ArbitraryTransactionData.Compression.ZIP;

		final byte[] chunkHashes = new byte[128];
		random.nextBytes(chunkHashes);
		final byte[] metadataHash = new byte[32];
		random.nextBytes(metadataHash);

		byte[] data = new byte[1024];
		random.nextBytes(data);
@ -46,7 +46,7 @@ public class ArbitraryTestTransaction extends TestTransaction {
		payments.add(new PaymentData(recipient, assetId, amount));

		return new ArbitraryTransactionData(generateBase(account), version, service, nonce, size, name, identifier,
				method, secret, compression, data, dataType, chunkHashes, payments);
				method, secret, compression, data, dataType, metadataHash, payments);
	}

}