forked from Qortal/qortal
Verify that each data file matches the size reported by transaction.
This prevents an incorrect file size from being included with a transaction, as the system will reject the file and won't even serve it to other peers. FUTURE: we could introduce some kind of blacklist to track invalid files like this, and avoid repeated attempts to retrieve them. It is okay for now, as the system will back off after a few failed attempts.
This commit is contained in:
parent
8bb3a3f8a6
commit
f7ed3eefc8
@ -225,6 +225,21 @@ public class ArbitraryDataFile {
|
|||||||
return ValidationResult.OK;
|
return ValidationResult.OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void validateFileSize(long expectedSize) throws DataException {
|
||||||
|
// Verify that we can determine the file's size
|
||||||
|
long fileSize = 0;
|
||||||
|
try {
|
||||||
|
fileSize = Files.size(this.getFilePath());
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new DataException(String.format("Couldn't get file size for transaction %s", Base58.encode(signature)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the file's size matches the size reported by the transaction
|
||||||
|
if (fileSize != expectedSize) {
|
||||||
|
throw new DataException(String.format("File size mismatch for transaction %s", Base58.encode(signature)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private void addChunk(ArbitraryDataFileChunk chunk) {
|
private void addChunk(ArbitraryDataFileChunk chunk) {
|
||||||
this.chunks.add(chunk);
|
this.chunks.add(chunk);
|
||||||
}
|
}
|
||||||
|
@ -357,6 +357,9 @@ public class ArbitraryDataReader {
|
|||||||
if (!Arrays.equals(arbitraryDataFile.digest(), digest)) {
|
if (!Arrays.equals(arbitraryDataFile.digest(), digest)) {
|
||||||
throw new DataException("Unable to validate complete file hash");
|
throw new DataException("Unable to validate complete file hash");
|
||||||
}
|
}
|
||||||
|
// Ensure the file's size matches the size reported by the transaction (throws a DataException if not)
|
||||||
|
arbitraryDataFile.validateFileSize(transactionData.getSize());
|
||||||
|
|
||||||
// Set filePath to the location of the ArbitraryDataFile
|
// Set filePath to the location of the ArbitraryDataFile
|
||||||
this.filePath = arbitraryDataFile.getFilePath();
|
this.filePath = arbitraryDataFile.getFilePath();
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,7 @@
|
|||||||
package org.qortal.repository.hsqldb;
|
package org.qortal.repository.hsqldb;
|
||||||
|
|
||||||
|
import org.apache.logging.log4j.LogManager;
|
||||||
|
import org.apache.logging.log4j.Logger;
|
||||||
import org.qortal.arbitrary.misc.Service;
|
import org.qortal.arbitrary.misc.Service;
|
||||||
import org.qortal.data.arbitrary.ArbitraryResourceInfo;
|
import org.qortal.data.arbitrary.ArbitraryResourceInfo;
|
||||||
import org.qortal.crypto.Crypto;
|
import org.qortal.crypto.Crypto;
|
||||||
@ -13,6 +15,7 @@ import org.qortal.repository.ArbitraryRepository;
|
|||||||
import org.qortal.repository.DataException;
|
import org.qortal.repository.DataException;
|
||||||
import org.qortal.arbitrary.ArbitraryDataFile;
|
import org.qortal.arbitrary.ArbitraryDataFile;
|
||||||
import org.qortal.transaction.Transaction.ApprovalStatus;
|
import org.qortal.transaction.Transaction.ApprovalStatus;
|
||||||
|
import org.qortal.utils.Base58;
|
||||||
|
|
||||||
import java.sql.ResultSet;
|
import java.sql.ResultSet;
|
||||||
import java.sql.SQLException;
|
import java.sql.SQLException;
|
||||||
@ -21,6 +24,8 @@ import java.util.List;
|
|||||||
|
|
||||||
public class HSQLDBArbitraryRepository implements ArbitraryRepository {
|
public class HSQLDBArbitraryRepository implements ArbitraryRepository {
|
||||||
|
|
||||||
|
private static final Logger LOGGER = LogManager.getLogger(HSQLDBArbitraryRepository.class);
|
||||||
|
|
||||||
private static final int MAX_RAW_DATA_SIZE = 255; // size of VARBINARY
|
private static final int MAX_RAW_DATA_SIZE = 255; // size of VARBINARY
|
||||||
|
|
||||||
protected HSQLDBRepository repository;
|
protected HSQLDBRepository repository;
|
||||||
@ -66,38 +71,53 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public byte[] fetchData(byte[] signature) throws DataException {
|
public byte[] fetchData(byte[] signature) {
|
||||||
ArbitraryTransactionData transactionData = getTransactionData(signature);
|
try {
|
||||||
if (transactionData == null) {
|
ArbitraryTransactionData transactionData = getTransactionData(signature);
|
||||||
return null;
|
if (transactionData == null) {
|
||||||
}
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
// Raw data is always available
|
// Raw data is always available
|
||||||
if (transactionData.getDataType() == DataType.RAW_DATA) {
|
if (transactionData.getDataType() == DataType.RAW_DATA) {
|
||||||
return transactionData.getData();
|
return transactionData.getData();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load hashes
|
// Load hashes
|
||||||
byte[] digest = transactionData.getData();
|
byte[] digest = transactionData.getData();
|
||||||
byte[] metadataHash = transactionData.getMetadataHash();
|
byte[] metadataHash = transactionData.getMetadataHash();
|
||||||
|
|
||||||
// Load data file(s)
|
// Load data file(s)
|
||||||
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
|
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
|
||||||
arbitraryDataFile.setMetadataHash(metadataHash);
|
arbitraryDataFile.setMetadataHash(metadataHash);
|
||||||
|
|
||||||
// If we have the complete data file, return it
|
// If we have the complete data file, return it
|
||||||
if (arbitraryDataFile.exists()) {
|
if (arbitraryDataFile.exists()) {
|
||||||
return arbitraryDataFile.getBytes();
|
// Ensure the file's size matches the size reported by the transaction (throws a DataException if not)
|
||||||
}
|
arbitraryDataFile.validateFileSize(transactionData.getSize());
|
||||||
|
|
||||||
// Alternatively, if we have all the chunks, combine them into a single file
|
|
||||||
if (arbitraryDataFile.allChunksExist()) {
|
|
||||||
arbitraryDataFile.join();
|
|
||||||
|
|
||||||
// Verify that the combined hash matches the expected hash
|
|
||||||
if (digest.equals(arbitraryDataFile.digest())) {
|
|
||||||
return arbitraryDataFile.getBytes();
|
return arbitraryDataFile.getBytes();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Alternatively, if we have all the chunks, combine them into a single file
|
||||||
|
if (arbitraryDataFile.allChunksExist()) {
|
||||||
|
arbitraryDataFile.join();
|
||||||
|
|
||||||
|
// Verify that the combined hash matches the expected hash
|
||||||
|
if (!digest.equals(arbitraryDataFile.digest())) {
|
||||||
|
LOGGER.info(String.format("Hash mismatch for transaction: %s", Base58.encode(signature)));
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the file's size matches the size reported by the transaction
|
||||||
|
arbitraryDataFile.validateFileSize(transactionData.getSize());
|
||||||
|
|
||||||
|
return arbitraryDataFile.getBytes();
|
||||||
|
}
|
||||||
|
|
||||||
|
} catch (DataException e) {
|
||||||
|
LOGGER.info("Unable to fetch data for transaction {}: {}", Base58.encode(signature), e.getMessage());
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
return null;
|
return null;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user