Mirror of https://github.com/Qortal/qortal.git (synced 2025-07-16 17:31:23 +00:00)

Commit 8bea11bc52: Merge branch 'master' into master

pom.xml (7 lines changed)
pom.xml
@@ -3,7 +3,7 @@
    <modelVersion>4.0.0</modelVersion>
    <groupId>org.qortal</groupId>
    <artifactId>qortal</artifactId>
    <version>4.7.2</version>
    <version>5.0.2</version>
    <packaging>jar</packaging>
    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
@@ -804,5 +804,10 @@
        <artifactId>jaxb-runtime</artifactId>
        <version>${jaxb-runtime.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.tika</groupId>
        <artifactId>tika-core</artifactId>
        <version>3.1.0</version>
    </dependency>
    </dependencies>
</project>
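The new tika-core dependency backs the MIME-type detection used further down in this commit (in the finalize endpoints of ArbitraryResource). A minimal standalone sketch of that detection step, assuming Tika 3.x on the classpath; the file path is hypothetical:

    import org.apache.tika.Tika;
    import org.apache.tika.mime.MimeTypeException;
    import org.apache.tika.mime.MimeTypes;

    import java.io.File;
    import java.io.IOException;

    public class TikaExtensionSketch {
        public static void main(String[] args) throws IOException, MimeTypeException {
            File merged = new File("uploads-temp/example.bin");   // hypothetical merged upload
            String mimeType = new Tika().detect(merged);          // e.g. "image/png"
            String extension = MimeTypes.getDefaultMimeTypes()    // map the MIME type to a file extension
                    .forName(mimeType)
                    .getExtension();                              // e.g. ".png"
            System.out.println(mimeType + " -> " + extension);
        }
    }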
@@ -2,12 +2,14 @@ package org.qortal.account;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource;
import org.qortal.block.BlockChain;
import org.qortal.controller.LiteNode;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.GroupRepository;
import org.qortal.repository.NameRepository;
@@ -19,7 +21,11 @@ import org.qortal.utils.Groups;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;

import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import static org.qortal.utils.Amounts.prettyAmount;

@@ -361,6 +367,142 @@ public class Account {
        return accountData.getLevel();
    }

    /**
     * Get Primary Name
     *
     * @return the primary name for this address if present, otherwise empty
     *
     * @throws DataException
     */
    public Optional<String> getPrimaryName() throws DataException {

        return this.repository.getNameRepository().getPrimaryName(this.address);
    }

    /**
     * Remove Primary Name
     *
     * @throws DataException
     */
    public void removePrimaryName() throws DataException {
        this.repository.getNameRepository().removePrimaryName(this.address);
    }

    /**
     * Reset Primary Name
     *
     * Set primary name based on the names (and their history) this account owns.
     *
     * @param confirmationStatus the status of the transactions used to determine the primary name
     *
     * @return the primary name, empty if there isn't one
     *
     * @throws DataException
     */
    public Optional<String> resetPrimaryName(TransactionsResource.ConfirmationStatus confirmationStatus) throws DataException {
        Optional<String> primaryName = determinePrimaryName(confirmationStatus);

        if (primaryName.isPresent()) {
            return setPrimaryName(primaryName.get());
        }
        else {
            return primaryName;
        }
    }

    /**
     * Determine Primary Name
     *
     * Determine primary name based on a list of registered names.
     *
     * @param confirmationStatus the status of the transactions for this determination
     *
     * @return the primary name, empty if there is no primary name
     *
     * @throws DataException
     */
    public Optional<String> determinePrimaryName(TransactionsResource.ConfirmationStatus confirmationStatus) throws DataException {

        // all registered names for the owner
        List<NameData> names = this.repository.getNameRepository().getNamesByOwner(this.address);

        Optional<String> primaryName;

        // if no registered names, then no primary name is possible
        if (names.isEmpty()) {
            primaryName = Optional.empty();
        }
        // if names
        else {
            // if one name, then that is the primary name
            if (names.size() == 1) {
                primaryName = Optional.of( names.get(0).getName() );
            }
            // if more than one name, then seek the earliest name acquisition that was never released
            else {
                Map<String, TransactionData> txByName = new HashMap<>(names.size());

                // for each name, get the latest transaction
                for (NameData nameData : names) {

                    // since the name is currently registered to the owner,
                    // we assume the latest transaction involving this name was the transaction that acquired the
                    // name through registration, purchase or update
                    Optional<TransactionData> latestTransaction
                        = this.repository
                            .getTransactionRepository()
                            .getTransactionsInvolvingName(
                                nameData.getName(),
                                confirmationStatus
                            )
                            .stream()
                            .sorted(Comparator.comparing(
                                TransactionData::getTimestamp).reversed()
                            )
                            .findFirst(); // first is the last, since it was reversed

                    // if there is a latest transaction, expected for all registered names
                    if (latestTransaction.isPresent()) {
                        txByName.put(nameData.getName(), latestTransaction.get());
                    }
                    // if there is no latest transaction, then log a warning
                    else {
                        LOGGER.warn("No matching transaction for name: " + nameData.getName());
                    }
                }

                // get the first name acquisition for this address
                Optional<Map.Entry<String, TransactionData>> firstNameEntry
                    = txByName.entrySet().stream().sorted(Comparator.comparing(entry -> entry.getValue().getTimestamp())).findFirst();

                // if there is a name acquisition, then the first one is the primary name
                if (firstNameEntry.isPresent()) {
                    primaryName = Optional.of( firstNameEntry.get().getKey() );
                }
                // if there is no name acquisition, then there is no primary name
                else {
                    primaryName = Optional.empty();
                }
            }
        }
        return primaryName;
    }

    /**
     * Set Primary Name
     *
     * @param primaryName the primary name to set for this address
     *
     * @return the primary name if successful, empty if unsuccessful
     *
     * @throws DataException
     */
    public Optional<String> setPrimaryName( String primaryName ) throws DataException {
        int changed = this.repository.getNameRepository().setPrimaryName(this.address, primaryName);

        return changed > 0 ? Optional.of(primaryName) : Optional.empty();
    }

    /**
     * Returns reward-share minting address, or unknown if reward-share does not exist.
     *
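A minimal usage sketch for the new primary-name helpers above (not part of the commit): it assumes an open Repository handle supplied by the caller and uses the CONFIRMED value of TransactionsResource.ConfirmationStatus.

    import org.qortal.account.Account;
    import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
    import org.qortal.repository.DataException;
    import org.qortal.repository.Repository;

    import java.util.Optional;

    public class PrimaryNameSketch {

        // Recompute and print the primary name for an address (repository supplied by the caller)
        public static void refreshPrimaryName(Repository repository, String address) throws DataException {
            Account account = new Account(repository, address);

            // Derive the primary name from the account's registered names and persist it
            Optional<String> primary = account.resetPrimaryName(ConfirmationStatus.CONFIRMED);
            System.out.println("Primary name: " + primary.orElse("<none>"));

            // account.getPrimaryName() would now return the same value from the name repository
        }
    }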
@@ -46,6 +46,7 @@ public class ApiService {
    private ApiService() {
        this.config = new ResourceConfig();
        this.config.packages("org.qortal.api.resource", "org.qortal.api.restricted.resource");
        this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
        this.config.register(OpenApiResource.class);
        this.config.register(ApiDefinition.class);
        this.config.register(AnnotationPostProcessor.class);
@@ -197,6 +198,7 @@ public class ApiService {
        context.addServlet(DataMonitorSocket.class, "/websockets/datamonitor");
        context.addServlet(ActiveChatsWebSocket.class, "/websockets/chat/active/*");
        context.addServlet(ChatMessagesWebSocket.class, "/websockets/chat/messages");
        context.addServlet(UnsignedFeesSocket.class, "/websockets/crosschain/unsignedfees");
        context.addServlet(TradeOffersWebSocket.class, "/websockets/crosschain/tradeoffers");
        context.addServlet(TradeBotWebSocket.class, "/websockets/crosschain/tradebot");
        context.addServlet(TradePresenceWebSocket.class, "/websockets/crosschain/tradepresence");

@@ -40,6 +40,7 @@ public class DevProxyService {
    private DevProxyService() {
        this.config = new ResourceConfig();
        this.config.packages("org.qortal.api.proxy.resource", "org.qortal.api.resource");
        this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
        this.config.register(OpenApiResource.class);
        this.config.register(ApiDefinition.class);
        this.config.register(AnnotationPostProcessor.class);

@@ -39,6 +39,7 @@ public class DomainMapService {
    private DomainMapService() {
        this.config = new ResourceConfig();
        this.config.packages("org.qortal.api.resource", "org.qortal.api.domainmap.resource");
        this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
        this.config.register(OpenApiResource.class);
        this.config.register(ApiDefinition.class);
        this.config.register(AnnotationPostProcessor.class);

@@ -39,6 +39,7 @@ public class GatewayService {
    private GatewayService() {
        this.config = new ResourceConfig();
        this.config.packages("org.qortal.api.resource", "org.qortal.api.gateway.resource");
        this.config.register(org.glassfish.jersey.media.multipart.MultiPartFeature.class);
        this.config.register(OpenApiResource.class);
        this.config.register(ApiDefinition.class);
        this.config.register(AnnotationPostProcessor.class);
@@ -1,14 +1,13 @@
package org.qortal.api;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.qortal.arbitrary.misc.Service;

import java.util.Objects;

public class HTMLParser {

    private static final Logger LOGGER = LogManager.getLogger(HTMLParser.class);
@@ -22,10 +21,11 @@ public class HTMLParser {
    private String identifier;
    private String path;
    private String theme;
    private String lang;
    private boolean usingCustomRouting;

    public HTMLParser(String resourceId, String inPath, String prefix, boolean includeResourceIdInPrefix, byte[] data,
                      String qdnContext, Service service, String identifier, String theme, boolean usingCustomRouting) {
                      String qdnContext, Service service, String identifier, String theme, boolean usingCustomRouting, String lang) {
        String inPathWithoutFilename = inPath.contains("/") ? inPath.substring(0, inPath.lastIndexOf('/')) : String.format("/%s",inPath);
        this.qdnBase = includeResourceIdInPrefix ? String.format("%s/%s", prefix, resourceId) : prefix;
        this.qdnBaseWithPath = includeResourceIdInPrefix ? String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : String.format("%s%s", prefix, inPathWithoutFilename);
@@ -36,6 +36,7 @@ public class HTMLParser {
        this.identifier = identifier;
        this.path = inPath;
        this.theme = theme;
        this.lang = lang;
        this.usingCustomRouting = usingCustomRouting;
    }

@@ -61,9 +62,13 @@ public class HTMLParser {
        String identifier = this.identifier != null ? this.identifier.replace("\\", "").replace("\"","\\\"") : "";
        String path = this.path != null ? this.path.replace("\\", "").replace("\"","\\\"") : "";
        String theme = this.theme != null ? this.theme.replace("\\", "").replace("\"","\\\"") : "";
        String lang = this.lang != null ? this.lang.replace("\\", "").replace("\"", "\\\"") : "";
        String qdnBase = this.qdnBase != null ? this.qdnBase.replace("\\", "").replace("\"","\\\"") : "";
        String qdnBaseWithPath = this.qdnBaseWithPath != null ? this.qdnBaseWithPath.replace("\\", "").replace("\"","\\\"") : "";
        String qdnContextVar = String.format("<script>var _qdnContext=\"%s\"; var _qdnTheme=\"%s\"; var _qdnService=\"%s\"; var _qdnName=\"%s\"; var _qdnIdentifier=\"%s\"; var _qdnPath=\"%s\"; var _qdnBase=\"%s\"; var _qdnBaseWithPath=\"%s\";</script>", qdnContext, theme, service, name, identifier, path, qdnBase, qdnBaseWithPath);
        String qdnContextVar = String.format(
            "<script>var _qdnContext=\"%s\"; var _qdnTheme=\"%s\"; var _qdnLang=\"%s\"; var _qdnService=\"%s\"; var _qdnName=\"%s\"; var _qdnIdentifier=\"%s\"; var _qdnPath=\"%s\"; var _qdnBase=\"%s\"; var _qdnBaseWithPath=\"%s\";</script>",
            qdnContext, theme, lang, service, name, identifier, path, qdnBase, qdnBaseWithPath
        );
        head.get(0).prepend(qdnContextVar);

        // Add base href tag
@@ -304,11 +304,11 @@ public class BitcoinyTBDRequest {
    private String networkName;

    /**
     * Fee Ceiling
     * Fee Required
     *
     * web search, LTC fee ceiling = 1000L
     * web search, LTC fee required = 1000L
     */
    private long feeCeiling;
    private long feeRequired;

    /**
     * Extended Public Key
@@ -570,8 +570,8 @@ public class BitcoinyTBDRequest {
        return this.networkName;
    }

    public long getFeeCeiling() {
        return this.feeCeiling;
    public long getFeeRequired() {
        return this.feeRequired;
    }

    public String getExtendedPublicKey() {
@@ -671,7 +671,7 @@ public class BitcoinyTBDRequest {
        ", minimumOrderAmount=" + minimumOrderAmount +
        ", feePerKb=" + feePerKb +
        ", networkName='" + networkName + '\'' +
        ", feeCeiling=" + feeCeiling +
        ", feeRequired=" + feeRequired +
        ", extendedPublicKey='" + extendedPublicKey + '\'' +
        ", sendAmount=" + sendAmount +
        ", sendingFeePerByte=" + sendingFeePerByte +
@@ -142,10 +142,20 @@ public class DevProxyServerResource {
            }
        }

        String lang = request.getParameter("lang");
        if (lang == null || lang.isBlank()) {
            lang = "en"; // fallback
        }

        String theme = request.getParameter("theme");
        if (theme == null || theme.isBlank()) {
            theme = "light";
        }

        // Parse and modify output if needed
        if (HTMLParser.isHtmlFile(filename)) {
            // HTML file - needs to be parsed
            HTMLParser htmlParser = new HTMLParser("", inPath, "", false, data, "proxy", Service.APP, null, "light", true);
            HTMLParser htmlParser = new HTMLParser("", inPath, "", false, data, "proxy", Service.APP, null, theme, true, lang);
            htmlParser.addAdditionalHeaderTags();
            response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' ws:; font-src 'self' data:;");
            response.setContentType(con.getContentType());
@@ -3,6 +3,7 @@ package org.qortal.api.resource;
import com.google.common.primitives.Bytes;
import com.j256.simplemagic.ContentInfo;
import com.j256.simplemagic.ContentInfoUtil;

import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.media.ArraySchema;
@@ -12,6 +13,7 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.logging.log4j.LogManager;
@@ -63,14 +65,19 @@ import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.FileNameMap;
import java.net.URLConnection;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
@@ -78,6 +85,16 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.zip.GZIPOutputStream;

import org.apache.tika.Tika;
import org.apache.tika.mime.MimeTypeException;
import org.apache.tika.mime.MimeTypes;

import javax.ws.rs.core.Response;

import org.glassfish.jersey.media.multipart.FormDataParam;
import static org.qortal.api.ApiError.REPOSITORY_ISSUE;

@Path("/arbitrary")
@Tag(name = "Arbitrary")
@@ -686,20 +703,20 @@ public class ArbitraryResource {
                )
            }
    )
    public HttpServletResponse get(@PathParam("service") Service service,
    public void get(@PathParam("service") Service service,
                    @PathParam("name") String name,
                    @QueryParam("filepath") String filepath,
                    @QueryParam("encoding") String encoding,
                    @QueryParam("rebuild") boolean rebuild,
                    @QueryParam("async") boolean async,
                    @QueryParam("attempts") Integer attempts) {
                    @QueryParam("attempts") Integer attempts, @QueryParam("attachment") boolean attachment, @QueryParam("attachmentFilename") String attachmentFilename) {

        // Authentication can be bypassed in the settings, for those running public QDN nodes
        if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
            Security.checkApiCallAllowed(request);
        }

        return this.download(service, name, null, filepath, encoding, rebuild, async, attempts);
        this.download(service, name, null, filepath, encoding, rebuild, async, attempts, attachment, attachmentFilename);
    }

    @GET
@@ -719,21 +736,21 @@ public class ArbitraryResource {
                )
            }
    )
    public HttpServletResponse get(@PathParam("service") Service service,
    public void get(@PathParam("service") Service service,
                    @PathParam("name") String name,
                    @PathParam("identifier") String identifier,
                    @QueryParam("filepath") String filepath,
                    @QueryParam("encoding") String encoding,
                    @QueryParam("rebuild") boolean rebuild,
                    @QueryParam("async") boolean async,
                    @QueryParam("attempts") Integer attempts) {
                    @QueryParam("attempts") Integer attempts, @QueryParam("attachment") boolean attachment, @QueryParam("attachmentFilename") String attachmentFilename) {

        // Authentication can be bypassed in the settings, for those running public QDN nodes
        if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
            Security.checkApiCallAllowed(request, null);
        }

        return this.download(service, name, identifier, filepath, encoding, rebuild, async, attempts);
        this.download(service, name, identifier, filepath, encoding, rebuild, async, attempts, attachment, attachmentFilename);
    }

@@ -878,6 +895,464 @@ public class ArbitraryResource {
    }

    @GET
    @Path("/check/tmp")
    @Produces(MediaType.TEXT_PLAIN)
    @Operation(
        summary = "Check if the disk has enough disk space for an upcoming upload",
        responses = {
            @ApiResponse(description = "OK if sufficient space", responseCode = "200"),
            @ApiResponse(description = "Insufficient space", responseCode = "507") // 507 = Insufficient Storage
        }
    )
    @SecurityRequirement(name = "apiKey")
    public Response checkUploadSpace(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
                                     @QueryParam("totalSize") Long totalSize) {
        Security.checkApiCallAllowed(request);

        if (totalSize == null || totalSize <= 0) {
            return Response.status(Response.Status.BAD_REQUEST)
                    .entity("Missing or invalid totalSize parameter").build();
        }

        File uploadDir = new File("uploads-temp");
        if (!uploadDir.exists()) {
            uploadDir.mkdirs(); // ensure the folder exists
        }

        long usableSpace = uploadDir.getUsableSpace();
        long requiredSpace = (long)(((double)totalSize) * 2.2); // estimate for chunks + merge

        if (usableSpace < requiredSpace) {
            return Response.status(507).entity("Insufficient disk space").build();
        }

        return Response.ok("Sufficient disk space").build();
    }

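Worked example of the space check above: for a 1 GiB upload (totalSize = 1,073,741,824 bytes) the node requires roughly 1,073,741,824 × 2.2 ≈ 2,362,232,013 bytes (about 2.2 GiB) of usable space under uploads-temp, covering the stored chunks plus the merged copy; with anything less the endpoint responds with HTTP 507.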
    @POST
    @Path("/{service}/{name}/chunk")
    @Consumes(MediaType.MULTIPART_FORM_DATA)
    @Operation(
        summary = "Upload a single file chunk to be later assembled into a complete arbitrary resource (no identifier)",
        requestBody = @RequestBody(
            required = true,
            content = @Content(
                mediaType = MediaType.MULTIPART_FORM_DATA,
                schema = @Schema(
                    implementation = Object.class
                )
            )
        ),
        responses = {
            @ApiResponse(
                description = "Chunk uploaded successfully",
                responseCode = "200"
            ),
            @ApiResponse(
                description = "Error writing chunk",
                responseCode = "500"
            )
        }
    )
    @SecurityRequirement(name = "apiKey")
    public Response uploadChunkNoIdentifier(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
                                            @PathParam("service") String serviceString,
                                            @PathParam("name") String name,
                                            @FormDataParam("chunk") InputStream chunkStream,
                                            @FormDataParam("index") int index) {
        Security.checkApiCallAllowed(request);

        try {
            String safeService = Paths.get(serviceString).getFileName().toString();
            String safeName = Paths.get(name).getFileName().toString();

            java.nio.file.Path tempDir = Paths.get("uploads-temp", safeService, safeName);
            Files.createDirectories(tempDir);

            java.nio.file.Path chunkFile = tempDir.resolve("chunk_" + index);
            Files.copy(chunkStream, chunkFile, StandardCopyOption.REPLACE_EXISTING);

            return Response.ok("Chunk " + index + " received").build();
        } catch (IOException e) {
            LOGGER.error("Failed to write chunk {} for service '{}' and name '{}'", index, serviceString, name, e);
            return Response.serverError().entity("Failed to write chunk: " + e.getMessage()).build();
        }
    }

    @POST
    @Path("/{service}/{name}/finalize")
    @Produces(MediaType.TEXT_PLAIN)
    @Operation(
        summary = "Finalize a chunked upload (no identifier) and build a raw, unsigned, ARBITRARY transaction",
        responses = {
            @ApiResponse(
                description = "raw, unsigned, ARBITRARY transaction encoded in Base58",
                content = @Content(mediaType = MediaType.TEXT_PLAIN)
            )
        }
    )
    @SecurityRequirement(name = "apiKey")
    public String finalizeUploadNoIdentifier(
            @HeaderParam(Security.API_KEY_HEADER) String apiKey,
            @PathParam("service") String serviceString,
            @PathParam("name") String name,
            @QueryParam("title") String title,
            @QueryParam("description") String description,
            @QueryParam("tags") List<String> tags,
            @QueryParam("category") Category category,
            @QueryParam("filename") String filename,
            @QueryParam("fee") Long fee,
            @QueryParam("preview") Boolean preview,
            @QueryParam("isZip") Boolean isZip
    ) {
        Security.checkApiCallAllowed(request);
        java.nio.file.Path tempFile = null;
        java.nio.file.Path tempDir = null;
        java.nio.file.Path chunkDir = null;
        String safeService = Paths.get(serviceString).getFileName().toString();
        String safeName = Paths.get(name).getFileName().toString();

        try {
            chunkDir = Paths.get("uploads-temp", safeService, safeName);

            if (!Files.exists(chunkDir) || !Files.isDirectory(chunkDir)) {
                throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No chunks found for upload");
            }

            String safeFilename = (filename == null || filename.isBlank()) ? "qortal-" + NTP.getTime() : filename;
            tempDir = Files.createTempDirectory("qortal-");
            String sanitizedFilename = Paths.get(safeFilename).getFileName().toString();
            tempFile = tempDir.resolve(sanitizedFilename);

            try (OutputStream out = Files.newOutputStream(tempFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
                byte[] buffer = new byte[65536];
                for (java.nio.file.Path chunk : Files.list(chunkDir)
                        .filter(path -> path.getFileName().toString().startsWith("chunk_"))
                        .sorted(Comparator.comparingInt(path -> {
                            String name2 = path.getFileName().toString();
                            String numberPart = name2.substring("chunk_".length());
                            return Integer.parseInt(numberPart);
                        })).collect(Collectors.toList())) {
                    try (InputStream in = Files.newInputStream(chunk)) {
                        int bytesRead;
                        while ((bytesRead = in.read(buffer)) != -1) {
                            out.write(buffer, 0, bytesRead);
                        }
                    }
                }
            }

            String detectedExtension = "";
            String uploadFilename = null;
            boolean extensionIsValid = false;

            if (filename != null && !filename.isBlank()) {
                int lastDot = filename.lastIndexOf('.');
                if (lastDot > 0 && lastDot < filename.length() - 1) {
                    extensionIsValid = true;
                    uploadFilename = filename;
                }
            }

            if (!extensionIsValid) {
                Tika tika = new Tika();
                String mimeType = tika.detect(tempFile.toFile());
                try {
                    MimeTypes allTypes = MimeTypes.getDefaultMimeTypes();
                    org.apache.tika.mime.MimeType mime = allTypes.forName(mimeType);
                    detectedExtension = mime.getExtension();
                } catch (MimeTypeException e) {
                    LOGGER.warn("Could not determine file extension for MIME type: {}", mimeType, e);
                }

                if (filename != null && !filename.isBlank()) {
                    int lastDot = filename.lastIndexOf('.');
                    String baseName = (lastDot > 0) ? filename.substring(0, lastDot) : filename;
                    uploadFilename = baseName + (detectedExtension != null ? detectedExtension : "");
                } else {
                    uploadFilename = "qortal-" + NTP.getTime() + (detectedExtension != null ? detectedExtension : "");
                }
            }

            Boolean isZipBoolean = false;

            if (isZip != null && isZip) {
                isZipBoolean = true;
            }

            // ✅ Call upload with `null` as identifier
            return this.upload(
                Service.valueOf(serviceString),
                name,
                null, // no identifier
                tempFile.toString(),
                null,
                null,
                isZipBoolean,
                fee,
                uploadFilename,
                title,
                description,
                tags,
                category,
                preview
            );

        } catch (IOException e) {
            LOGGER.error("Failed to merge chunks for service='{}', name='{}'", serviceString, name, e);

            throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, "Failed to merge chunks: " + e.getMessage());
        } finally {
            if (tempDir != null) {
                try {
                    Files.walk(tempDir)
                        .sorted(Comparator.reverseOrder())
                        .map(java.nio.file.Path::toFile)
                        .forEach(File::delete);
                } catch (IOException e) {
                    LOGGER.warn("Failed to delete temp directory: {}", tempDir, e);
                }
            }

            try {
                Files.walk(chunkDir)
                    .sorted(Comparator.reverseOrder())
                    .map(java.nio.file.Path::toFile)
                    .forEach(File::delete);
            } catch (IOException e) {
                LOGGER.warn("Failed to delete chunk directory: {}", chunkDir, e);
            }
        }
    }

    @POST
    @Path("/{service}/{name}/{identifier}/chunk")
    @Consumes(MediaType.MULTIPART_FORM_DATA)
    @Operation(
        summary = "Upload a single file chunk to be later assembled into a complete arbitrary resource",
        requestBody = @RequestBody(
            required = true,
            content = @Content(
                mediaType = MediaType.MULTIPART_FORM_DATA,
                schema = @Schema(
                    implementation = Object.class
                )
            )
        ),
        responses = {
            @ApiResponse(
                description = "Chunk uploaded successfully",
                responseCode = "200"
            ),
            @ApiResponse(
                description = "Error writing chunk",
                responseCode = "500"
            )
        }
    )
    @SecurityRequirement(name = "apiKey")
    public Response uploadChunk(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
                                @PathParam("service") String serviceString,
                                @PathParam("name") String name,
                                @PathParam("identifier") String identifier,
                                @FormDataParam("chunk") InputStream chunkStream,
                                @FormDataParam("index") int index) {
        Security.checkApiCallAllowed(request);

        try {
            String safeService = Paths.get(serviceString).getFileName().toString();
            String safeName = Paths.get(name).getFileName().toString();
            String safeIdentifier = Paths.get(identifier).getFileName().toString();

            java.nio.file.Path tempDir = Paths.get("uploads-temp", safeService, safeName, safeIdentifier);

            Files.createDirectories(tempDir);

            java.nio.file.Path chunkFile = tempDir.resolve("chunk_" + index);
            Files.copy(chunkStream, chunkFile, StandardCopyOption.REPLACE_EXISTING);

            return Response.ok("Chunk " + index + " received").build();
        } catch (IOException e) {
            LOGGER.error("Failed to write chunk {} for service='{}', name='{}', identifier='{}'", index, serviceString, name, identifier, e);
            return Response.serverError().entity("Failed to write chunk: " + e.getMessage()).build();
        }
    }

    @POST
    @Path("/{service}/{name}/{identifier}/finalize")
    @Produces(MediaType.TEXT_PLAIN)
    @Operation(
        summary = "Finalize a chunked upload and build a raw, unsigned, ARBITRARY transaction",
        responses = {
            @ApiResponse(
                description = "raw, unsigned, ARBITRARY transaction encoded in Base58",
                content = @Content(mediaType = MediaType.TEXT_PLAIN)
            )
        }
    )
    @SecurityRequirement(name = "apiKey")
    public String finalizeUpload(
            @HeaderParam(Security.API_KEY_HEADER) String apiKey,
            @PathParam("service") String serviceString,
            @PathParam("name") String name,
            @PathParam("identifier") String identifier,
            @QueryParam("title") String title,
            @QueryParam("description") String description,
            @QueryParam("tags") List<String> tags,
            @QueryParam("category") Category category,
            @QueryParam("filename") String filename,
            @QueryParam("fee") Long fee,
            @QueryParam("preview") Boolean preview,
            @QueryParam("isZip") Boolean isZip
    ) {
        Security.checkApiCallAllowed(request);
        java.nio.file.Path tempFile = null;
        java.nio.file.Path tempDir = null;
        java.nio.file.Path chunkDir = null;

        try {
            String safeService = Paths.get(serviceString).getFileName().toString();
            String safeName = Paths.get(name).getFileName().toString();
            String safeIdentifier = Paths.get(identifier).getFileName().toString();
            java.nio.file.Path baseUploadsDir = Paths.get("uploads-temp"); // relative to Qortal working dir
            chunkDir = baseUploadsDir.resolve(safeService).resolve(safeName).resolve(safeIdentifier);

            if (!Files.exists(chunkDir) || !Files.isDirectory(chunkDir)) {
                throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No chunks found for upload");
            }

            // Step 1: Determine a safe filename for disk temp file (regardless of extension correctness)
            String safeFilename = filename;
            if (filename == null || filename.isBlank()) {
                safeFilename = "qortal-" + NTP.getTime();
            }

            tempDir = Files.createTempDirectory("qortal-");
            String sanitizedFilename = Paths.get(safeFilename).getFileName().toString();
            tempFile = tempDir.resolve(sanitizedFilename);

            // Step 2: Merge chunks

            try (OutputStream out = Files.newOutputStream(tempFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
                byte[] buffer = new byte[65536];
                for (java.nio.file.Path chunk : Files.list(chunkDir)
                        .filter(path -> path.getFileName().toString().startsWith("chunk_"))
                        .sorted(Comparator.comparingInt(path -> {
                            String name2 = path.getFileName().toString();
                            String numberPart = name2.substring("chunk_".length());
                            return Integer.parseInt(numberPart);
                        })).collect(Collectors.toList())) {
                    try (InputStream in = Files.newInputStream(chunk)) {
                        int bytesRead;
                        while ((bytesRead = in.read(buffer)) != -1) {
                            out.write(buffer, 0, bytesRead);
                        }
                    }
                }
            }

            // Step 3: Determine correct extension
            String detectedExtension = "";
            String uploadFilename = null;
            boolean extensionIsValid = false;

            if (filename != null && !filename.isBlank()) {
                int lastDot = filename.lastIndexOf('.');
                if (lastDot > 0 && lastDot < filename.length() - 1) {
                    extensionIsValid = true;
                    uploadFilename = filename;
                }
            }

            if (!extensionIsValid) {
                Tika tika = new Tika();
                String mimeType = tika.detect(tempFile.toFile());
                try {
                    MimeTypes allTypes = MimeTypes.getDefaultMimeTypes();
                    org.apache.tika.mime.MimeType mime = allTypes.forName(mimeType);
                    detectedExtension = mime.getExtension();
                } catch (MimeTypeException e) {
                    LOGGER.warn("Could not determine file extension for MIME type: {}", mimeType, e);
                }

                if (filename != null && !filename.isBlank()) {
                    int lastDot = filename.lastIndexOf('.');
                    String baseName = (lastDot > 0) ? filename.substring(0, lastDot) : filename;
                    uploadFilename = baseName + (detectedExtension != null ? detectedExtension : "");
                } else {
                    uploadFilename = "qortal-" + NTP.getTime() + (detectedExtension != null ? detectedExtension : "");
                }
            }

            Boolean isZipBoolean = false;

            if (isZip != null && isZip) {
                isZipBoolean = true;
            }

            return this.upload(
                Service.valueOf(serviceString),
                name,
                identifier,
                tempFile.toString(),
                null,
                null,
                isZipBoolean,
                fee,
                uploadFilename,
                title,
                description,
                tags,
                category,
                preview
            );

        } catch (IOException e) {
            LOGGER.error("Unexpected error in finalizeUpload for service='{}', name='{}', identifier='{}'", serviceString, name, identifier, e);

            throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.REPOSITORY_ISSUE, "Failed to merge chunks: " + e.getMessage());
        } finally {
            if (tempDir != null) {
                try {
                    Files.walk(tempDir)
                        .sorted(Comparator.reverseOrder())
                        .map(java.nio.file.Path::toFile)
                        .forEach(File::delete);
                } catch (IOException e) {
                    LOGGER.warn("Failed to delete temp directory: {}", tempDir, e);
                }
            }

            try {
                Files.walk(chunkDir)
                    .sorted(Comparator.reverseOrder())
                    .map(java.nio.file.Path::toFile)
                    .forEach(File::delete);
            } catch (IOException e) {
                LOGGER.warn("Failed to delete chunk directory: {}", chunkDir, e);
            }
        }
    }

    // Upload base64-encoded data

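A minimal client-side sketch of the chunked-upload flow introduced above: check free space, POST each chunk as multipart form data, then finalize to receive a raw unsigned ARBITRARY transaction in Base58. The endpoint paths, query parameters and form field names come from this commit; the host, port, X-API-KEY header name, chunk size and boundary are assumptions, and a real client should URL-encode the filename.

    import java.io.IOException;
    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class ChunkedUploadSketch {
        private static final String NODE = "http://localhost:12391"; // assumed local node
        private static final String API_KEY = "...";                 // node API key (header name assumed)
        private static final String BOUNDARY = "qortal-chunk-boundary";

        public static String upload(Path file, String service, String name) throws IOException, InterruptedException {
            HttpClient client = HttpClient.newHttpClient();
            byte[] data = Files.readAllBytes(file);
            int chunkSize = 5 * 1024 * 1024; // assumed 5 MiB chunks

            // 1. Ask the node whether it has room for the chunks plus the merged file
            HttpRequest check = HttpRequest.newBuilder()
                    .uri(URI.create(NODE + "/arbitrary/check/tmp?totalSize=" + data.length))
                    .header("X-API-KEY", API_KEY)
                    .GET().build();
            if (client.send(check, HttpResponse.BodyHandlers.ofString()).statusCode() != 200)
                throw new IOException("Node reports insufficient disk space");

            // 2. Upload each chunk as multipart/form-data with fields "chunk" and "index"
            for (int index = 0, offset = 0; offset < data.length; index++, offset += chunkSize) {
                int len = Math.min(chunkSize, data.length - offset);
                byte[] body = multipart("chunk", java.util.Arrays.copyOfRange(data, offset, offset + len),
                        "index", String.valueOf(index));
                HttpRequest chunkRequest = HttpRequest.newBuilder()
                        .uri(URI.create(NODE + "/arbitrary/" + service + "/" + name + "/chunk"))
                        .header("X-API-KEY", API_KEY)
                        .header("Content-Type", "multipart/form-data; boundary=" + BOUNDARY)
                        .POST(HttpRequest.BodyPublishers.ofByteArray(body)).build();
                client.send(chunkRequest, HttpResponse.BodyHandlers.ofString());
            }

            // 3. Finalize: the node merges the chunks and returns a raw unsigned ARBITRARY transaction (Base58)
            HttpRequest finalize = HttpRequest.newBuilder()
                    .uri(URI.create(NODE + "/arbitrary/" + service + "/" + name + "/finalize?filename=" + file.getFileName()))
                    .header("X-API-KEY", API_KEY)
                    .POST(HttpRequest.BodyPublishers.noBody()).build();
            return client.send(finalize, HttpResponse.BodyHandlers.ofString()).body();
        }

        // Very small multipart/form-data builder: one binary part and one text part
        private static byte[] multipart(String fileField, byte[] fileBytes, String textField, String textValue) {
            String head = "--" + BOUNDARY + "\r\n"
                    + "Content-Disposition: form-data; name=\"" + fileField + "\"; filename=\"blob\"\r\n"
                    + "Content-Type: application/octet-stream\r\n\r\n";
            String tail = "\r\n--" + BOUNDARY + "\r\n"
                    + "Content-Disposition: form-data; name=\"" + textField + "\"\r\n\r\n"
                    + textValue + "\r\n--" + BOUNDARY + "--\r\n";
            byte[] headBytes = head.getBytes(StandardCharsets.UTF_8);
            byte[] tailBytes = tail.getBytes(StandardCharsets.UTF_8);
            byte[] out = new byte[headBytes.length + fileBytes.length + tailBytes.length];
            System.arraycopy(headBytes, 0, out, 0, headBytes.length);
            System.arraycopy(fileBytes, 0, out, headBytes.length, fileBytes.length);
            System.arraycopy(tailBytes, 0, out, headBytes.length + fileBytes.length, tailBytes.length);
            return out;
        }
    }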
@@ -1343,7 +1818,7 @@ public class ArbitraryResource {
        if (path == null) {
            // See if we have a string instead
            if (string != null) {
                if (filename == null) {
                if (filename == null || filename.isBlank()) {
                    // Use current time as filename
                    filename = String.format("qortal-%d", NTP.getTime());
                }
@@ -1358,7 +1833,7 @@ public class ArbitraryResource {
            }
            // ... or base64 encoded raw data
            else if (base64 != null) {
                if (filename == null) {
                if (filename == null || filename.isBlank()) {
                    // Use current time as filename
                    filename = String.format("qortal-%d", NTP.getTime());
                }
@@ -1409,6 +1884,7 @@ public class ArbitraryResource {
            );

            transactionBuilder.build();

            // Don't compute nonce - this is done by the client (or via POST /arbitrary/compute)
            ArbitraryTransactionData transactionData = transactionBuilder.getArbitraryTransactionData();
            return Base58.encode(ArbitraryTransactionTransformer.toBytes(transactionData));
@@ -1424,22 +1900,20 @@ public class ArbitraryResource {
        }
    }

    private HttpServletResponse download(Service service, String name, String identifier, String filepath, String encoding, boolean rebuild, boolean async, Integer maxAttempts) {
    private void download(Service service, String name, String identifier, String filepath, String encoding, boolean rebuild, boolean async, Integer maxAttempts, boolean attachment, String attachmentFilename) {
        try {
            ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);

            int attempts = 0;
            if (maxAttempts == null) {
                maxAttempts = 5;
            }

            // Loop until we have data
            if (async) {
                // Asynchronous
                arbitraryDataReader.loadAsynchronously(false, 1);
            }
            else {
            } else {
                // Synchronous
                while (!Controller.isStopping()) {
                    attempts++;
@@ -1449,88 +1923,189 @@ public class ArbitraryResource {
                        break;
                    } catch (MissingDataException e) {
                        if (attempts > maxAttempts) {
                            // Give up after 5 attempts
                            throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data unavailable. Please try again later.");
                        }
                    }
                }
                Thread.sleep(3000L);
            }
        }

        java.nio.file.Path outputPath = arbitraryDataReader.getFilePath();
        if (outputPath == null) {
            // Assume the resource doesn't exist
            throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, "File not found");
        }

        if (filepath == null || filepath.isEmpty()) {
            // No file path supplied - so check if this is a single file resource
            String[] files = ArrayUtils.removeElement(outputPath.toFile().list(), ".qortal");
            if (files != null && files.length == 1) {
                // This is a single file resource
                filepath = files[0];
            }
            else {
                throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA,
                    "filepath is required for resources containing more than one file");
            } else {
                throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "filepath is required for resources containing more than one file");
            }
        }

        java.nio.file.Path path = Paths.get(outputPath.toString(), filepath);
        if (!Files.exists(path)) {
            String message = String.format("No file exists at filepath: %s", filepath);
            throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, message);
            throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "No file exists at filepath: " + filepath);
        }

        byte[] data;
        int fileSize = (int)path.toFile().length();
        int length = fileSize;

        // Parse "Range" header
        Integer rangeStart = null;
        Integer rangeEnd = null;

        if (attachment) {
            String rawFilename;

            if (attachmentFilename != null && !attachmentFilename.isEmpty()) {
                // 1. Sanitize first
                String safeAttachmentFilename = attachmentFilename.replaceAll("[\\\\/:*?\"<>|]", "_");

                // 2. Check for a valid extension (2-5 alphanumeric chars)
                if (!safeAttachmentFilename.matches(".*\\.[a-zA-Z0-9]{2,5}$")) {
                    safeAttachmentFilename += ".bin";
                }

                rawFilename = safeAttachmentFilename;
            } else {
                // Fallback if no filename is provided
                String baseFilename = (identifier != null && !identifier.isEmpty())
                        ? name + "-" + identifier
                        : name;
                rawFilename = baseFilename.replaceAll("[\\\\/:*?\"<>|]", "_") + ".bin";
            }

            // Optional: trim length
            rawFilename = rawFilename.length() > 100 ? rawFilename.substring(0, 100) : rawFilename;

            // 3. Set Content-Disposition header
            response.setHeader("Content-Disposition", "attachment; filename=\"" + rawFilename + "\"");
        }

        // Determine the total size of the requested file
        long fileSize = Files.size(path);
        String mimeType = context.getMimeType(path.toString());

        // Attempt to read the "Range" header from the request to support partial content delivery (e.g., for video streaming or resumable downloads)
        String range = request.getHeader("Range");
        if (range != null) {
            range = range.replace("bytes=", "");
            String[] parts = range.split("-");
            rangeStart = (parts != null && parts.length > 0) ? Integer.parseInt(parts[0]) : null;
            rangeEnd = (parts != null && parts.length > 1) ? Integer.parseInt(parts[1]) : fileSize;

        long rangeStart = 0;
        long rangeEnd = fileSize - 1;
        boolean isPartial = false;

        // If a Range header is present and no base64 encoding is requested, parse the range values
        if (range != null && encoding == null) {
            range = range.replace("bytes=", ""); // Remove the "bytes=" prefix
            String[] parts = range.split("-"); // Split the range into start and end

            // Parse range start
            if (parts.length > 0 && !parts[0].isEmpty()) {
                rangeStart = Long.parseLong(parts[0]);
            }

            // Parse range end, if present
            if (parts.length > 1 && !parts[1].isEmpty()) {
                rangeEnd = Long.parseLong(parts[1]);
            }

            isPartial = true; // Indicate that this is a partial content request
        }

        if (rangeStart != null && rangeEnd != null) {
            // We have a range, so update the requested length
            length = rangeEnd - rangeStart;

        // Calculate how many bytes should be sent in the response
        long contentLength = rangeEnd - rangeStart + 1;

        // Inform the client that byte ranges are supported
        response.setHeader("Accept-Ranges", "bytes");

        if (isPartial) {
            // If partial content was requested, return 206 Partial Content with appropriate headers
            response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
            response.setHeader("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd, fileSize));
        } else {
            // Otherwise, return the entire file with status 200 OK
            response.setStatus(HttpServletResponse.SC_OK);
        }

        if (length < fileSize && encoding == null) {
            // Partial content requested, and not encoding the data
            response.setStatus(206);
            response.addHeader("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd-1, fileSize));
            data = FilesystemUtils.readFromFile(path.toString(), rangeStart, length);

        // Initialize output streams for writing the file to the response
        OutputStream rawOut = null;
        OutputStream base64Out = null;
        OutputStream gzipOut = null;

        try {
            rawOut = response.getOutputStream();

            if (encoding != null && "base64".equalsIgnoreCase(encoding)) {
                // If base64 encoding is requested, override content type
                response.setContentType("text/plain");

                // Check if the client accepts gzip encoding
                String acceptEncoding = request.getHeader("Accept-Encoding");
                boolean wantsGzip = acceptEncoding != null && acceptEncoding.contains("gzip");

                if (wantsGzip) {
                    // Wrap output in GZIP and Base64 streams if gzip is accepted
                    response.setHeader("Content-Encoding", "gzip");
                    gzipOut = new GZIPOutputStream(rawOut);
                    base64Out = java.util.Base64.getEncoder().wrap(gzipOut);
                } else {
                    // Wrap output in Base64 only
                    base64Out = java.util.Base64.getEncoder().wrap(rawOut);
                }

                rawOut = base64Out; // Use the wrapped stream for writing
            } else {
                // For raw binary output, set the content type and length
                response.setContentType(mimeType != null ? mimeType : "application/octet-stream");
                response.setContentLength((int) contentLength);
            }

            // Stream file content
            try (InputStream inputStream = Files.newInputStream(path)) {
                if (rangeStart > 0) {
                    inputStream.skip(rangeStart);
                }

                byte[] buffer = new byte[65536];
                long bytesRemaining = contentLength;
                int bytesRead;

                while (bytesRemaining > 0 && (bytesRead = inputStream.read(buffer, 0, (int) Math.min(buffer.length, bytesRemaining))) != -1) {
                    rawOut.write(buffer, 0, bytesRead);
                    bytesRemaining -= bytesRead;
                }
            }

            // Stream finished
            if (base64Out != null) {
                base64Out.close(); // Also flushes and closes the wrapped gzipOut
            } else if (gzipOut != null) {
                gzipOut.close(); // Only close gzipOut if it wasn't wrapped by base64Out
            } else {
                rawOut.flush(); // Flush only the base output stream if nothing was wrapped
            }

            if (!response.isCommitted()) {
                response.setStatus(HttpServletResponse.SC_OK);
                response.getWriter().write(" ");
            }

        } catch (IOException e) {
            // Streaming errors should not rethrow — just log
            LOGGER.warn(String.format("Streaming error for %s %s: %s", service, name, e.getMessage()));
        }
        else {
            // Full content requested (or encoded data)
            response.setStatus(200);
            data = Files.readAllBytes(path); // TODO: limit file size that can be read into memory

        } catch (IOException | ApiException | DataException e) {
            LOGGER.warn(String.format("Unable to load %s %s: %s", service, name, e.getMessage()));
            if (!response.isCommitted()) {
                throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
            }

        // Encode the data if requested
        if (encoding != null && Objects.equals(encoding.toLowerCase(), "base64")) {
            data = Base64.encode(data);
        } catch (NumberFormatException e) {
            LOGGER.warn(String.format("Invalid range for %s %s: %s", service, name, e.getMessage()));
            if (!response.isCommitted()) {
                throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_DATA, e.getMessage());
            }

        response.addHeader("Accept-Ranges", "bytes");
        response.setContentType(context.getMimeType(path.toString()));
        response.setContentLength(data.length);
        response.getOutputStream().write(data);

        return response;
        } catch (Exception e) {
            LOGGER.debug(String.format("Unable to load %s %s: %s", service, name, e.getMessage()));
            throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, e.getMessage());
        }
    }

    private FileProperties getFileProperties(Service service, String name, String identifier) {
        try {
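Worked example of the new Range handling: a request carrying Range: bytes=0-1048575 for a 5,000,000-byte file (with no base64 encoding requested) gives rangeStart = 0, rangeEnd = 1,048,575 and contentLength = 1,048,576, so the node streams the first MiB with status 206 and Content-Range: bytes 0-1048575/5000000. The same request without a Range header streams all 5,000,000 bytes with status 200, and in both cases Accept-Ranges: bytes is set.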
@@ -502,10 +502,10 @@ public class CrossChainBitcoinResource {
    }

    @GET
    @Path("/feeceiling")
    @Path("/feerequired")
    @Operation(
        summary = "Returns Bitcoin fee per Kb.",
        description = "Returns Bitcoin fee per Kb.",
        summary = "The total fee required for unlocking BTC to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        responses = {
            @ApiResponse(
                content = @Content(
@@ -516,17 +516,17 @@ public class CrossChainBitcoinResource {
            )
        }
    )
    public String getBitcoinFeeCeiling() {
    public String getBitcoinFeeRequired() {
        Bitcoin bitcoin = Bitcoin.getInstance();

        return String.valueOf(bitcoin.getFeeCeiling());
        return String.valueOf(bitcoin.getFeeRequired());
    }

    @POST
    @Path("/updatefeeceiling")
    @Path("/updatefeerequired")
    @Operation(
        summary = "Sets Bitcoin fee ceiling.",
        description = "Sets Bitcoin fee ceiling.",
        summary = "The total fee required for unlocking BTC to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        requestBody = @RequestBody(
            required = true,
            content = @Content(
@@ -545,13 +545,13 @@ public class CrossChainBitcoinResource {
        }
    )
    @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
    public String setBitcoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
    public String setBitcoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
        Security.checkApiCallAllowed(request);

        Bitcoin bitcoin = Bitcoin.getInstance();

        try {
            return CrossChainUtils.setFeeCeiling(bitcoin, fee);
            return CrossChainUtils.setFeeRequired(bitcoin, fee);
        }
        catch (IllegalArgumentException e) {
            throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
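A small client sketch against the renamed endpoints above (the /feerequired and /updatefeerequired paths come from this commit; the /crosschain/btc prefix, the X-API-KEY header name, the local port and the fee value are assumptions):

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class FeeRequiredEndpointSketch {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient();
            String node = "http://localhost:12391"; // assumed local node

            // Read the total fee (in sats) required for unlocking BTC to the trade offer creator
            HttpRequest get = HttpRequest.newBuilder()
                    .uri(URI.create(node + "/crosschain/btc/feerequired"))
                    .GET().build();
            System.out.println("BTC fee required: " + client.send(get, HttpResponse.BodyHandlers.ofString()).body());

            // Update it (API key required, as for the other restricted endpoints)
            HttpRequest post = HttpRequest.newBuilder()
                    .uri(URI.create(node + "/crosschain/btc/updatefeerequired"))
                    .header("X-API-KEY", "...")
                    .POST(HttpRequest.BodyPublishers.ofString("1000"))
                    .build();
            System.out.println(client.send(post, HttpResponse.BodyHandlers.ofString()).body());
        }
    }

The same rename is applied to the Digibyte, Dogecoin, Litecoin, PirateChain and Ravencoin resources that follow.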
@@ -502,10 +502,10 @@ public class CrossChainDigibyteResource {
    }

    @GET
    @Path("/feeceiling")
    @Path("/feerequired")
    @Operation(
        summary = "Returns Digibyte fee per Kb.",
        description = "Returns Digibyte fee per Kb.",
        summary = "The total fee required for unlocking DGB to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        responses = {
            @ApiResponse(
                content = @Content(
@@ -516,17 +516,17 @@ public class CrossChainDigibyteResource {
            )
        }
    )
    public String getDigibyteFeeCeiling() {
    public String getDigibyteFeeRequired() {
        Digibyte digibyte = Digibyte.getInstance();

        return String.valueOf(digibyte.getFeeCeiling());
        return String.valueOf(digibyte.getFeeRequired());
    }

    @POST
    @Path("/updatefeeceiling")
    @Path("/updatefeerequired")
    @Operation(
        summary = "Sets Digibyte fee ceiling.",
        description = "Sets Digibyte fee ceiling.",
        summary = "The total fee required for unlocking DGB to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        requestBody = @RequestBody(
            required = true,
            content = @Content(
@@ -545,13 +545,13 @@ public class CrossChainDigibyteResource {
        }
    )
    @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
    public String setDigibyteFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
    public String setDigibyteFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
        Security.checkApiCallAllowed(request);

        Digibyte digibyte = Digibyte.getInstance();

        try {
            return CrossChainUtils.setFeeCeiling(digibyte, fee);
            return CrossChainUtils.setFeeRequired(digibyte, fee);
        }
        catch (IllegalArgumentException e) {
            throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);

@@ -502,10 +502,10 @@ public class CrossChainDogecoinResource {
    }

    @GET
    @Path("/feeceiling")
    @Path("/feerequired")
    @Operation(
        summary = "Returns Dogecoin fee per Kb.",
        description = "Returns Dogecoin fee per Kb.",
        summary = "The total fee required for unlocking DOGE to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        responses = {
            @ApiResponse(
                content = @Content(
@@ -516,17 +516,17 @@ public class CrossChainDogecoinResource {
            )
        }
    )
    public String getDogecoinFeeCeiling() {
    public String getDogecoinFeeRequired() {
        Dogecoin dogecoin = Dogecoin.getInstance();

        return String.valueOf(dogecoin.getFeeCeiling());
        return String.valueOf(dogecoin.getFeeRequired());
    }

    @POST
    @Path("/updatefeeceiling")
    @Path("/updatefeerequired")
    @Operation(
        summary = "Sets Dogecoin fee ceiling.",
        description = "Sets Dogecoin fee ceiling.",
        summary = "The total fee required for unlocking DOGE to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        requestBody = @RequestBody(
            required = true,
            content = @Content(
@@ -545,13 +545,13 @@ public class CrossChainDogecoinResource {
        }
    )
    @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
    public String setDogecoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
    public String setDogecoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
        Security.checkApiCallAllowed(request);

        Dogecoin dogecoin = Dogecoin.getInstance();

        try {
            return CrossChainUtils.setFeeCeiling(dogecoin, fee);
            return CrossChainUtils.setFeeRequired(dogecoin, fee);
        }
        catch (IllegalArgumentException e) {
            throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);

@@ -540,10 +540,10 @@ public class CrossChainLitecoinResource {
    }

    @GET
    @Path("/feeceiling")
    @Path("/feerequired")
    @Operation(
        summary = "Returns Litecoin fee per Kb.",
        description = "Returns Litecoin fee per Kb.",
        summary = "The total fee required for unlocking LTC to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        responses = {
            @ApiResponse(
                content = @Content(
@@ -554,17 +554,17 @@ public class CrossChainLitecoinResource {
            )
        }
    )
    public String getLitecoinFeeCeiling() {
    public String getLitecoinFeeRequired() {
        Litecoin litecoin = Litecoin.getInstance();

        return String.valueOf(litecoin.getFeeCeiling());
        return String.valueOf(litecoin.getFeeRequired());
    }

    @POST
    @Path("/updatefeeceiling")
    @Path("/updatefeerequired")
    @Operation(
        summary = "Sets Litecoin fee ceiling.",
        description = "Sets Litecoin fee ceiling.",
        summary = "The total fee required for unlocking LTC to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        requestBody = @RequestBody(
            required = true,
            content = @Content(
@@ -583,13 +583,13 @@ public class CrossChainLitecoinResource {
        }
    )
    @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
    public String setLitecoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
    public String setLitecoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
        Security.checkApiCallAllowed(request);

        Litecoin litecoin = Litecoin.getInstance();

        try {
            return CrossChainUtils.setFeeCeiling(litecoin, fee);
            return CrossChainUtils.setFeeRequired(litecoin, fee);
        }
        catch (IllegalArgumentException e) {
            throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);

@@ -587,10 +587,10 @@ public class CrossChainPirateChainResource {
    }

    @GET
    @Path("/feeceiling")
    @Path("/feerequired")
    @Operation(
        summary = "Returns PirateChain fee per Kb.",
        description = "Returns PirateChain fee per Kb.",
        summary = "The total fee required for unlocking ARRR to the trade offer creator.",
        description = "The total fee required for unlocking ARRR to the trade offer creator.",
        responses = {
            @ApiResponse(
                content = @Content(
@@ -601,17 +601,17 @@ public class CrossChainPirateChainResource {
            )
        }
    )
    public String getPirateChainFeeCeiling() {
    public String getPirateChainFeeRequired() {
        PirateChain pirateChain = PirateChain.getInstance();

        return String.valueOf(pirateChain.getFeeCeiling());
        return String.valueOf(pirateChain.getFeeRequired());
    }

    @POST
    @Path("/updatefeeceiling")
    @Path("/updatefeerequired")
    @Operation(
        summary = "Sets PirateChain fee ceiling.",
        description = "Sets PirateChain fee ceiling.",
        summary = "The total fee required for unlocking ARRR to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        requestBody = @RequestBody(
            required = true,
            content = @Content(
@@ -630,13 +630,13 @@ public class CrossChainPirateChainResource {
        }
    )
    @ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
    public String setPirateChainFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
    public String setPirateChainFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
        Security.checkApiCallAllowed(request);

        PirateChain pirateChain = PirateChain.getInstance();

        try {
            return CrossChainUtils.setFeeCeiling(pirateChain, fee);
            return CrossChainUtils.setFeeRequired(pirateChain, fee);
        }
        catch (IllegalArgumentException e) {
            throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);

@@ -502,10 +502,10 @@ public class CrossChainRavencoinResource {
    }

    @GET
    @Path("/feeceiling")
    @Path("/feerequired")
    @Operation(
        summary = "Returns Ravencoin fee per Kb.",
        description = "Returns Ravencoin fee per Kb.",
        summary = "The total fee required for unlocking RVN to the trade offer creator.",
        description = "This is in sats for a transaction that is approximately 300 kB in size.",
        responses = {
            @ApiResponse(
                content = @Content(
@@ -516,17 +516,17 @@ public class CrossChainRavencoinResource {
            )
        }
    )
    public String getRavencoinFeeCeiling() {
    public String getRavencoinFeeRequired() {
        Ravencoin ravencoin = Ravencoin.getInstance();

        return String.valueOf(ravencoin.getFeeCeiling());
        return String.valueOf(ravencoin.getFeeRequired());
    }

    @POST
    @Path("/updatefeeceiling")
    @Path("/updatefeerequired")
    @Operation(
        summary = "Sets Ravencoin fee ceiling.",
|
||||
description = "Sets Ravencoin fee ceiling.",
|
||||
summary = "The total fee required for unlocking RVN to the trade offer creator.",
|
||||
description = "This is in sats for a transaction that is approximately 300 kB in size.",
|
||||
requestBody = @RequestBody(
|
||||
required = true,
|
||||
content = @Content(
|
||||
@ -545,13 +545,13 @@ public class CrossChainRavencoinResource {
|
||||
}
|
||||
)
|
||||
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA})
|
||||
public String setRavencoinFeeCeiling(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
public String setRavencoinFeeRequired(@HeaderParam(Security.API_KEY_HEADER) String apiKey, String fee) {
|
||||
Security.checkApiCallAllowed(request);
|
||||
|
||||
Ravencoin ravencoin = Ravencoin.getInstance();
|
||||
|
||||
try {
|
||||
return CrossChainUtils.setFeeCeiling(ravencoin, fee);
|
||||
return CrossChainUtils.setFeeRequired(ravencoin, fee);
|
||||
}
|
||||
catch (IllegalArgumentException e) {
|
||||
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
|
||||
|
@ -10,6 +10,8 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.glassfish.jersey.media.multipart.ContentDisposition;
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiErrors;
|
||||
@ -18,6 +20,7 @@ import org.qortal.api.Security;
|
||||
import org.qortal.api.model.CrossChainCancelRequest;
|
||||
import org.qortal.api.model.CrossChainTradeLedgerEntry;
|
||||
import org.qortal.api.model.CrossChainTradeSummary;
|
||||
import org.qortal.controller.ForeignFeesManager;
|
||||
import org.qortal.controller.tradebot.TradeBot;
|
||||
import org.qortal.crosschain.ACCT;
|
||||
import org.qortal.crosschain.AcctMode;
|
||||
@ -29,6 +32,8 @@ import org.qortal.data.at.ATData;
|
||||
import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.crosschain.CrossChainTradeData;
|
||||
import org.qortal.data.crosschain.TransactionSummary;
|
||||
import org.qortal.data.crosschain.ForeignFeeDecodedData;
|
||||
import org.qortal.data.crosschain.ForeignFeeEncodedData;
|
||||
import org.qortal.data.transaction.BaseTransactionData;
|
||||
import org.qortal.data.transaction.MessageTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
@ -64,6 +69,8 @@ import java.util.stream.Collectors;
|
||||
@Tag(name = "Cross-Chain")
|
||||
public class CrossChainResource {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(CrossChainResource.class);
|
||||
|
||||
@Context
|
||||
HttpServletRequest request;
|
||||
|
||||
@ -360,6 +367,101 @@ public class CrossChainResource {
|
||||
}
|
||||
}
|
||||
|
||||
@POST
@Path("/signedfees")
@Operation(
summary = "",
description = "",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
array = @ArraySchema(
schema = @Schema(
implementation = ForeignFeeEncodedData.class
)
)
)
),
responses = {
@ApiResponse(
description = "true on success",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "boolean"
)
)
)
}
)
public String postSignedForeignFees(List<ForeignFeeEncodedData> signedFees) {

LOGGER.info("signedFees = " + signedFees);

try {
ForeignFeesManager.getInstance().addSignedFees(signedFees);

return "true";
}
catch( Exception e ) {

LOGGER.error(e.getMessage(), e);

return "false";
}
}

@GET
@Path("/unsignedfees/{address}")
@Operation(
summary = "",
description = "",
responses = {
@ApiResponse(
content = @Content(
array = @ArraySchema(
schema = @Schema(
implementation = ForeignFeeEncodedData.class
)
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
public List<ForeignFeeEncodedData> getUnsignedFees(@PathParam("address") String address) {

List<ForeignFeeEncodedData> unsignedFeesForAddress = ForeignFeesManager.getInstance().getUnsignedFeesForAddress(address);

LOGGER.info("address = " + address);
LOGGER.info("returning unsigned = " + unsignedFeesForAddress);
return unsignedFeesForAddress;
}

@GET
@Path("/signedfees")
@Operation(
summary = "",
description = "",
responses = {
@ApiResponse(
content = @Content(
array = @ArraySchema(
schema = @Schema(
implementation = ForeignFeeDecodedData.class
)
)
)
)
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
public List<ForeignFeeDecodedData> getSignedFees() {

return ForeignFeesManager.getInstance().getSignedFees();
}

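Taken together, these endpoints let a client pull the fees that still need signing for its address, sign them out of band, and push the signatures back to the ForeignFeesManager. A sketch of that round trip; the /crosschain prefix and the localhost:12391 API address are assumptions, and the signing helper is a stand-in, not part of this diff:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SignedFeesRoundTrip {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://localhost:12391/crosschain"; // assumed API address and prefix
        String address = "Qg...";                          // placeholder Qortal address

        // 1. Fetch fees still waiting for a signature for this address
        HttpRequest get = HttpRequest.newBuilder(URI.create(base + "/unsignedfees/" + address)).GET().build();
        String unsignedJson = client.send(get, HttpResponse.BodyHandlers.ofString()).body();

        // 2. Sign them locally (hypothetical helper)
        String signedJson = signFeesSomehow(unsignedJson);

        // 3. Post the signed fees back; the endpoint answers "true" or "false" as plain text
        HttpRequest post = HttpRequest.newBuilder(URI.create(base + "/signedfees"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(signedJson))
                .build();
        System.out.println(client.send(post, HttpResponse.BodyHandlers.ofString()).body());
    }

    private static String signFeesSomehow(String unsignedJson) {
        return unsignedJson; // stand-in for the real signing step
    }
}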
/**
|
||||
* Decode Public Key
|
||||
*
|
||||
|
@ -12,10 +12,15 @@ import org.bouncycastle.util.Strings;
|
||||
import org.json.simple.JSONObject;
|
||||
import org.qortal.api.model.CrossChainTradeLedgerEntry;
|
||||
import org.qortal.api.model.crosschain.BitcoinyTBDRequest;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.crosschain.*;
|
||||
import org.qortal.data.account.AccountBalanceData;
|
||||
import org.qortal.data.at.ATData;
|
||||
import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.crosschain.*;
|
||||
import org.qortal.event.EventBus;
|
||||
import org.qortal.event.LockingFeeUpdateEvent;
|
||||
import org.qortal.event.RequiredFeeUpdateEvent;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.utils.Amounts;
|
||||
@ -23,15 +28,11 @@ import org.qortal.utils.BitTwiddling;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.io.PrintWriter;
|
||||
import java.io.Writer;
|
||||
import java.text.DateFormat;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.time.Instant;
|
||||
import java.time.ZoneId;
|
||||
import java.time.ZonedDateTime;
|
||||
import java.util.*;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
||||
@ -103,11 +104,13 @@ public class CrossChainUtils {

bitcoiny.setFeePerKb(Coin.valueOf(satoshis) );

EventBus.INSTANCE.notify(new LockingFeeUpdateEvent());

return String.valueOf(bitcoiny.getFeePerKb().value);
}

/**
* Set Fee Ceiling
* Set Fee Required
*
* @param bitcoiny the blockchain support
* @param fee the fee in satoshis
@ -116,14 +119,16 @@ public class CrossChainUtils {
*
* @throws IllegalArgumentException if invalid
*/
public static String setFeeCeiling(Bitcoiny bitcoiny, String fee) throws IllegalArgumentException{
public static String setFeeRequired(Bitcoiny bitcoiny, String fee) throws IllegalArgumentException{

long satoshis = Long.parseLong(fee);
if( satoshis < 0 ) throw new IllegalArgumentException("can't set fee to negative number");

bitcoiny.setFeeCeiling( Long.parseLong(fee));
bitcoiny.setFeeRequired( Long.parseLong(fee));

return String.valueOf(bitcoiny.getFeeCeiling());
EventBus.INSTANCE.notify(new RequiredFeeUpdateEvent(bitcoiny));

return String.valueOf(bitcoiny.getFeeRequired());
}

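A short usage sketch of the renamed helper, assuming the caller already holds a Bitcoiny instance such as Dogecoin.getInstance(): the value is parsed as satoshis, rejected if negative, stored, and a RequiredFeeUpdateEvent is published so any listeners can react.

// Minimal sketch, not part of the diff: update the required fee and read it back.
Bitcoiny dogecoin = Dogecoin.getInstance();
String stored = CrossChainUtils.setFeeRequired(dogecoin, "10000"); // 10,000 sats
System.out.println("fee required is now " + stored + " sats");

// An invalid value surfaces as IllegalArgumentException, which the API layer
// maps to ApiError.INVALID_CRITERIA (see the resource classes above).
try {
    CrossChainUtils.setFeeRequired(dogecoin, "-1");
} catch (IllegalArgumentException e) {
    System.out.println("rejected: " + e.getMessage());
}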
/**
|
||||
@ -232,6 +237,9 @@ public class CrossChainUtils {
|
||||
return bitcoiny.getBlockchainProvider().removeServer(server);
|
||||
}
|
||||
|
||||
public static ChainableServer getCurrentServer( Bitcoiny bitcoiny ) {
|
||||
return bitcoiny.getBlockchainProvider().getCurrentServer();
|
||||
}
|
||||
/**
|
||||
* Set Current Server
|
||||
*
|
||||
@ -771,4 +779,46 @@ public class CrossChainUtils {
entries.add(ledgerEntry);
}
}

public static List<CrossChainTradeData> populateTradeDataList(Repository repository, ACCT acct, List<ATData> atDataList) throws DataException {

if(atDataList.isEmpty()) return new ArrayList<>(0);

List<ATStateData> latestATStates
= repository.getATRepository()
.getLatestATStates(
atDataList.stream()
.map(ATData::getATAddress)
.collect(Collectors.toList())
);

Map<String, ATStateData> atStateDataByAtAddress
= latestATStates.stream().collect(Collectors.toMap(ATStateData::getATAddress, Function.identity()));

Map<String, ATData> atDataByAtAddress
= atDataList.stream().collect(Collectors.toMap(ATData::getATAddress, Function.identity()));

Map<String, Long> balanceByAtAddress
= repository
.getAccountRepository()
.getBalances(new ArrayList<>(atDataByAtAddress.keySet()), Asset.QORT)
.stream().collect(Collectors.toMap(AccountBalanceData::getAddress, AccountBalanceData::getBalance));

List<CrossChainTradeData> crossChainTradeDataList = new ArrayList<>(latestATStates.size());

for( ATStateData atStateData : latestATStates ) {
ATData atData = atDataByAtAddress.get(atStateData.getATAddress());
crossChainTradeDataList.add(
acct.populateTradeData(
repository,
atData.getCreatorPublicKey(),
atData.getCreation(),
atStateData,
OptionalLong.of(balanceByAtAddress.get(atStateData.getATAddress()))
)
);
}

return crossChainTradeDataList;
}
}
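A sketch of how this helper is likely consumed, assuming the caller has already chosen an ACCT implementation and fetched its AT list from the repository (that lookup is not shown in this hunk). The point of the method is batching: one AT-state query and one balance query per collection instead of one round trip per AT.

// Minimal sketch, assuming 'acct' and 'atDataList' were obtained elsewhere.
try (final Repository repository = RepositoryManager.getRepository()) {
    List<CrossChainTradeData> trades
            = CrossChainUtils.populateTradeDataList(repository, acct, atDataList);

    for (CrossChainTradeData trade : trades) {
        // field name assumed from the CrossChainTradeData model
        System.out.println("trade AT: " + trade.qortalAtAddress);
    }
} catch (DataException e) {
    // repository problems bubble up as DataException
}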
@ -33,6 +33,7 @@ import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

@Path("/names")
@ -104,6 +105,45 @@ public class NamesResource {
}
}

@GET
@Path("/primary/{address}")
@Operation(
summary = "primary name owned by address",
responses = {
@ApiResponse(
description = "registered primary name info",
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(implementation = NameSummary.class)
)
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE, ApiError.UNAUTHORIZED})
public NameSummary getPrimaryNameByAddress(@PathParam("address") String address) {
if (!Crypto.isValidAddress(address))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);

try (final Repository repository = RepositoryManager.getRepository()) {

if (Settings.getInstance().isLite()) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED);
}
else {
Optional<String> primaryName = repository.getNameRepository().getPrimaryName(address);

if(primaryName.isPresent()) {
return new NameSummary(new NameData(primaryName.get(), address));
}
else {
return new NameSummary((new NameData(null, address)));
}
}
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}

@GET
@Path("/{name}")
@Operation(
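A quick client-side sketch of the new primary-name lookup, assuming a node API on localhost:12391: an invalid address maps to INVALID_ADDRESS, a lite node answers UNAUTHORIZED, and an address that owns no name still returns a NameSummary whose name is null.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PrimaryNameLookup {
    public static void main(String[] args) throws Exception {
        String address = "Qg..."; // placeholder Qortal address
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:12391/names/primary/" + address)) // port assumed
                .GET().build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        // Body is the NameSummary serialized as JSON
        System.out.println(response.body());
    }
}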
@ -1092,25 +1092,4 @@ public class AdminResource {

return info;
}

@GET
@Path("/dbstates")
@Operation(
summary = "Get DB States",
description = "Get DB States",
responses = {
@ApiResponse(
content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = DbConnectionInfo.class)))
)
}
)
public List<DbConnectionInfo> getDbConnectionsStates() {

try {
return Controller.REPOSITORY_FACTORY.getDbConnectionsStates();
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
return new ArrayList<>(0);
}
}
}
@ -71,33 +71,33 @@ public class RenderResource {
|
||||
@Path("/signature/{signature}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getIndexBySignature(@PathParam("signature") String signature,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
|
||||
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, "/", null, "/render/signature", true, true, theme);
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, "/", null, "/render/signature", true, true, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/signature/{signature}/{path:.*}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getPathBySignature(@PathParam("signature") String signature, @PathParam("path") String inPath,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
|
||||
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, inPath,null, "/render/signature", true, true, theme);
|
||||
return this.get(signature, ResourceIdType.SIGNATURE, null, null, inPath,null, "/render/signature", true, true, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/hash/{hash}")
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getIndexByHash(@PathParam("hash") String hash58, @QueryParam("secret") String secret58,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
|
||||
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, "/", secret58, "/render/hash", true, false, theme);
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, "/", secret58, "/render/hash", true, false, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@ -105,11 +105,11 @@ public class RenderResource {
|
||||
@SecurityRequirement(name = "apiKey")
|
||||
public HttpServletResponse getPathByHash(@PathParam("hash") String hash58, @PathParam("path") String inPath,
|
||||
@QueryParam("secret") String secret58,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
|
||||
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, inPath, secret58, "/render/hash", true, false, theme);
|
||||
return this.get(hash58, ResourceIdType.FILE_HASH, Service.ARBITRARY_DATA, null, inPath, secret58, "/render/hash", true, false, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@ -119,12 +119,12 @@ public class RenderResource {
|
||||
@PathParam("name") String name,
|
||||
@PathParam("path") String inPath,
|
||||
@QueryParam("identifier") String identifier,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, name, service, null);
|
||||
|
||||
String prefix = String.format("/render/%s", service);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, inPath, null, prefix, true, true, theme);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, inPath, null, prefix, true, true, theme, lang);
|
||||
}
|
||||
|
||||
@GET
|
||||
@ -133,18 +133,18 @@ public class RenderResource {
|
||||
public HttpServletResponse getIndexByName(@PathParam("service") Service service,
|
||||
@PathParam("name") String name,
|
||||
@QueryParam("identifier") String identifier,
|
||||
@QueryParam("theme") String theme) {
|
||||
@QueryParam("theme") String theme, @QueryParam("lang") String lang) {
|
||||
if (!Settings.getInstance().isQDNAuthBypassEnabled())
|
||||
Security.requirePriorAuthorization(request, name, service, null);
|
||||
|
||||
String prefix = String.format("/render/%s", service);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, "/", null, prefix, true, true, theme);
|
||||
return this.get(name, ResourceIdType.NAME, service, identifier, "/", null, prefix, true, true, theme, lang);
|
||||
}
|
||||
|
||||
|
||||
|
||||
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String identifier,
|
||||
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async, String theme) {
|
||||
String inPath, String secret58, String prefix, boolean includeResourceIdInPrefix, boolean async, String theme, String lang) {
|
||||
|
||||
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, identifier, inPath,
|
||||
secret58, prefix, includeResourceIdInPrefix, async, "render", request, response, context);
|
||||
@ -152,6 +152,9 @@ public class RenderResource {
|
||||
if (theme != null) {
|
||||
renderer.setTheme(theme);
|
||||
}
|
||||
if (lang != null) {
|
||||
renderer.setLang(lang);
|
||||
}
|
||||
return renderer.render();
|
||||
}
|
||||
|
||||
|
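The new lang query parameter rides alongside theme on every render route, so a gateway or UI can ask for a localized render in a single request. A fragment-style sketch, assuming the java.net.http imports used above, a node API on localhost:12391, and a WEBSITE service segment (none of which are shown in this hunk):

// Sketch: request a dark, Spanish-language render of a website resource by name.
String url = "http://localhost:12391/render/WEBSITE/alice?theme=dark&lang=es";
HttpResponse<String> page = HttpClient.newHttpClient().send(
        HttpRequest.newBuilder(URI.create(url)).GET().build(),
        HttpResponse.BodyHandlers.ofString()); // may throw IOException / InterruptedException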
@ -0,0 +1,83 @@
package org.qortal.api.websocket;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.WebSocketException;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketError;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
import org.qortal.data.crosschain.UnsignedFeeEvent;
import org.qortal.event.Event;
import org.qortal.event.EventBus;
import org.qortal.event.FeeWaitingEvent;
import org.qortal.event.Listener;

import java.io.IOException;
import java.io.StringWriter;

@WebSocket
@SuppressWarnings("serial")
public class UnsignedFeesSocket extends ApiWebSocket implements Listener {

private static final Logger LOGGER = LogManager.getLogger(UnsignedFeesSocket.class);

@Override
public void configure(WebSocketServletFactory factory) {
LOGGER.info("configure");

factory.register(UnsignedFeesSocket.class);

EventBus.INSTANCE.addListener(this);
}

@Override
public void listen(Event event) {
if (!(event instanceof FeeWaitingEvent))
return;

for (Session session : getSessions()) {
FeeWaitingEvent feeWaitingEvent = (FeeWaitingEvent) event;
sendUnsignedFeeEvent(session, new UnsignedFeeEvent(feeWaitingEvent.isPositive(), feeWaitingEvent.getAddress()));
}
}


@OnWebSocketConnect
@Override
public void onWebSocketConnect(Session session) {
super.onWebSocketConnect(session);
}

@OnWebSocketClose
@Override
public void onWebSocketClose(Session session, int statusCode, String reason) {
super.onWebSocketClose(session, statusCode, reason);
}

@OnWebSocketError
public void onWebSocketError(Session session, Throwable throwable) {
/* We ignore errors for now, but method here to silence log spam */
}

@OnWebSocketMessage
public void onWebSocketMessage(Session session, String message) {
LOGGER.info("onWebSocketMessage: message = " + message);
}

private void sendUnsignedFeeEvent(Session session, UnsignedFeeEvent unsignedFeeEvent) {
StringWriter stringWriter = new StringWriter();

try {
marshall(stringWriter, unsignedFeeEvent);

session.getRemote().sendStringByFuture(stringWriter.toString());
} catch (IOException | WebSocketException e) {
// No output this time
}
}

}
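A sketch of a client listening on this socket. The class pushes every FeeWaitingEvent to each connected session as a JSON-serialized UnsignedFeeEvent; the websocket path below (/websockets/crosschain/unsignedfees) and the port are assumptions, since the servlet registration is not part of this hunk:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.WebSocket;
import java.util.concurrent.CompletionStage;

public class UnsignedFeesListener {
    public static void main(String[] args) throws Exception {
        String url = "ws://localhost:12391/websockets/crosschain/unsignedfees"; // assumed path

        WebSocket.Listener listener = new WebSocket.Listener() {
            @Override
            public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
                // Each message is an UnsignedFeeEvent (positive flag + address) as JSON
                System.out.println("unsigned fee event: " + data);
                return WebSocket.Listener.super.onText(webSocket, data, last);
            }
        };

        HttpClient.newHttpClient().newWebSocketBuilder()
                .buildAsync(URI.create(url), listener)
                .join();

        Thread.sleep(60_000); // keep the demo alive for a minute
    }
}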
@ -4,6 +4,7 @@ import org.qortal.repository.DataException;
import org.qortal.utils.Base58;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystem;
import java.nio.file.Files;
@ -25,47 +26,53 @@ public class ArbitraryDataDigest {
}

public void compute() throws IOException, DataException {
List<Path> allPaths = Files.walk(path).filter(Files::isRegularFile).sorted().collect(Collectors.toList());
List<Path> allPaths = Files.walk(path)
.filter(Files::isRegularFile)
.sorted()
.collect(Collectors.toList());

Path basePathAbsolute = this.path.toAbsolutePath();


MessageDigest sha256;
try {
sha256 = MessageDigest.getInstance("SHA-256");
} catch (NoSuchAlgorithmException e) {
throw new DataException("SHA-256 hashing algorithm unavailable");
}


for (Path path : allPaths) {
// We need to work with paths relative to the base path, to ensure the same hash
// is generated on different systems
Path relativePath = basePathAbsolute.relativize(path.toAbsolutePath());


// Exclude Qortal folder since it can be different each time
// We only care about hashing the actual user data
if (relativePath.startsWith(".qortal/")) {
continue;
}


// Account for \ VS / : Linux VS Windows
String pathString = relativePath.toString();

if(relativePath.getFileSystem().toString().contains("Windows")) {
pathString = pathString.replace("\\","/");
if (relativePath.getFileSystem().toString().contains("Windows")) {
pathString = pathString.replace("\\", "/");
}


// Hash path
byte[] filePathBytes = pathString.getBytes(StandardCharsets.UTF_8);
System.out.printf("Path: %s \n", pathString);
System.out.printf("Path Byte Array: %s \n", Arrays.toString(filePathBytes));
sha256.update(filePathBytes);

// Hash contents
byte[] fileContent = Files.readAllBytes(path);
System.out.printf("File Content: %s \n", Arrays.toString(fileContent));
sha256.update(fileContent);

try (InputStream in = Files.newInputStream(path)) {
byte[] buffer = new byte[65536]; // 64 KB
int bytesRead;
while ((bytesRead = in.read(buffer)) != -1) {
sha256.update(buffer, 0, bytesRead);
}
}
}

this.hash = sha256.digest();
}


public boolean isHashValid(byte[] hash) {
return Arrays.equals(hash, this.hash);
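The key change in compute() is that file contents are now streamed through the digest in 64 KB chunks instead of being loaded whole with Files.readAllBytes, so hashing a very large resource no longer has to fit in heap (and the per-file debug printing goes away). A standalone sketch of the same pattern:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

public class StreamingSha256 {
    // Returns the SHA-256 of a file while keeping at most 64 KB in memory at once.
    public static byte[] digestFile(Path file) throws Exception {
        MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
        try (InputStream in = Files.newInputStream(file)) {
            byte[] buffer = new byte[65536];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                sha256.update(buffer, 0, bytesRead);
            }
        }
        return sha256.digest();
    }
}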
@ -52,7 +52,7 @@ public class ArbitraryDataFile {

private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFile.class);

public static final long MAX_FILE_SIZE = 500 * 1024 * 1024; // 500MiB
public static final long MAX_FILE_SIZE = 2L * 1024 * 1024 * 1024; // 2 GiB
protected static final int MAX_CHUNK_SIZE = 1 * 1024 * 1024; // 1MiB
public static final int CHUNK_SIZE = 512 * 1024; // 0.5MiB
public static int SHORT_DIGEST_LENGTH = 8;
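Worth noting on the raised limit: the new constant needs the long literal because 2 * 1024 * 1024 * 1024 evaluated in int arithmetic overflows to a negative value, while the old 500 MiB product still fits in an int before being widened. A tiny check:

// Sketch: why the 'L' suffix matters for the 2 GiB constant.
long wrong = 2 * 1024 * 1024 * 1024;   // int overflow first: -2147483648
long right = 2L * 1024 * 1024 * 1024;  // long arithmetic: 2147483648
System.out.println(wrong + " vs " + right);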
@ -1,6 +1,7 @@
|
||||
package org.qortal.arbitrary;
|
||||
|
||||
import com.google.common.io.Resources;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang3.ArrayUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
@ -15,11 +16,13 @@ import org.qortal.settings.Settings;
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.net.URL;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
@ -37,6 +40,7 @@ public class ArbitraryDataRenderer {
|
||||
private final Service service;
|
||||
private final String identifier;
|
||||
private String theme = "light";
|
||||
private String lang = "en";
|
||||
private String inPath;
|
||||
private final String secret58;
|
||||
private final String prefix;
|
||||
@ -166,9 +170,16 @@ public class ArbitraryDataRenderer {
|
||||
if (HTMLParser.isHtmlFile(filename)) {
|
||||
// HTML file - needs to be parsed
|
||||
byte[] data = Files.readAllBytes(filePath); // TODO: limit file size that can be read into memory
|
||||
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting);
|
||||
String encodedResourceId;
|
||||
|
||||
if (resourceIdType == ResourceIdType.NAME) {
|
||||
encodedResourceId = resourceId.replace(" ", "%20");
|
||||
} else {
|
||||
encodedResourceId = resourceId;
|
||||
}
|
||||
HTMLParser htmlParser = new HTMLParser(encodedResourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting, lang);
|
||||
htmlParser.addAdditionalHeaderTags();
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; font-src 'self' data:; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' wss:;");
|
||||
response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; font-src 'self' data:; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' wss: blob:;");
|
||||
response.setContentType(context.getMimeType(filename));
|
||||
response.setContentLength(htmlParser.getData().length);
|
||||
response.getOutputStream().write(htmlParser.getData());
|
||||
@ -256,5 +267,8 @@ public class ArbitraryDataRenderer {
|
||||
public void setTheme(String theme) {
|
||||
this.theme = theme;
|
||||
}
|
||||
public void setLang(String lang) {
|
||||
this.lang = lang;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -29,6 +29,7 @@ import org.qortal.utils.FilesystemUtils;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
@ -197,7 +198,7 @@ public class ArbitraryDataTransactionBuilder {
|
||||
|
||||
// We can't use PATCH for on-chain data because this requires the .qortal directory, which can't be put on chain
|
||||
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(this.path, false);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(Files.size(path)) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
if (shouldUseOnChainData) {
|
||||
LOGGER.info("Data size is small enough to go on chain - using PUT");
|
||||
return Method.PUT;
|
||||
@ -245,7 +246,7 @@ public class ArbitraryDataTransactionBuilder {
|
||||
|
||||
// Single file resources are handled differently, especially for very small data payloads, as these go on chain
|
||||
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(path, false);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(Files.size(path)) <= ArbitraryTransaction.MAX_DATA_SIZE);
|
||||
|
||||
// Use zip compression if data isn't going on chain
|
||||
Compression compression = shouldUseOnChainData ? Compression.NONE : Compression.ZIP;
|
||||
|
@ -62,7 +62,17 @@ public enum Service {

// Custom validation function to require an index HTML file in the root directory
List<String> fileNames = ArbitraryDataRenderer.indexFiles();
String[] files = path.toFile().list();
List<String> files;

// single files are packaged differently
if( path.toFile().isFile() ) {
files = new ArrayList<>(1);
files.add(path.getFileName().toString());
}
else {
files = new ArrayList<>(Arrays.asList(path.toFile().list()));
}

if (files != null) {
for (String file : files) {
Path fileName = Paths.get(file).getFileName();
@ -1640,6 +1640,8 @@ public class Block {
|
||||
SelfSponsorshipAlgoV2Block.processAccountPenalties(this);
|
||||
} else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
|
||||
SelfSponsorshipAlgoV3Block.processAccountPenalties(this);
|
||||
} else if (this.blockData.getHeight() == BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
|
||||
PrimaryNamesBlock.processNames(this.repository);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1721,11 +1723,19 @@ public class Block {
|
||||
accountData.setBlocksMinted(accountData.getBlocksMinted() + 1);
|
||||
LOGGER.trace(() -> String.format("Block minter %s up to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
|
||||
int blocksMintedAdjustment
|
||||
=
|
||||
(this.blockData.getHeight() > BlockChain.getInstance().getMintedBlocksAdjustmentRemovalHeight())
|
||||
?
|
||||
0
|
||||
:
|
||||
accountData.getBlocksMintedAdjustment();
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + blocksMintedAdjustment + accountData.getBlocksMintedPenalty();
|
||||
|
||||
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
|
||||
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
|
||||
if (newLevel > accountData.getLevel()) {
|
||||
if (newLevel != accountData.getLevel()) {
|
||||
// Account has increased in level!
|
||||
accountData.setLevel(newLevel);
|
||||
bumpedAccounts.put(accountData.getAddress(), newLevel);
|
||||
@ -1952,6 +1962,8 @@ public class Block {
|
||||
SelfSponsorshipAlgoV2Block.orphanAccountPenalties(this);
|
||||
} else if (this.blockData.getHeight() == BlockChain.getInstance().getSelfSponsorshipAlgoV3Height()) {
|
||||
SelfSponsorshipAlgoV3Block.orphanAccountPenalties(this);
|
||||
} else if (this.blockData.getHeight() == BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
|
||||
PrimaryNamesBlock.orphanNames( this.repository );
|
||||
}
|
||||
}
|
||||
|
||||
@ -2127,11 +2139,19 @@ public class Block {
|
||||
accountData.setBlocksMinted(accountData.getBlocksMinted() - 1);
|
||||
LOGGER.trace(() -> String.format("Block minter %s down to %d minted block%s", accountData.getAddress(), accountData.getBlocksMinted(), (accountData.getBlocksMinted() != 1 ? "s" : "")));
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + accountData.getBlocksMintedAdjustment() + accountData.getBlocksMintedPenalty();
|
||||
int blocksMintedAdjustment
|
||||
=
|
||||
(this.blockData.getHeight() -1 > BlockChain.getInstance().getMintedBlocksAdjustmentRemovalHeight())
|
||||
?
|
||||
0
|
||||
:
|
||||
accountData.getBlocksMintedAdjustment();
|
||||
|
||||
final int effectiveBlocksMinted = accountData.getBlocksMinted() + blocksMintedAdjustment + accountData.getBlocksMintedPenalty();
|
||||
|
||||
for (int newLevel = maximumLevel; newLevel >= 0; --newLevel)
|
||||
if (effectiveBlocksMinted >= cumulativeBlocksByLevel.get(newLevel)) {
|
||||
if (newLevel < accountData.getLevel()) {
|
||||
if (newLevel != accountData.getLevel()) {
|
||||
// Account has decreased in level!
|
||||
accountData.setLevel(newLevel);
|
||||
repository.getAccountRepository().setLevel(accountData);
|
||||
|
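The level calculation above keeps its shape but zeroes out blocksMintedAdjustment once the chain passes mintedBlocksAdjustmentRemovalHeight (the orphaning path uses height - 1 so the two directions stay symmetric). A small worked sketch of the gate, with placeholder numbers rather than values from the diff:

// Sketch with made-up numbers.
int blocksMinted = 900, adjustment = 200, penalty = 0;
int removalHeight = 2_000_000;

int heightBeingProcessed = 2_000_001; // past the trigger
int effectiveAdjustment = (heightBeingProcessed > removalHeight) ? 0 : adjustment;
int effectiveBlocksMinted = blocksMinted + effectiveAdjustment + penalty; // 900, not 1100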
@ -92,7 +92,9 @@ public class BlockChain {
|
||||
adminsReplaceFoundersHeight,
|
||||
nullGroupMembershipHeight,
|
||||
ignoreLevelForRewardShareHeight,
|
||||
adminQueryFixHeight
|
||||
adminQueryFixHeight,
|
||||
multipleNamesPerAccountHeight,
|
||||
mintedBlocksAdjustmentRemovalHeight
|
||||
}
|
||||
|
||||
// Custom transaction fees
|
||||
@ -112,7 +114,8 @@ public class BlockChain {
|
||||
/** Whether to use legacy, broken RIPEMD160 implementation when converting public keys to addresses. */
|
||||
private boolean useBrokenMD160ForAddresses = false;
|
||||
|
||||
/** Whether only one registered name is allowed per account. */
|
||||
/** This should get ignored and overwritten in the oneNamePerAccount(int blockchainHeight) method,
|
||||
* because it is based on block height, not based on the genesis block.*/
|
||||
private boolean oneNamePerAccount = false;
|
||||
|
||||
/** Checkpoints */
|
||||
@ -474,8 +477,9 @@ public class BlockChain {
|
||||
return this.useBrokenMD160ForAddresses;
|
||||
}
|
||||
|
||||
public boolean oneNamePerAccount() {
|
||||
return this.oneNamePerAccount;
|
||||
public boolean oneNamePerAccount(int blockchainHeight) {
|
||||
// this is not set on a simple blockchain setting, it is based on a feature trigger height
|
||||
return blockchainHeight < this.getMultipleNamesPerAccountHeight();
|
||||
}
|
||||
|
||||
public List<Checkpoint> getCheckpoints() {
|
||||
@ -688,6 +692,14 @@ public class BlockChain {
|
||||
return this.featureTriggers.get(FeatureTrigger.adminQueryFixHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public int getMultipleNamesPerAccountHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.multipleNamesPerAccountHeight.name()).intValue();
|
||||
}
|
||||
|
||||
public int getMintedBlocksAdjustmentRemovalHeight() {
|
||||
return this.featureTriggers.get(FeatureTrigger.mintedBlocksAdjustmentRemovalHeight.name()).intValue();
|
||||
}
|
||||
|
||||
// More complex getters for aspects that change by height or timestamp
|
||||
|
||||
public long getRewardAtHeight(int ourHeight) {
|
||||
|
47
src/main/java/org/qortal/block/PrimaryNamesBlock.java
Normal file
@ -0,0 +1,47 @@
package org.qortal.block;

import org.qortal.account.Account;
import org.qortal.api.resource.TransactionsResource;
import org.qortal.data.naming.NameData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;

import java.util.Set;
import java.util.stream.Collectors;

/**
* Class PrimaryNamesBlock
*/
public class PrimaryNamesBlock {

/**
* Process Primary Names
*
* @param repository
* @throws DataException
*/
public static void processNames(Repository repository) throws DataException {

Set<String> addressesWithNames
= repository.getNameRepository().getAllNames().stream()
.map(NameData::getOwner).collect(Collectors.toSet());

// for each address with a name, set primary name to the address
for( String address : addressesWithNames ) {

Account account = new Account(repository, address);
account.resetPrimaryName(TransactionsResource.ConfirmationStatus.CONFIRMED);
}
}

/**
* Orphan the Primary Names Block
*
* @param repository
* @throws DataException
*/
public static void orphanNames(Repository repository) throws DataException {

repository.getNameRepository().clearPrimaryNames();
}
}
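This ties the new class back to the Block changes shown earlier: processNames runs exactly once, at the multipleNamesPerAccountHeight feature trigger, and orphaning that same block simply clears all primary names. A condensed sketch of the hook, mirroring the lines added to Block.java above:

// Condensed from the Block.java hunks in this commit (not new behaviour).
if (blockData.getHeight() == BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
    PrimaryNamesBlock.processNames(repository);   // on process
}
// ...and on orphaning the same height:
// PrimaryNamesBlock.orphanNames(repository);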
@ -46,6 +46,7 @@ import org.qortal.utils.*;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
|
||||
import java.awt.TrayIcon.MessageType;
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
@ -53,6 +54,7 @@ import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.security.SecureRandom;
|
||||
@ -70,11 +72,10 @@ import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
public class Controller extends Thread {
|
||||
|
||||
public static HSQLDBRepositoryFactory REPOSITORY_FACTORY;
|
||||
|
||||
static {
|
||||
// This must go before any calls to LogManager/Logger
|
||||
System.setProperty("log4j2.formatMsgNoLookups", "true");
|
||||
@ -396,6 +397,9 @@ public class Controller extends Thread {
|
||||
|
||||
Controller.newInstance(args);
|
||||
|
||||
|
||||
cleanChunkUploadTempDir(); // cleanup leftover chunks from streaming to disk
|
||||
|
||||
LOGGER.info("Starting NTP");
|
||||
Long ntpOffset = Settings.getInstance().getTestNtpOffset();
|
||||
if (ntpOffset != null)
|
||||
@ -405,8 +409,8 @@ public class Controller extends Thread {
|
||||
|
||||
LOGGER.info("Starting repository");
|
||||
try {
|
||||
REPOSITORY_FACTORY = new HSQLDBRepositoryFactory(getRepositoryUrl());
|
||||
RepositoryManager.setRepositoryFactory(REPOSITORY_FACTORY);
|
||||
HSQLDBRepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
|
||||
RepositoryManager.setRepositoryFactory(repositoryFactory);
|
||||
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
@ -560,6 +564,12 @@ public class Controller extends Thread {
|
||||
LOGGER.info("Starting online accounts manager");
|
||||
OnlineAccountsManager.getInstance().start();
|
||||
|
||||
LOGGER.info("Starting foreign fees manager");
|
||||
ForeignFeesManager.getInstance().start();
|
||||
|
||||
LOGGER.info("Starting follower");
|
||||
Follower.getInstance().start();
|
||||
|
||||
LOGGER.info("Starting transaction importer");
|
||||
TransactionImporter.getInstance().start();
|
||||
|
||||
@ -1130,6 +1140,9 @@ public class Controller extends Thread {
|
||||
LOGGER.info("Shutting down online accounts manager");
|
||||
OnlineAccountsManager.getInstance().shutdown();
|
||||
|
||||
LOGGER.info("Shutting down foreign fees manager");
|
||||
ForeignFeesManager.getInstance().shutdown();
|
||||
|
||||
LOGGER.info("Shutting down transaction importer");
|
||||
TransactionImporter.getInstance().shutdown();
|
||||
|
||||
@ -1474,6 +1487,14 @@ public class Controller extends Thread {
|
||||
OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV3Message(peer, message);
|
||||
break;
|
||||
|
||||
case GET_FOREIGN_FEES:
|
||||
ForeignFeesManager.getInstance().onNetworkGetForeignFeesMessage(peer, message);
|
||||
break;
|
||||
|
||||
case FOREIGN_FEES:
|
||||
ForeignFeesManager.getInstance().onNetworkForeignFeesMessage(peer, message);
|
||||
break;
|
||||
|
||||
case GET_ARBITRARY_DATA:
|
||||
// Not currently supported
|
||||
break;
|
||||
@ -2160,6 +2181,24 @@ public class Controller extends Thread {
|
||||
return now - offset;
|
||||
}
|
||||
|
||||
private static void cleanChunkUploadTempDir() {
|
||||
Path uploadsTemp = Paths.get("uploads-temp");
|
||||
if (!Files.exists(uploadsTemp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
try (Stream<Path> paths = Files.walk(uploadsTemp)) {
|
||||
paths.sorted(Comparator.reverseOrder())
|
||||
.map(Path::toFile)
|
||||
.forEach(File::delete);
|
||||
|
||||
LOGGER.info("Cleaned up all temporary uploads in {}", uploadsTemp);
|
||||
} catch (IOException e) {
|
||||
LOGGER.warn("Failed to clean up uploads-temp directory", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public StatsSnapshot getStatsSnapshot() {
|
||||
return this.stats;
|
||||
}
|
||||
|
1202
src/main/java/org/qortal/controller/ForeignFeesManager.java
Normal file
File diff suppressed because it is too large
@ -2,6 +2,7 @@ package org.qortal.controller;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.controller.arbitrary.PeerMessage;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.network.Network;
|
||||
@ -20,7 +21,11 @@ import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class TransactionImporter extends Thread {
|
||||
@ -50,6 +55,10 @@ public class TransactionImporter extends Thread {
|
||||
/** Cached list of unconfirmed transactions, used when counting per creator. This is replaced regularly */
|
||||
public static List<TransactionData> unconfirmedTransactionsCache = null;
|
||||
|
||||
public TransactionImporter() {
|
||||
signatureMessageScheduler.scheduleAtFixedRate(this::processNetworkTransactionSignaturesMessage, 60, 1, TimeUnit.SECONDS);
|
||||
getTransactionMessageScheduler.scheduleAtFixedRate(this::processNetworkGetTransactionMessages, 60, 1, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
public static synchronized TransactionImporter getInstance() {
|
||||
if (instance == null) {
|
||||
@ -371,36 +380,104 @@ public class TransactionImporter extends Thread {
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> getTransactionMessageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object getTransactionMessageLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService getTransactionMessageScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onNetworkGetTransactionMessage(Peer peer, Message message) {
|
||||
GetTransactionMessage getTransactionMessage = (GetTransactionMessage) message;
|
||||
byte[] signature = getTransactionMessage.getSignature();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
synchronized (getTransactionMessageLock) {
|
||||
getTransactionMessageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
private void processNetworkGetTransactionMessages() {
|
||||
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (getTransactionMessageLock) {
|
||||
messagesToProcess = new ArrayList<>(getTransactionMessageList);
|
||||
getTransactionMessageList.clear();
|
||||
}
|
||||
|
||||
if( messagesToProcess.isEmpty() ) return;
|
||||
|
||||
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
|
||||
for( PeerMessage peerMessage : messagesToProcess ) {
|
||||
GetTransactionMessage getTransactionMessage = (GetTransactionMessage) peerMessage.getMessage();
|
||||
byte[] signature = getTransactionMessage.getSignature();
|
||||
|
||||
peerMessageBySignature58.put(Base58.encode(signature), peerMessage);
|
||||
}
|
||||
|
||||
// Firstly check the sig-valid transactions that are currently queued for import
|
||||
TransactionData transactionData = this.getCachedSigValidTransactions().stream()
|
||||
.filter(t -> Arrays.equals(signature, t.getSignature()))
|
||||
.findFirst().orElse(null);
|
||||
Map<String, TransactionData> transactionsCachedBySignature58
|
||||
= this.getCachedSigValidTransactions().stream()
|
||||
.collect(Collectors.toMap(t -> Base58.encode(t.getSignature()), Function.identity()));
|
||||
|
||||
if (transactionData == null) {
|
||||
Map<Boolean, List<Map.Entry<String, PeerMessage>>> transactionsCachedBySignature58Partition
|
||||
= peerMessageBySignature58.entrySet().stream()
|
||||
.collect(Collectors.partitioningBy(entry -> transactionsCachedBySignature58.containsKey(entry.getKey())));
|
||||
|
||||
List<byte[]> signaturesNeeded
|
||||
= transactionsCachedBySignature58Partition.get(false).stream()
|
||||
.map(Map.Entry::getValue)
|
||||
.map(PeerMessage::getMessage)
|
||||
.map(message -> (GetTransactionMessage) message)
|
||||
.map(GetTransactionMessage::getSignature)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// transaction found in the import queue
|
||||
Map<String, TransactionData> transactionsToSendBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
for( Map.Entry<String, PeerMessage> entry : transactionsCachedBySignature58Partition.get(true)) {
|
||||
transactionsToSendBySignature58.put(entry.getKey(), transactionsCachedBySignature58.get(entry.getKey()));
|
||||
}
|
||||
|
||||
if( !signaturesNeeded.isEmpty() ) {
|
||||
// Not found in import queue, so try the database
|
||||
transactionData = repository.getTransactionRepository().fromSignature(signature);
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
transactionsToSendBySignature58.putAll(
|
||||
repository.getTransactionRepository().fromSignatures(signaturesNeeded).stream()
|
||||
.collect(Collectors.toMap(transactionData -> Base58.encode(transactionData.getSignature()), Function.identity()))
|
||||
);
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
if (transactionData == null) {
|
||||
// Still not found - so we don't have this transaction
|
||||
LOGGER.debug(() -> String.format("Ignoring GET_TRANSACTION request from peer %s for unknown transaction %s", peer, Base58.encode(signature)));
|
||||
// Send no response at all???
|
||||
return;
|
||||
}
|
||||
for( final Map.Entry<String, TransactionData> entry : transactionsToSendBySignature58.entrySet() ) {
|
||||
|
||||
Message transactionMessage = new TransactionMessage(transactionData);
|
||||
PeerMessage peerMessage = peerMessageBySignature58.get(entry.getKey());
|
||||
final Message message = peerMessage.getMessage();
|
||||
final Peer peer = peerMessage.getPeer();
|
||||
|
||||
Runnable sendTransactionMessageRunner = () -> sendTransactionMessage(entry.getKey(), entry.getValue(), message, peer);
|
||||
Thread sendTransactionMessageThread = new Thread(sendTransactionMessageRunner);
|
||||
sendTransactionMessageThread.start();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(),e);
|
||||
}
|
||||
}
|
||||
|
||||
private static void sendTransactionMessage(String signature58, TransactionData data, Message message, Peer peer) {
|
||||
try {
|
||||
Message transactionMessage = new TransactionMessage(data);
|
||||
transactionMessage.setId(message.getId());
|
||||
|
||||
if (!peer.sendMessage(transactionMessage))
|
||||
peer.disconnect("failed to send transaction");
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
|
||||
} catch (TransformationException e) {
|
||||
LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", Base58.encode(signature), peer), e);
|
||||
}
|
||||
catch (TransformationException e) {
|
||||
LOGGER.error(String.format("Serialization issue while sending transaction %s to peer %s", signature58, peer), e);
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -421,44 +498,86 @@ public class TransactionImporter extends Thread {
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> signatureMessageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object signatureMessageLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService signatureMessageScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onNetworkTransactionSignaturesMessage(Peer peer, Message message) {
TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) message;
List<byte[]> signatures = transactionSignaturesMessage.getSignatures();
synchronized (signatureMessageLock) {
signatureMessageList.add(new PeerMessage(peer, message));
}
}

try (final Repository repository = RepositoryManager.getRepository()) {
for (byte[] signature : signatures) {
String signature58 = Base58.encode(signature);
if (invalidUnconfirmedTransactions.containsKey(signature58)) {
// Previously invalid transaction - don't keep requesting it
// It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
continue;
}
public void processNetworkTransactionSignaturesMessage() {

// Ignore if this transaction is in the queue
if (incomingTransactionQueueContains(signature)) {
LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peer));
continue;
}
try {
List<PeerMessage> messagesToProcess;
synchronized (signatureMessageLock) {
messagesToProcess = new ArrayList<>(signatureMessageList);
signatureMessageList.clear();
}

// Do we have it already? (Before requesting transaction data itself)
if (repository.getTransactionRepository().exists(signature)) {
LOGGER.trace(() -> String.format("Ignoring existing transaction %s from peer %s", Base58.encode(signature), peer));
continue;
}
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size() * 10);
Map<String, Peer> peerBySignature58 = new HashMap<>( messagesToProcess.size() * 10 );

// Check isInterrupted() here and exit fast
if (Thread.currentThread().isInterrupted())
return;
for( PeerMessage peerMessage : messagesToProcess ) {

// Fetch actual transaction data from peer
Message getTransactionMessage = new GetTransactionMessage(signature);
if (!peer.sendMessage(getTransactionMessage)) {
peer.disconnect("failed to request transaction");
return;
TransactionSignaturesMessage transactionSignaturesMessage = (TransactionSignaturesMessage) peerMessage.getMessage();
List<byte[]> signatures = transactionSignaturesMessage.getSignatures();

for (byte[] signature : signatures) {
String signature58 = Base58.encode(signature);
if (invalidUnconfirmedTransactions.containsKey(signature58)) {
// Previously invalid transaction - don't keep requesting it
// It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
continue;
}

// Ignore if this transaction is in the queue
if (incomingTransactionQueueContains(signature)) {
LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peerMessage.getPeer()));
continue;
}

signatureBySignature58.put(signature58, signature);
peerBySignature58.put(signature58, peerMessage.getPeer());
}
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer %s", peer), e);

if( !signatureBySignature58.isEmpty() ) {
try (final Repository repository = RepositoryManager.getRepository()) {

// remove signatures in db already
repository.getTransactionRepository()
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
.map(TransactionData::getSignature)
.map(signature -> Base58.encode(signature))
.forEach(signature58 -> signatureBySignature58.remove(signature58));
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while processing unconfirmed transactions from peer"), e);
}
}

// Check isInterrupted() here and exit fast
if (Thread.currentThread().isInterrupted())
return;

for (Map.Entry<String, byte[]> entry : signatureBySignature58.entrySet()) {

Peer peer = peerBySignature58.get(entry.getKey());

// Fetch actual transaction data from peer
Message getTransactionMessage = new GetTransactionMessage(entry.getValue());
if (peer != null && !peer.sendMessage(getTransactionMessage)) {
peer.disconnect("failed to request transaction");
}
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

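The handler above illustrates the pattern this changeset applies throughout: network messages are appended to a list under a lock on the receiving thread, and a scheduled task later drains the list and processes the whole batch in one pass. Below is a minimal sketch of that collect-and-drain pattern; the one-second cadence mirrors the diff, but the class and method names are illustrative only and not part of the Qortal codebase.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative sketch only: collect messages cheaply on the network thread,
// then drain and process them in batches on a scheduled worker.
public class BatchedMessageProcessor<T> {
    private final List<T> pending = new ArrayList<>();
    private final Object lock = new Object();
    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

    public BatchedMessageProcessor() {
        // Same cadence as the diff: start after 60 seconds, then run every second
        scheduler.scheduleAtFixedRate(this::processBatch, 60, 1, TimeUnit.SECONDS);
    }

    // Called from the network thread: O(1) work, no repository access
    public void onMessage(T message) {
        synchronized (lock) {
            pending.add(message);
        }
    }

    private void processBatch() {
        List<T> batch;
        synchronized (lock) {
            batch = new ArrayList<>(pending);
            pending.clear();
        }
        if (batch.isEmpty()) return;

        // Heavy work (deduplication, repository lookups, peer requests) happens here,
        // off the network thread and once per batch rather than once per message.
        for (T message : batch) {
            handle(message);
        }
    }

    protected void handle(T message) {
        // application-specific processing
    }
}
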
@ -25,6 +25,10 @@ import org.qortal.utils.NTP;
import org.qortal.utils.Triple;

import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import static org.qortal.controller.arbitrary.ArbitraryDataFileManager.MAX_FILE_HASH_RESPONSES;

@ -73,6 +77,8 @@ public class ArbitraryDataFileListManager {


private ArbitraryDataFileListManager() {
getArbitraryDataFileListMessageScheduler.scheduleAtFixedRate(this::processNetworkGetArbitraryDataFileListMessage, 60, 1, TimeUnit.SECONDS);
arbitraryDataFileListMessageScheduler.scheduleAtFixedRate(this::processNetworkArbitraryDataFileListMessage, 60, 1, TimeUnit.SECONDS);
}

public static ArbitraryDataFileListManager getInstance() {
@ -118,8 +124,8 @@ public class ArbitraryDataFileListManager {
if (timeSinceLastAttempt > 15 * 1000L) {
// We haven't tried for at least 15 seconds

if (networkBroadcastCount < 3) {
// We've made less than 3 total attempts
if (networkBroadcastCount < 12) {
// We've made less than 12 total attempts
return true;
}
}
@ -128,8 +134,8 @@ public class ArbitraryDataFileListManager {
if (timeSinceLastAttempt > 60 * 1000L) {
// We haven't tried for at least 1 minute

if (networkBroadcastCount < 8) {
// We've made less than 8 total attempts
if (networkBroadcastCount < 40) {
// We've made less than 40 total attempts
return true;
}
}
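The two hunks above raise the rebroadcast allowance from 3 to 12 attempts in the 15-second tier and from 8 to 40 attempts in the 60-second tier. A condensed sketch of that tiered gate is shown below; the method name and its parameters are assumptions for illustration, not the actual class members.

// Illustrative sketch of the tiered retry gate, assuming two inputs:
// timeSinceLastAttempt (ms since the previous broadcast) and networkBroadcastCount.
private boolean shouldRebroadcast(long timeSinceLastAttempt, int networkBroadcastCount) {
    // Fast tier: after 15 seconds of silence, allow up to 12 total attempts (was 3)
    if (timeSinceLastAttempt > 15 * 1000L && networkBroadcastCount < 12)
        return true;

    // Slow tier: after a minute of silence, allow up to 40 total attempts (was 8)
    if (timeSinceLastAttempt > 60 * 1000L && networkBroadcastCount < 40)
        return true;

    return false;
}
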
@ -396,11 +402,11 @@ public class ArbitraryDataFileListManager {
return true;
}

public void deleteFileListRequestsForSignature(byte[] signature) {
String signature58 = Base58.encode(signature);
public void deleteFileListRequestsForSignature(String signature58) {

for (Iterator<Map.Entry<Integer, Triple<String, Peer, Long>>> it = arbitraryDataFileListRequests.entrySet().iterator(); it.hasNext();) {
Map.Entry<Integer, Triple<String, Peer, Long>> entry = it.next();
if (entry == null || entry.getKey() == null || entry.getValue() != null) {
if (entry == null || entry.getKey() == null || entry.getValue() == null) {
continue;
}
if (Objects.equals(entry.getValue().getA(), signature58)) {
@ -413,70 +419,116 @@ public class ArbitraryDataFileListManager {

// Network handlers

// List to collect messages
private final List<PeerMessage> arbitraryDataFileListMessageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object arbitraryDataFileListMessageLock = new Object();

// Scheduled executor service to process messages every second
private final ScheduledExecutorService arbitraryDataFileListMessageScheduler = Executors.newScheduledThreadPool(1);

public void onNetworkArbitraryDataFileListMessage(Peer peer, Message message) {
// Don't process if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}

ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());

if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) {
long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime();
LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}",
totalRequestTime, arbitraryDataFileListMessage.getRequestHops(),
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
synchronized (arbitraryDataFileListMessageLock) {
arbitraryDataFileListMessageList.add(new PeerMessage(peer, message));
}
}

// Do we have a pending request for this data?
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
if (request == null || request.getA() == null) {
return;
}
boolean isRelayRequest = (request.getB() != null);
private void processNetworkArbitraryDataFileListMessage() {

// Does this message's signature match what we're expecting?
byte[] signature = arbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
if (!request.getA().equals(signature58)) {
return;
}
try {
List<PeerMessage> messagesToProcess;
synchronized (arbitraryDataFileListMessageLock) {
messagesToProcess = new ArrayList<>(arbitraryDataFileListMessageList);
arbitraryDataFileListMessageList.clear();
}

List<byte[]> hashes = arbitraryDataFileListMessage.getHashes();
if (hashes == null || hashes.isEmpty()) {
return;
}
if (messagesToProcess.isEmpty()) return;

ArbitraryTransactionData arbitraryTransactionData = null;
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, Boolean> isRelayRequestBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, List<byte[]>> hashesBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, Triple<String, Peer, Long>> requestBySignature58 = new HashMap<>(messagesToProcess.size());

// Check transaction exists and hashes are correct
try (final Repository repository = RepositoryManager.getRepository()) {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (!(transactionData instanceof ArbitraryTransactionData))
for (PeerMessage peerMessage : messagesToProcess) {
Peer peer = peerMessage.getPeer();
Message message = peerMessage.getMessage();

ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());

if (LOGGER.isDebugEnabled() && arbitraryDataFileListMessage.getRequestTime() != null) {
long totalRequestTime = NTP.getTime() - arbitraryDataFileListMessage.getRequestTime();
LOGGER.debug("totalRequestTime: {}, requestHops: {}, peerAddress: {}, isRelayPossible: {}",
totalRequestTime, arbitraryDataFileListMessage.getRequestHops(),
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
}

// Do we have a pending request for this data?
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
if (request == null || request.getA() == null) {
continue;
}
boolean isRelayRequest = (request.getB() != null);

// Does this message's signature match what we're expecting?
byte[] signature = arbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
if (!request.getA().equals(signature58)) {
continue;
}

List<byte[]> hashes = arbitraryDataFileListMessage.getHashes();
if (hashes == null || hashes.isEmpty()) {
continue;
}

peerMessageBySignature58.put(signature58, peerMessage);
signatureBySignature58.put(signature58, signature);
isRelayRequestBySignature58.put(signature58, isRelayRequest);
hashesBySignature58.put(signature58, hashes);
requestBySignature58.put(signature58, request);
}

if (signatureBySignature58.isEmpty()) return;

List<ArbitraryTransactionData> arbitraryTransactionDataList;

// Check transaction exists and hashes are correct
try (final Repository repository = RepositoryManager.getRepository()) {
arbitraryTransactionDataList
= repository.getTransactionRepository()
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
.filter(data -> data instanceof ArbitraryTransactionData)
.map(data -> (ArbitraryTransactionData) data)
.collect(Collectors.toList());
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list"), e);
return;
}

arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
for (ArbitraryTransactionData arbitraryTransactionData : arbitraryTransactionDataList) {

// // Load data file(s)
// ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
//
// // Check all hashes exist
// for (byte[] hash : hashes) {
// //LOGGER.debug("Received hash {}", Base58.encode(hash));
// if (!arbitraryDataFile.containsChunk(hash)) {
// // Check the hash against the complete file
// if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) {
// LOGGER.info("Received non-matching chunk hash {} for signature {}. This could happen if we haven't obtained the metadata file yet.", Base58.encode(hash), signature58);
// return;
// }
// }
// }
byte[] signature = arbitraryTransactionData.getSignature();
String signature58 = Base58.encode(signature);

if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
Long now = NTP.getTime();
List<byte[]> hashes = hashesBySignature58.get(signature58);

PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
Peer peer = peerMessage.getPeer();
Message message = peerMessage.getMessage();

ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;

Boolean isRelayRequest = isRelayRequestBySignature58.get(signature58);
if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
Long now = NTP.getTime();

if (ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.size() < MAX_FILE_HASH_RESPONSES) {
// Keep track of the hashes this peer reports to have access to
for (byte[] hash : hashes) {
String hash58 = Base58.encode(hash);
@ -487,233 +539,300 @@ public class ArbitraryDataFileListManager {
ArbitraryFileListResponseInfo responseInfo = new ArbitraryFileListResponseInfo(hash58, signature58,
peer, now, arbitraryDataFileListMessage.getRequestTime(), requestHops);

ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.add(responseInfo);
ArbitraryDataFileManager.getInstance().addResponse(responseInfo);
}

// Keep track of the source peer, for direct connections
if (arbitraryDataFileListMessage.getPeerAddress() != null) {
ArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique(
new ArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now));
}
}

// Keep track of the source peer, for direct connections
if (arbitraryDataFileListMessage.getPeerAddress() != null) {
ArbitraryDataFileManager.getInstance().addDirectConnectionInfoIfUnique(
new ArbitraryDirectConnectionInfo(signature, arbitraryDataFileListMessage.getPeerAddress(), hashes, now));
}
}
// Forwarding
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {

} catch (DataException e) {
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list for peer %s", peer), e);
}
boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
if (!isBlocked) {
Triple<String, Peer, Long> request = requestBySignature58.get(signature58);
Peer requestingPeer = request.getB();
if (requestingPeer != null) {
Long requestTime = arbitraryDataFileListMessage.getRequestTime();
Integer requestHops = arbitraryDataFileListMessage.getRequestHops();

// Forwarding
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
boolean isBlocked = (arbitraryTransactionData == null || ListUtils.isNameBlocked(arbitraryTransactionData.getName()));
if (!isBlocked) {
Peer requestingPeer = request.getB();
if (requestingPeer != null) {
Long requestTime = arbitraryDataFileListMessage.getRequestTime();
Integer requestHops = arbitraryDataFileListMessage.getRequestHops();
// Add each hash to our local mapping so we know who to ask later
Long now = NTP.getTime();
for (byte[] hash : hashes) {
String hash58 = Base58.encode(hash);
ArbitraryRelayInfo relayInfo = new ArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops);
ArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo);
}

// Add each hash to our local mapping so we know who to ask later
Long now = NTP.getTime();
for (byte[] hash : hashes) {
String hash58 = Base58.encode(hash);
ArbitraryRelayInfo relayInfo = new ArbitraryRelayInfo(hash58, signature58, peer, now, requestTime, requestHops);
ArbitraryDataFileManager.getInstance().addToRelayMap(relayInfo);
}
// Bump requestHops if it exists
if (requestHops != null) {
requestHops++;
}

// Bump requestHops if it exists
if (requestHops != null) {
requestHops++;
}
ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;

ArbitraryDataFileListMessage forwardArbitraryDataFileListMessage;
// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
}
forwardArbitraryDataFileListMessage.setId(message.getId());

// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!requestingPeer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
}
forwardArbitraryDataFileListMessage.setId(message.getId());

// Forward to requesting peer
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
if (!requestingPeer.sendMessage(forwardArbitraryDataFileListMessage)) {
requestingPeer.disconnect("failed to forward arbitrary data file list");
// Forward to requesting peer
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
requestingPeer.sendMessage(forwardArbitraryDataFileListMessage);
}
}
}
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

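A recurring optimisation in the rewritten handler above is that per-message state is first grouped into maps keyed by the Base58 signature, and the transactions are then resolved with a single fromSignatures() call instead of one fromSignature() lookup per message. Below is a reduced sketch of that grouping-then-batch-query step; the repository calls mirror the diff, while the surrounding snippet is illustrative rather than the exact method body.

// Illustrative sketch: group per-message state by signature58, then resolve all
// transactions with one repository round trip instead of one query per message.
Map<String, byte[]> signatureBySignature58 = new HashMap<>();
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>();

for (PeerMessage peerMessage : messagesToProcess) {
    ArbitraryDataFileListMessage listMessage = (ArbitraryDataFileListMessage) peerMessage.getMessage();
    byte[] signature = listMessage.getSignature();
    String signature58 = Base58.encode(signature);
    signatureBySignature58.put(signature58, signature);
    peerMessageBySignature58.put(signature58, peerMessage);
}

try (final Repository repository = RepositoryManager.getRepository()) {
    List<ArbitraryTransactionData> transactions = repository.getTransactionRepository()
            .fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
            .filter(data -> data instanceof ArbitraryTransactionData)
            .map(data -> (ArbitraryTransactionData) data)
            .collect(Collectors.toList());
    // ... per-transaction handling then continues as in the diff
} catch (DataException e) {
    LOGGER.error("Repository issue while batching signature lookups", e);
}
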
// List to collect messages
private final List<PeerMessage> getArbitraryDataFileListMessageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object getArbitraryDataFileListMessageLock = new Object();

// Scheduled executor service to process messages every second
private final ScheduledExecutorService getArbitraryDataFileListMessageScheduler = Executors.newScheduledThreadPool(1);

public void onNetworkGetArbitraryDataFileListMessage(Peer peer, Message message) {
// Don't respond if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}

Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet();

GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
byte[] signature = getArbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
Long now = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);

// If we've seen this request recently, then ignore
if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58);
return;
synchronized (getArbitraryDataFileListMessageLock) {
getArbitraryDataFileListMessageList.add(new PeerMessage(peer, message));
}
}

List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
int hashCount = requestedHashes != null ? requestedHashes.size() : 0;
String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer();
private void processNetworkGetArbitraryDataFileListMessage() {

if (requestingPeer != null) {
LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58);
}
else {
LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58);
}
try {
List<PeerMessage> messagesToProcess;
synchronized (getArbitraryDataFileListMessageLock) {
messagesToProcess = new ArrayList<>(getArbitraryDataFileListMessageList);
getArbitraryDataFileListMessageList.clear();
}

List<byte[]> hashes = new ArrayList<>();
ArbitraryTransactionData transactionData = null;
boolean allChunksExist = false;
boolean hasMetadata = false;
if (messagesToProcess.isEmpty()) return;

try (final Repository repository = RepositoryManager.getRepository()) {
Map<String, byte[]> signatureBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, List<byte[]>> requestedHashesBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, String> requestingPeerBySignature58 = new HashMap<>(messagesToProcess.size());
Map<String, Long> nowBySignature58 = new HashMap<>((messagesToProcess.size()));
Map<String, PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());

// Firstly we need to lookup this file on chain to get a list of its hashes
transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature);
if (transactionData instanceof ArbitraryTransactionData) {
for (PeerMessage messagePeer : messagesToProcess) {
Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet();

Message message = messagePeer.message;
Peer peer = messagePeer.peer;

GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
byte[] signature = getArbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
Long now = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);

// If we've seen this request recently, then ignore
if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
LOGGER.trace("Ignoring hash list request from peer {} for signature {}", peer, signature58);
continue;
}

List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
int hashCount = requestedHashes != null ? requestedHashes.size() : 0;
String requestingPeer = getArbitraryDataFileListMessage.getRequestingPeer();

if (requestingPeer != null) {
LOGGER.debug("Received hash list request with {} hashes from peer {} (requesting peer {}) for signature {}", hashCount, peer, requestingPeer, signature58);
} else {
LOGGER.debug("Received hash list request with {} hashes from peer {} for signature {}", hashCount, peer, signature58);
}

signatureBySignature58.put(signature58, signature);
requestedHashesBySignature58.put(signature58, requestedHashes);
requestingPeerBySignature58.put(signature58, requestingPeer);
nowBySignature58.put(signature58, now);
peerMessageBySignature58.put(signature58, messagePeer);
}

if (signatureBySignature58.isEmpty()) {
return;
}

List<byte[]> hashes = new ArrayList<>();
boolean allChunksExist = false;
boolean hasMetadata = false;

List<ArbitraryTransactionData> transactionDataList;
try (final Repository repository = RepositoryManager.getRepository()) {

// Firstly we need to lookup this file on chain to get a list of its hashes
transactionDataList
= repository.getTransactionRepository()
.fromSignatures(new ArrayList<>(signatureBySignature58.values())).stream()
.filter(data -> data instanceof ArbitraryTransactionData)
.map(data -> (ArbitraryTransactionData) data)
.collect(Collectors.toList());

} catch (DataException e) {
LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer"), e);
return;
}

for (ArbitraryTransactionData transactionData : transactionDataList) {
byte[] signature = transactionData.getSignature();
String signature58 = Base58.encode(signature);
List<byte[]> requestedHashes = requestedHashesBySignature58.get(signature58);

// Check if we're even allowed to serve data for this transaction
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {

// Load file(s) and add any that exist to the list of hashes
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
try {
// Load file(s) and add any that exist to the list of hashes
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

// If the peer didn't supply a hash list, we need to return all hashes for this transaction
if (requestedHashes == null || requestedHashes.isEmpty()) {
requestedHashes = new ArrayList<>();
// If the peer didn't supply a hash list, we need to return all hashes for this transaction
if (requestedHashes == null || requestedHashes.isEmpty()) {
requestedHashes = new ArrayList<>();

// Add the metadata file
if (arbitraryDataFile.getMetadataHash() != null) {
requestedHashes.add(arbitraryDataFile.getMetadataHash());
hasMetadata = true;
// Add the metadata file
if (arbitraryDataFile.getMetadataHash() != null) {
requestedHashes.add(arbitraryDataFile.getMetadataHash());
hasMetadata = true;
}

// Add the chunk hashes
if (!arbitraryDataFile.getChunkHashes().isEmpty()) {
requestedHashes.addAll(arbitraryDataFile.getChunkHashes());
}
// Add complete file if there are no hashes
else {
requestedHashes.add(arbitraryDataFile.getHash());
}
}

// Add the chunk hashes
if (!arbitraryDataFile.getChunkHashes().isEmpty()) {
requestedHashes.addAll(arbitraryDataFile.getChunkHashes());
}
// Add complete file if there are no hashes
else {
requestedHashes.add(arbitraryDataFile.getHash());

// Assume all chunks exists, unless one can't be found below
allChunksExist = true;

for (byte[] requestedHash : requestedHashes) {
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature);
if (chunk.exists()) {
hashes.add(chunk.getHash());
//LOGGER.trace("Added hash {}", chunk.getHash58());
} else {
LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
allChunksExist = false;
}
}
} catch (DataException e) {
LOGGER.error(e.getMessage(), e);
}
}

// If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that,
// or can use the separate metadata protocol to fetch it. This should greatly reduce network spam.
if (hasMetadata && hashes.size() == 1) {
hashes.clear();
}

PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
Peer peer = peerMessage.getPeer();
Message message = peerMessage.getMessage();

Long now = nowBySignature58.get(signature58);

// We should only respond if we have at least one hash
String requestingPeer = requestingPeerBySignature58.get(signature58);
if (!hashes.isEmpty()) {

// Firstly we should keep track of the requesting peer, to allow for potential direct connections later
ArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);

// We have all the chunks, so update requests map to reflect that we've sent it
// There is no need to keep track of the request, as we can serve all the chunks
if (allChunksExist) {
Triple<String, Peer, Long> newEntry = new Triple<>(null, null, now);
arbitraryDataFileListRequests.put(message.getId(), newEntry);
}

// Assume all chunks exists, unless one can't be found below
allChunksExist = true;
String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
ArbitraryDataFileListMessage arbitraryDataFileListMessage;

// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
hashes, NTP.getTime(), 0, ourAddress, true);
}

arbitraryDataFileListMessage.setId(message.getId());

if (!peer.sendMessage(arbitraryDataFileListMessage)) {
LOGGER.debug("Couldn't send list of hashes");
continue;
}

if (allChunksExist) {
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
LOGGER.debug("No need for any forwarding because file list request is fully served");
continue;
}

}

// We may need to forward this request on
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it


GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;

long requestTime = getArbitraryDataFileListMessage.getRequestTime();
int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
long totalRequestTime = now - requestTime;

if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
// Relay request hasn't timed out yet, so can potentially be rebroadcast
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast

Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
relayGetArbitraryDataFileListMessage.setId(message.getId());

LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
broadcastPeer ->
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
);

for (byte[] requestedHash : requestedHashes) {
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature);
if (chunk.exists()) {
hashes.add(chunk.getHash());
//LOGGER.trace("Added hash {}", chunk.getHash58());
} else {
LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
allChunksExist = false;
// This relay request has reached the maximum number of allowed hops
}
} else {
// This relay request has timed out
}
}
}

} catch (DataException e) {
LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer %s", peer), e);
}

// If the only file we have is the metadata then we shouldn't respond. Most nodes will already have that,
// or can use the separate metadata protocol to fetch it. This should greatly reduce network spam.
if (hasMetadata && hashes.size() == 1) {
hashes.clear();
}

// We should only respond if we have at least one hash
if (!hashes.isEmpty()) {

// Firstly we should keep track of the requesting peer, to allow for potential direct connections later
ArbitraryDataFileManager.getInstance().addRecentDataRequest(requestingPeer);

// We have all the chunks, so update requests map to reflect that we've sent it
// There is no need to keep track of the request, as we can serve all the chunks
if (allChunksExist) {
newEntry = new Triple<>(null, null, now);
arbitraryDataFileListRequests.put(message.getId(), newEntry);
}

String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
ArbitraryDataFileListMessage arbitraryDataFileListMessage;

// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
} else {
arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature,
hashes, NTP.getTime(), 0, ourAddress, true);
}

arbitraryDataFileListMessage.setId(message.getId());

if (!peer.sendMessage(arbitraryDataFileListMessage)) {
LOGGER.debug("Couldn't send list of hashes");
peer.disconnect("failed to send list of hashes");
return;
}
LOGGER.debug("Sent list of hashes (count: {})", hashes.size());

if (allChunksExist) {
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
LOGGER.debug("No need for any forwarding because file list request is fully served");
return;
}

}

// We may need to forward this request on
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it

long requestTime = getArbitraryDataFileListMessage.getRequestTime();
int requestHops = getArbitraryDataFileListMessage.getRequestHops() + 1;
long totalRequestTime = now - requestTime;

if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
// Relay request hasn't timed out yet, so can potentially be rebroadcast
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast

Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
relayGetArbitraryDataFileListMessage.setId(message.getId());

LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
broadcastPeer ->
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
);

}
else {
// This relay request has reached the maximum number of allowed hops
}
}
else {
// This relay request has timed out
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

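The relay path above only rebroadcasts a hash-list request while it is both young enough and has not travelled too many hops. A compact sketch of that double gate, reusing the RELAY_REQUEST_MAX_DURATION and RELAY_REQUEST_MAX_HOPS constants from the diff, is shown below; the helper method itself is illustrative and not part of the class.

// Illustrative sketch of the relay gate: a request is only forwarded to other
// peers while it is still young and has not exceeded the hop budget.
private boolean canRelay(long now, long requestTime, int requestHopsAfterIncrement) {
    long totalRequestTime = now - requestTime;

    if (totalRequestTime >= RELAY_REQUEST_MAX_DURATION)
        return false;   // this relay request has timed out

    if (requestHopsAfterIncrement >= RELAY_REQUEST_MAX_HOPS)
        return false;   // maximum number of allowed hops reached

    return true;        // safe to rebroadcast to our other peers
}
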
@ -1,6 +1,7 @@
package org.qortal.controller.arbitrary;

import com.google.common.net.InetAddresses;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile;
@ -12,6 +13,7 @@ import org.qortal.data.network.PeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.PeerSendManagement;
import org.qortal.network.message.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
@ -23,12 +25,16 @@ import org.qortal.utils.NTP;

import java.security.SecureRandom;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

public class ArbitraryDataFileManager extends Thread {

public static final int SEND_TIMEOUT_MS = 500;
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileManager.class);

private static ArbitraryDataFileManager instance;
@ -48,7 +54,7 @@ public class ArbitraryDataFileManager extends Thread {
/**
* List to keep track of any arbitrary data file hash responses
*/
public final List<ArbitraryFileListResponseInfo> arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>());
private final List<ArbitraryFileListResponseInfo> arbitraryDataFileHashResponses = Collections.synchronizedList(new ArrayList<>());

/**
* List to keep track of peers potentially available for direct connections, based on recent requests
@ -65,8 +71,9 @@ public class ArbitraryDataFileManager extends Thread {

public static int MAX_FILE_HASH_RESPONSES = 1000;


private ArbitraryDataFileManager() {
this.arbitraryDataFileHashResponseScheduler.scheduleAtFixedRate( this::processResponses, 60, 1, TimeUnit.SECONDS);
this.arbitraryDataFileHashResponseScheduler.scheduleAtFixedRate(this::handleFileListRequestProcess, 60, 1, TimeUnit.SECONDS);
}

public static ArbitraryDataFileManager getInstance() {
@ -76,18 +83,13 @@ public class ArbitraryDataFileManager extends Thread {
return instance;
}



@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data File Manager");

try {
// Use a fixed thread pool to execute the arbitrary data file requests
int threadCount = 5;
ExecutorService arbitraryDataFileRequestExecutor = Executors.newFixedThreadPool(threadCount);
for (int i = 0; i < threadCount; i++) {
arbitraryDataFileRequestExecutor.execute(new ArbitraryDataFileRequestThread());
}

while (!isStopping) {
// Nothing to do yet
Thread.sleep(1000);
@ -112,7 +114,6 @@ public class ArbitraryDataFileManager extends Thread {

final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT;
arbitraryRelayMap.removeIf(entry -> entry == null || entry.getTimestamp() == null || entry.getTimestamp() < relayMinimumTimestamp);
arbitraryDataFileHashResponses.removeIf(entry -> entry.getTimestamp() < relayMinimumTimestamp);

final long directConnectionInfoMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT;
directConnectionInfo.removeIf(entry -> entry.getTimestamp() < directConnectionInfoMinimumTimestamp);
@ -125,8 +126,7 @@ public class ArbitraryDataFileManager extends Thread {

// Fetch data files by hash

public boolean fetchArbitraryDataFiles(Repository repository,
Peer peer,
public boolean fetchArbitraryDataFiles(Peer peer,
byte[] signature,
ArbitraryTransactionData arbitraryTransactionData,
List<byte[]> hashes) throws DataException {
@ -146,21 +146,15 @@ public class ArbitraryDataFileManager extends Thread {
if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
LOGGER.debug("Requesting data file {} from peer {}", hash58, peer);
Long startTime = NTP.getTime();
ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, null, arbitraryTransactionData, signature, hash, null);
ArbitraryDataFile receivedArbitraryDataFile = fetchArbitraryDataFile(peer, arbitraryTransactionData, signature, hash);
Long endTime = NTP.getTime();
if (receivedArbitraryDataFile != null) {
LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFile.getHash58(), peer, (endTime-startTime));
receivedAtLeastOneFile = true;

// Remove this hash from arbitraryDataFileHashResponses now that we have received it
arbitraryDataFileHashResponses.remove(hash58);
}
else {
LOGGER.debug("Peer {} didn't respond with data file {} for signature {}. Time taken: {} ms", peer, Base58.encode(hash), Base58.encode(signature), (endTime-startTime));

// Remove this hash from arbitraryDataFileHashResponses now that we have failed to receive it
arbitraryDataFileHashResponses.remove(hash58);

// Stop asking for files from this peer
break;
}
@ -169,10 +163,6 @@ public class ArbitraryDataFileManager extends Thread {
LOGGER.trace("Already requesting data file {} for signature {} from peer {}", arbitraryDataFile, Base58.encode(signature), peer);
}
}
else {
// Remove this hash from arbitraryDataFileHashResponses because we have a local copy
arbitraryDataFileHashResponses.remove(hash58);
}
}

if (receivedAtLeastOneFile) {
@ -191,14 +181,103 @@ public class ArbitraryDataFileManager extends Thread {
return receivedAtLeastOneFile;
}

private ArbitraryDataFile fetchArbitraryDataFile(Peer peer, Peer requestingPeer, ArbitraryTransactionData arbitraryTransactionData, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
boolean fileAlreadyExists = existingFile.exists();
String hash58 = Base58.encode(hash);
// Lock to synchronize access to the list
private final Object arbitraryDataFileHashResponseLock = new Object();

// Scheduled executor service to process messages every second
private final ScheduledExecutorService arbitraryDataFileHashResponseScheduler = Executors.newScheduledThreadPool(1);


public void addResponse( ArbitraryFileListResponseInfo responseInfo ) {

synchronized (arbitraryDataFileHashResponseLock) {
this.arbitraryDataFileHashResponses.add(responseInfo);
}
}

private void processResponses() {
try {
List<ArbitraryFileListResponseInfo> responsesToProcess;
synchronized (arbitraryDataFileHashResponseLock) {
responsesToProcess = new ArrayList<>(arbitraryDataFileHashResponses);
arbitraryDataFileHashResponses.clear();
}

if (responsesToProcess.isEmpty()) return;

Long now = NTP.getTime();

ArbitraryDataFileRequestThread.getInstance().processFileHashes(now, responsesToProcess, this);
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

private ArbitraryDataFile fetchArbitraryDataFile(Peer peer, ArbitraryTransactionData arbitraryTransactionData, byte[] signature, byte[] hash) throws DataException {
ArbitraryDataFile arbitraryDataFile;

// Fetch the file if it doesn't exist locally
if (!fileAlreadyExists) {
try {
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
boolean fileAlreadyExists = existingFile.exists();
String hash58 = Base58.encode(hash);

// Fetch the file if it doesn't exist locally
if (!fileAlreadyExists) {
LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
arbitraryDataFileRequests.put(hash58, NTP.getTime());
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);

Message response = null;
try {
response = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
} catch (InterruptedException e) {
// Will return below due to null response
}
arbitraryDataFileRequests.remove(hash58);
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));

if (response == null) {
LOGGER.debug("Received null response from peer {}", peer);
return null;
}
if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
return null;
}

ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
} else {
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
arbitraryDataFile = existingFile;
}

if (arbitraryDataFile != null) {

arbitraryDataFile.save();

// If this is a metadata file then we need to update the cache
if (arbitraryTransactionData != null && arbitraryTransactionData.getMetadataHash() != null) {
if (Arrays.equals(arbitraryTransactionData.getMetadataHash(), hash)) {
ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData);
}
}

// We may need to remove the file list request, if we have all the files for this transaction
this.handleFileListRequests(signature);
}
} catch (DataException e) {
LOGGER.error(e.getMessage(), e);
arbitraryDataFile = null;
}

return arbitraryDataFile;
}

private void fetchFileForRelay(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
try {
String hash58 = Base58.encode(hash);

LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
arbitraryDataFileRequests.put(hash58, NTP.getTime());
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
@ -212,73 +291,73 @@ public class ArbitraryDataFileManager extends Thread {
arbitraryDataFileRequests.remove(hash58);
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));

// We may need to remove the file list request, if we have all the files for this transaction
this.handleFileListRequests(signature);

if (response == null) {
LOGGER.debug("Received null response from peer {}", peer);
return null;
return;
}
if (response.getType() != MessageType.ARBITRARY_DATA_FILE) {
LOGGER.debug("Received response with invalid type: {} from peer {}", response.getType(), peer);
return null;
}

ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
} else {
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
arbitraryDataFile = existingFile;
}

if (arbitraryDataFile == null) {
// We don't have a file, so give up here
return null;
}

// We might want to forward the request to the peer that originally requested it
this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);

boolean isRelayRequest = (requestingPeer != null);
if (isRelayRequest) {
if (!fileAlreadyExists) {
// File didn't exist locally before the request, and it's a forwarding request, so delete it if it exists.
// It shouldn't exist on the filesystem yet, but leaving this here just in case.
arbitraryDataFile.delete(10);
}
}
else {
arbitraryDataFile.save();
}

// If this is a metadata file then we need to update the cache
if (arbitraryTransactionData != null && arbitraryTransactionData.getMetadataHash() != null) {
if (Arrays.equals(arbitraryTransactionData.getMetadataHash(), hash)) {
ArbitraryDataCacheManager.getInstance().addToUpdateQueue(arbitraryTransactionData);
}
}

return arbitraryDataFile;
}

private void handleFileListRequests(byte[] signature) {
try (final Repository repository = RepositoryManager.getRepository()) {

// Fetch the transaction data
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
if (arbitraryTransactionData == null) {
return;
}

boolean allChunksExist = ArbitraryTransactionUtils.allChunksExist(arbitraryTransactionData);
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
ArbitraryDataFile arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();

if (allChunksExist) {
// Update requests map to reflect that we've received all chunks
ArbitraryDataFileListManager.getInstance().deleteFileListRequestsForSignature(signature);
if (arbitraryDataFile != null) {

// We might want to forward the request to the peer that originally requested it
this.handleArbitraryDataFileForwarding(requestingPeer, new ArbitraryDataFileMessage(signature, arbitraryDataFile), originalMessage);
}
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

Map<String, byte[]> signatureBySignature58 = new HashMap<>();

// Lock to synchronize access to the list
private final Object handleFileListRequestsLock = new Object();

// Scheduled executor service to process messages every second
private final ScheduledExecutorService handleFileListRequestsScheduler = Executors.newScheduledThreadPool(1);

private void handleFileListRequests(byte[] signature) {

synchronized (handleFileListRequestsLock) {
signatureBySignature58.put(Base58.encode(signature), signature);
}
}

private void handleFileListRequestProcess() {

Map<String, byte[]> signaturesToProcess;

synchronized (handleFileListRequestsLock) {
signaturesToProcess = new HashMap<>(signatureBySignature58);
signatureBySignature58.clear();
}

if( signaturesToProcess.isEmpty() ) return;

try (final Repository repository = RepositoryManager.getRepository()) {

// Fetch the transaction data
List<ArbitraryTransactionData> arbitraryTransactionDataList
= ArbitraryTransactionUtils.fetchTransactionDataList(repository, new ArrayList<>(signaturesToProcess.values()));

for( ArbitraryTransactionData arbitraryTransactionData : arbitraryTransactionDataList ) {
boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(arbitraryTransactionData);

if (completeFileExists) {
String signature58 = Base58.encode(arbitraryTransactionData.getSignature());
LOGGER.debug("All chunks or complete file exist for transaction {}", signature58);

ArbitraryDataFileListManager.getInstance().deleteFileListRequestsForSignature(signature58);
}
}

} catch (DataException e) {
LOGGER.debug("Unable to handle file list requests: {}", e.getMessage());
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

@ -295,15 +374,14 @@ public class ArbitraryDataFileManager extends Thread {

LOGGER.debug("Received arbitrary data file - forwarding is needed");

// The ID needs to match that of the original request
message.setId(originalMessage.getId());
try {
// The ID needs to match that of the original request
message.setId(originalMessage.getId());

if (!requestingPeer.sendMessageWithTimeout(message, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer);
requestingPeer.disconnect("failed to forward arbitrary data file");
}
else {
LOGGER.debug("Forwarded arbitrary data file to peer {}", requestingPeer);
PeerSendManagement.getInstance().getOrCreateSendManager(requestingPeer).queueMessage(message, SEND_TIMEOUT_MS);

} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

@ -577,13 +655,9 @@ public class ArbitraryDataFileManager extends Thread {
LOGGER.debug("Sending file {}...", arbitraryDataFile);
ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile);
arbitraryDataFileMessage.setId(message.getId());
if (!peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
LOGGER.debug("Couldn't send file {}", arbitraryDataFile);
peer.disconnect("failed to send file");
}
else {
LOGGER.debug("Sent file {}", arbitraryDataFile);
}

PeerSendManagement.getInstance().getOrCreateSendManager(peer).queueMessage(arbitraryDataFileMessage, SEND_TIMEOUT_MS);

}
else if (relayInfo != null) {
LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
@ -595,7 +669,7 @@ public class ArbitraryDataFileManager extends Thread {
LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58);
// No need to pass arbitraryTransactionData below because this is only used for metadata caching,
// and metadata isn't retained when relaying.
this.fetchArbitraryDataFile(peerToAsk, peer, null, signature, hash, message);
this.fetchFileForRelay(peerToAsk, peer, signature, hash, message);
}
else {
LOGGER.debug("Peer {} not found in relay info", peer);
@ -617,7 +691,6 @@ public class ArbitraryDataFileManager extends Thread {
fileUnknownMessage.setId(message.getId());
if (!peer.sendMessage(fileUnknownMessage)) {
LOGGER.debug("Couldn't sent file-unknown response");
peer.disconnect("failed to send file-unknown response");
}
else {
LOGGER.debug("Sent file-unknown response for file {}", arbitraryDataFile);

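A recurring change in the hunks above is that blocking sendMessageWithTimeout() calls, followed by a disconnect on failure, are replaced with PeerSendManagement's per-peer send queue and the new SEND_TIMEOUT_MS constant. A hedged before/after sketch is below; the exact queueMessage() semantics (ordering, drop-on-timeout) are assumptions based only on how the diff uses it.

// Before (as removed by this diff): block the handler thread and disconnect on failure
// if (!peer.sendMessageWithTimeout(arbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT)) {
//     peer.disconnect("failed to send file");
// }

// After (as added by this diff): hand the message to a per-peer send manager and return
// immediately; SEND_TIMEOUT_MS (500 ms) bounds how long the queued send may take.
PeerSendManagement.getInstance()
        .getOrCreateSendManager(peer)
        .queueMessage(arbitraryDataFileMessage, SEND_TIMEOUT_MS);
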
@ -4,127 +4,186 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
import org.qortal.data.arbitrary.ArbitraryFileListResponseInfo;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.event.DataMonitorEvent;
import org.qortal.event.EventBus;
import org.qortal.network.Peer;
import org.qortal.network.message.MessageType;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import org.qortal.utils.NamedThreadFactory;

import java.net.http.HttpResponse;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import static java.lang.Thread.NORM_PRIORITY;

public class ArbitraryDataFileRequestThread implements Runnable {
public class ArbitraryDataFileRequestThread {

private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileRequestThread.class);

public ArbitraryDataFileRequestThread() {
private static final Integer FETCHER_LIMIT_PER_PEER = Settings.getInstance().getMaxThreadsForMessageType(MessageType.GET_ARBITRARY_DATA_FILE);
private static final String FETCHER_THREAD_PREFIX = "Arbitrary Data Fetcher ";

private ConcurrentHashMap<String, ExecutorService> executorByPeer = new ConcurrentHashMap<>();

private ArbitraryDataFileRequestThread() {
cleanupExecutorByPeerScheduler.scheduleAtFixedRate(this::cleanupExecutorsByPeer, 1, 1, TimeUnit.MINUTES);
}

@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data File Request Thread");
Thread.currentThread().setPriority(NORM_PRIORITY);
private static ArbitraryDataFileRequestThread instance = null;

public static ArbitraryDataFileRequestThread getInstance() {

if( instance == null ) {
instance = new ArbitraryDataFileRequestThread();
}

return instance;
}

private final ScheduledExecutorService cleanupExecutorByPeerScheduler = Executors.newScheduledThreadPool(1);

private void cleanupExecutorsByPeer() {

try {
while (!Controller.isStopping()) {
Long now = NTP.getTime();
this.processFileHashes(now);
}
} catch (InterruptedException e) {
// Fall-through to exit thread...
this.executorByPeer.forEach((key, value) -> {
if (value instanceof ThreadPoolExecutor) {
ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) value;
if (threadPoolExecutor.getActiveCount() == 0) {
threadPoolExecutor.shutdown();
if (this.executorByPeer.computeIfPresent(key, (k, v) -> null) == null) {
LOGGER.trace("removed executor: peer = " + key);
}
}
} else {
LOGGER.warn("casting issue in cleanup");
}
});
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

private void processFileHashes(Long now) throws InterruptedException {
|
||||
public void processFileHashes(Long now, List<ArbitraryFileListResponseInfo> responseInfos, ArbitraryDataFileManager arbitraryDataFileManager) {
|
||||
if (Controller.isStopping()) {
|
||||
return;
|
||||
}
|
||||
|
||||
ArbitraryDataFileManager arbitraryDataFileManager = ArbitraryDataFileManager.getInstance();
|
||||
String signature58 = null;
|
||||
String hash58 = null;
|
||||
Peer peer = null;
|
||||
boolean shouldProcess = false;
|
||||
Map<String, byte[]> signatureBySignature58 = new HashMap<>(responseInfos.size());
|
||||
Map<String, List<ArbitraryFileListResponseInfo>> responseInfoBySignature58 = new HashMap<>();
|
||||
|
||||
synchronized (arbitraryDataFileManager.arbitraryDataFileHashResponses) {
|
||||
if (!arbitraryDataFileManager.arbitraryDataFileHashResponses.isEmpty()) {
|
||||
for( ArbitraryFileListResponseInfo responseInfo : responseInfos) {
|
||||
|
||||
// Sort by lowest number of node hops first
|
||||
Comparator<ArbitraryFileListResponseInfo> lowestHopsFirstComparator =
|
||||
Comparator.comparingInt(ArbitraryFileListResponseInfo::getRequestHops);
|
||||
arbitraryDataFileManager.arbitraryDataFileHashResponses.sort(lowestHopsFirstComparator);
|
||||
if( responseInfo == null ) continue;
|
||||
|
||||
Iterator iterator = arbitraryDataFileManager.arbitraryDataFileHashResponses.iterator();
|
||||
while (iterator.hasNext()) {
|
||||
if (Controller.isStopping()) {
|
||||
return;
|
||||
}
|
||||
|
||||
ArbitraryFileListResponseInfo responseInfo = (ArbitraryFileListResponseInfo) iterator.next();
|
||||
if (responseInfo == null) {
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
hash58 = responseInfo.getHash58();
|
||||
peer = responseInfo.getPeer();
|
||||
signature58 = responseInfo.getSignature58();
|
||||
Long timestamp = responseInfo.getTimestamp();
|
||||
|
||||
if (now - timestamp >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || signature58 == null || peer == null) {
|
||||
// Ignore - to be deleted
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip if already requesting, but don't remove, as we might want to retry later
|
||||
if (arbitraryDataFileManager.arbitraryDataFileRequests.containsKey(hash58)) {
|
||||
// Already requesting - leave this attempt for later
|
||||
continue;
|
||||
}
|
||||
|
||||
// We want to process this file
|
||||
shouldProcess = true;
|
||||
iterator.remove();
|
||||
break;
|
||||
}
|
||||
if (Controller.isStopping()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Peer peer = responseInfo.getPeer();
|
||||
|
||||
// if relay timeout, then move on
|
||||
if (now - responseInfo.getTimestamp() >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || responseInfo.getSignature58() == null || peer == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip if already requesting, but don't remove, as we might want to retry later
|
||||
if (arbitraryDataFileManager.arbitraryDataFileRequests.containsKey(responseInfo.getHash58())) {
|
||||
// Already requesting - leave this attempt for later
|
||||
arbitraryDataFileManager.addResponse(responseInfo); // add it back because it was already removed above
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
byte[] hash = Base58.decode(responseInfo.getHash58());
|
||||
byte[] signature = Base58.decode(responseInfo.getSignature58());
|
||||
|
||||
// check for null
|
||||
if (signature == null || hash == null || peer == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// We want to process this file, store and map data to process later
|
||||
signatureBySignature58.put(responseInfo.getSignature58(), signature);
|
||||
responseInfoBySignature58
|
||||
.computeIfAbsent(responseInfo.getSignature58(), signature58 -> new ArrayList<>())
|
||||
.add(responseInfo);
|
||||
}
|
||||
|
||||
if (!shouldProcess) {
|
||||
// Nothing to do
|
||||
Thread.sleep(1000L);
|
||||
return;
|
||||
}
|
||||
// if there are no signatures, then there is nothing to process and no need to query the database
|
||||
if( signatureBySignature58.isEmpty() ) return;
|
||||
|
||||
byte[] hash = Base58.decode(hash58);
|
||||
byte[] signature = Base58.decode(signature58);
|
||||
List<ArbitraryTransactionData> arbitraryTransactionDataList = new ArrayList<>();
|
||||
|
||||
// Fetch the transaction data
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
|
||||
if (arbitraryTransactionData == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (signature == null || hash == null || peer == null || arbitraryTransactionData == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.trace("Fetching file {} from peer {} via request thread...", hash58, peer);
|
||||
arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, Arrays.asList(hash));
|
||||
|
||||
arbitraryTransactionDataList.addAll(
|
||||
ArbitraryTransactionUtils.fetchTransactionDataList(repository, new ArrayList<>(signatureBySignature58.values())));
|
||||
} catch (DataException e) {
|
||||
LOGGER.debug("Unable to process file hashes: {}", e.getMessage());
|
||||
LOGGER.warn("Unable to fetch transaction data: {}", e.getMessage());
|
||||
}
|
||||
|
||||
if( !arbitraryTransactionDataList.isEmpty() ) {
|
||||
long start = System.currentTimeMillis();
|
||||
|
||||
for(ArbitraryTransactionData data : arbitraryTransactionDataList ) {
|
||||
String signature58 = Base58.encode(data.getSignature());
|
||||
for( ArbitraryFileListResponseInfo responseInfo : responseInfoBySignature58.get(signature58)) {
|
||||
Runnable fetcher = () -> arbitraryDataFileFetcher(arbitraryDataFileManager, responseInfo, data);
|
||||
this.executorByPeer
|
||||
.computeIfAbsent(
|
||||
responseInfo.getPeer().toString(),
|
||||
peer -> Executors.newFixedThreadPool(
|
||||
FETCHER_LIMIT_PER_PEER,
|
||||
new NamedThreadFactory(FETCHER_THREAD_PREFIX + responseInfo.getPeer().toString(), NORM_PRIORITY)
|
||||
)
|
||||
)
|
||||
.execute(fetcher);
|
||||
}
|
||||
}
|
||||
long timeLapse = System.currentTimeMillis() - start;
|
||||
}
|
||||
}
|
||||
}
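
In outline, the reworked processFileHashes above batches the incoming responses instead of handling one per loop iteration: group responses by signature, load all matching transactions with a single repository call, then hand each response to a per-peer fetcher. A condensed sketch of that flow, using the same types as the diff (the dispatch helper at the end is hypothetical shorthand for the executor logic shown above):

    Map<String, byte[]> signatureBySignature58 = new HashMap<>();
    Map<String, List<ArbitraryFileListResponseInfo>> responsesBySignature58 = new HashMap<>();

    for (ArbitraryFileListResponseInfo info : responseInfos) {
        if (info == null || info.getSignature58() == null || info.getPeer() == null)
            continue;
        signatureBySignature58.put(info.getSignature58(), Base58.decode(info.getSignature58()));
        responsesBySignature58.computeIfAbsent(info.getSignature58(), s -> new ArrayList<>()).add(info);
    }

    if (signatureBySignature58.isEmpty())
        return; // nothing to process, no need to query the database

    List<ArbitraryTransactionData> transactions = new ArrayList<>();
    try (final Repository repository = RepositoryManager.getRepository()) {
        // one repository round trip for the whole batch
        transactions.addAll(ArbitraryTransactionUtils.fetchTransactionDataList(
                repository, new ArrayList<>(signatureBySignature58.values())));
    } catch (DataException e) {
        LOGGER.warn("Unable to fetch transaction data: {}", e.getMessage());
    }

    for (ArbitraryTransactionData data : transactions)
        for (ArbitraryFileListResponseInfo info : responsesBySignature58.get(Base58.encode(data.getSignature())))
            dispatchToPeerExecutor(info, data); // hypothetical name for the per-peer executor dispatch above
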
private void arbitraryDataFileFetcher(ArbitraryDataFileManager arbitraryDataFileManager, ArbitraryFileListResponseInfo responseInfo, ArbitraryTransactionData arbitraryTransactionData) {
|
||||
try {
|
||||
Long now = NTP.getTime();
|
||||
|
||||
if (now - responseInfo.getTimestamp() >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT ) {
|
||||
|
||||
Peer peer = responseInfo.getPeer();
|
||||
String hash58 = responseInfo.getHash58();
|
||||
String signature58 = responseInfo.getSignature58();
|
||||
LOGGER.debug("Peer {} version {} didn't fetch data file {} for signature {} due to relay timeout.", peer, peer.getPeersVersionString(), hash58, signature58);
|
||||
return;
|
||||
}
|
||||
|
||||
arbitraryDataFileManager.fetchArbitraryDataFiles(
|
||||
responseInfo.getPeer(),
|
||||
arbitraryTransactionData.getSignature(),
|
||||
arbitraryTransactionData,
|
||||
Arrays.asList(Base58.decode(responseInfo.getHash58()))
|
||||
);
|
||||
} catch (DataException e) {
|
||||
LOGGER.warn("Unable to process file hashes: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
}
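
The class above keeps one bounded ExecutorService per peer, created lazily and shut down by a scheduled cleanup once it has no active work. The pattern in isolation (a minimal sketch; the real class sizes the pool from Settings.getMaxThreadsForMessageType and uses a NamedThreadFactory):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadPoolExecutor;

    class PerPeerExecutors {
        private final ConcurrentHashMap<String, ExecutorService> executorByPeer = new ConcurrentHashMap<>();
        private final int threadsPerPeer;

        PerPeerExecutors(int threadsPerPeer) { this.threadsPerPeer = threadsPerPeer; }

        void execute(String peerKey, Runnable task) {
            // lazily create a fixed-size pool for this peer, then run the task on it
            executorByPeer
                    .computeIfAbsent(peerKey, key -> Executors.newFixedThreadPool(threadsPerPeer))
                    .execute(task);
        }

        // called periodically: drop executors that are idle so dead peers don't leak threads
        void cleanup() {
            executorByPeer.forEach((key, executor) -> {
                if (executor instanceof ThreadPoolExecutor
                        && ((ThreadPoolExecutor) executor).getActiveCount() == 0) {
                    executor.shutdown();
                    executorByPeer.remove(key, executor);
                }
            });
        }
    }
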
@ -42,10 +42,10 @@ public class ArbitraryDataManager extends Thread {
|
||||
private int powDifficulty = 14; // Must not be final, as unit tests need to reduce this value
|
||||
|
||||
/** Request timeout when transferring arbitrary data */
|
||||
public static final long ARBITRARY_REQUEST_TIMEOUT = 12 * 1000L; // ms
|
||||
public static final long ARBITRARY_REQUEST_TIMEOUT = 24 * 1000L; // ms
|
||||
|
||||
/** Maximum time to hold information about an in-progress relay */
|
||||
public static final long ARBITRARY_RELAY_TIMEOUT = 60 * 1000L; // ms
|
||||
public static final long ARBITRARY_RELAY_TIMEOUT = 120 * 1000L; // ms
|
||||
|
||||
/** Maximum time to hold direct peer connection information */
|
||||
public static final long ARBITRARY_DIRECT_CONNECTION_INFO_TIMEOUT = 2 * 60 * 1000L; // ms
|
||||
|
@ -47,15 +47,15 @@ public class ArbitraryDataStorageManager extends Thread {
|
||||
|
||||
private static final long DIRECTORY_SIZE_CHECK_INTERVAL = 10 * 60 * 1000L; // 10 minutes
|
||||
|
||||
/** Treat storage as full at 90% usage, to reduce risk of going over the limit.
|
||||
/** Treat storage as full at 80% usage, to reduce risk of going over the limit.
|
||||
* This is necessary because we don't calculate total storage values before every write.
|
||||
* It also helps avoid a fetch/delete loop, as we will stop fetching before the hard limit.
|
||||
* This must be lower than DELETION_THRESHOLD. */
|
||||
private static final double STORAGE_FULL_THRESHOLD = 0.90f; // 90%
|
||||
private static final double STORAGE_FULL_THRESHOLD = 0.8f; // 80%
|
||||
|
||||
/** Start deleting files once we reach 98% usage.
|
||||
/** Start deleting files once we reach 90% usage.
|
||||
* This must be higher than STORAGE_FULL_THRESHOLD in order to avoid a fetch/delete loop. */
|
||||
public static final double DELETION_THRESHOLD = 0.98f; // 98%
|
||||
public static final double DELETION_THRESHOLD = 0.9f; // 90%
|
||||
|
||||
private static final long PER_NAME_STORAGE_MULTIPLIER = 4L;
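
The two thresholds above now leave a wider gap: fetching stops at 80% of the storage limit while deletion only starts at 90%, so the node backs off well before it has to delete anything. Applying them is a simple comparison; a minimal sketch, assuming the manager tracks used bytes against a configured limit:

    private static final double STORAGE_FULL_THRESHOLD = 0.8; // stop fetching new data here
    public  static final double DELETION_THRESHOLD     = 0.9; // start deleting data here

    boolean isStorageSpaceAvailable(long usedBytes, long limitBytes) {
        return usedBytes < (long) (limitBytes * STORAGE_FULL_THRESHOLD);
    }

    boolean shouldDeleteData(long usedBytes, long limitBytes) {
        return usedBytes >= (long) (limitBytes * DELETION_THRESHOLD);
    }
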
@ -24,6 +24,11 @@ import org.qortal.utils.Triple;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.*;
|
||||
|
||||
@ -61,6 +66,7 @@ public class ArbitraryMetadataManager {
|
||||
|
||||
|
||||
private ArbitraryMetadataManager() {
|
||||
scheduler.scheduleAtFixedRate(this::processNetworkGetArbitraryMetadataMessage, 60, 1, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
public static ArbitraryMetadataManager getInstance() {
|
||||
@ -354,9 +360,8 @@ public class ArbitraryMetadataManager {
|
||||
|
||||
// Forward to requesting peer
|
||||
LOGGER.debug("Forwarding metadata to requesting peer: {}", requestingPeer);
|
||||
if (!requestingPeer.sendMessage(forwardArbitraryMetadataMessage)) {
|
||||
requestingPeer.disconnect("failed to forward arbitrary metadata");
|
||||
}
|
||||
requestingPeer.sendMessage(forwardArbitraryMetadataMessage);
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -371,107 +376,159 @@ public class ArbitraryMetadataManager {
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> messageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object lock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onNetworkGetArbitraryMetadataMessage(Peer peer, Message message) {
|
||||
|
||||
// Don't respond if QDN is disabled
|
||||
if (!Settings.getInstance().isQdnEnabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Controller.getInstance().stats.getArbitraryMetadataMessageStats.requests.incrementAndGet();
|
||||
|
||||
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) message;
|
||||
byte[] signature = getArbitraryMetadataMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
Long now = NTP.getTime();
|
||||
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
|
||||
|
||||
// If we've seen this request recently, then ignore
|
||||
if (arbitraryMetadataRequests.putIfAbsent(message.getId(), newEntry) != null) {
|
||||
LOGGER.debug("Ignoring metadata request from peer {} for signature {}", peer, signature58);
|
||||
return;
|
||||
}
|
||||
|
||||
LOGGER.debug("Received metadata request from peer {} for signature {}", peer, signature58);
|
||||
|
||||
ArbitraryTransactionData transactionData = null;
|
||||
ArbitraryDataFile metadataFile = null;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Firstly we need to lookup this file on chain to get its metadata hash
|
||||
transactionData = (ArbitraryTransactionData)repository.getTransactionRepository().fromSignature(signature);
|
||||
if (transactionData instanceof ArbitraryTransactionData) {
|
||||
|
||||
// Check if we're even allowed to serve metadata for this transaction
|
||||
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
|
||||
|
||||
byte[] metadataHash = transactionData.getMetadataHash();
|
||||
if (metadataHash != null) {
|
||||
|
||||
// Load metadata file
|
||||
metadataFile = ArbitraryDataFile.fromHash(metadataHash, signature);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary metadata for peer %s", peer), e);
|
||||
}
|
||||
|
||||
// We should only respond if we have the metadata file
|
||||
if (metadataFile != null && metadataFile.exists()) {
|
||||
|
||||
// We have the metadata file, so update requests map to reflect that we've sent it
|
||||
newEntry = new Triple<>(null, null, now);
|
||||
arbitraryMetadataRequests.put(message.getId(), newEntry);
|
||||
|
||||
ArbitraryMetadataMessage arbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, metadataFile);
|
||||
arbitraryMetadataMessage.setId(message.getId());
|
||||
if (!peer.sendMessage(arbitraryMetadataMessage)) {
|
||||
LOGGER.debug("Couldn't send metadata");
|
||||
peer.disconnect("failed to send metadata");
|
||||
return;
|
||||
}
|
||||
LOGGER.debug("Sent metadata");
|
||||
|
||||
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
|
||||
LOGGER.debug("No need for any forwarding because metadata request is fully served");
|
||||
return;
|
||||
|
||||
}
|
||||
|
||||
// We may need to forward this request on
|
||||
boolean isBlocked = (transactionData == null || ListUtils.isNameBlocked(transactionData.getName()));
|
||||
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
|
||||
// In relay mode - so ask our other peers if they have it
|
||||
|
||||
long requestTime = getArbitraryMetadataMessage.getRequestTime();
|
||||
int requestHops = getArbitraryMetadataMessage.getRequestHops() + 1;
|
||||
long totalRequestTime = now - requestTime;
|
||||
|
||||
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
|
||||
// Relay request hasn't timed out yet, so can potentially be rebroadcast
|
||||
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
|
||||
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
|
||||
|
||||
Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops);
|
||||
relayGetArbitraryMetadataMessage.setId(message.getId());
|
||||
|
||||
LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
|
||||
Network.getInstance().broadcast(
|
||||
broadcastPeer ->
|
||||
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
|
||||
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage);
|
||||
|
||||
}
|
||||
else {
|
||||
// This relay request has reached the maximum number of allowed hops
|
||||
}
|
||||
}
|
||||
else {
|
||||
// This relay request has timed out
|
||||
}
|
||||
synchronized (lock) {
|
||||
messageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
private void processNetworkGetArbitraryMetadataMessage() {
|
||||
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (lock) {
|
||||
messagesToProcess = new ArrayList<>(messageList);
|
||||
messageList.clear();
|
||||
}
|
||||
|
||||
Map<String, byte[]> signatureBySignature58 = new HashMap<>((messagesToProcess.size()));
|
||||
Map<String, Long> nowBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
Map<String,PeerMessage> peerMessageBySignature58 = new HashMap<>(messagesToProcess.size());
|
||||
|
||||
for( PeerMessage peerMessage : messagesToProcess) {
|
||||
Controller.getInstance().stats.getArbitraryMetadataMessageStats.requests.incrementAndGet();
|
||||
|
||||
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) peerMessage.message;
|
||||
byte[] signature = getArbitraryMetadataMessage.getSignature();
|
||||
String signature58 = Base58.encode(signature);
|
||||
Long now = NTP.getTime();
|
||||
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peerMessage.peer, now);
|
||||
|
||||
// If we've seen this request recently, then ignore
|
||||
if (arbitraryMetadataRequests.putIfAbsent(peerMessage.message.getId(), newEntry) != null) {
|
||||
LOGGER.debug("Ignoring metadata request from peer {} for signature {}", peerMessage.peer, signature58);
|
||||
continue;
|
||||
}
|
||||
|
||||
LOGGER.debug("Received metadata request from peer {} for signature {}", peerMessage.peer, signature58);
|
||||
|
||||
signatureBySignature58.put(signature58, signature);
|
||||
nowBySignature58.put(signature58, now);
|
||||
peerMessageBySignature58.put(signature58, peerMessage);
|
||||
}
|
||||
|
||||
if( signatureBySignature58.isEmpty() ) return;
|
||||
|
||||
List<TransactionData> transactionDataList;
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// Firstly we need to lookup this file on chain to get its metadata hash
|
||||
transactionDataList = repository.getTransactionRepository().fromSignatures(new ArrayList(signatureBySignature58.values()));
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary transactions"), e);
|
||||
return;
|
||||
}
|
||||
|
||||
Map<String, ArbitraryTransactionData> dataBySignature58
|
||||
= transactionDataList.stream()
|
||||
.filter(data -> data instanceof ArbitraryTransactionData)
|
||||
.map(ArbitraryTransactionData.class::cast)
|
||||
.collect(Collectors.toMap(data -> Base58.encode(data.getSignature()), Function.identity()));
|
||||
|
||||
for(Map.Entry<String, ArbitraryTransactionData> entry : dataBySignature58.entrySet()) {
|
||||
String signature58 = entry.getKey();
|
||||
ArbitraryTransactionData transactionData = entry.getValue();
|
||||
|
||||
try {
|
||||
|
||||
// Check if we're even allowed to serve metadata for this transaction
|
||||
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
|
||||
|
||||
byte[] metadataHash = transactionData.getMetadataHash();
|
||||
if (metadataHash != null) {
|
||||
|
||||
// Load metadata file
|
||||
ArbitraryDataFile metadataFile = ArbitraryDataFile.fromHash(metadataHash, transactionData.getSignature());
|
||||
// We should only respond if we have the metadata file
|
||||
if (metadataFile != null && metadataFile.exists()) {
|
||||
|
||||
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
|
||||
Message message = peerMessage.message;
|
||||
Peer peer = peerMessage.peer;
|
||||
|
||||
// We have the metadata file, so update requests map to reflect that we've sent it
|
||||
Triple newEntry = new Triple<>(null, null, nowBySignature58.get(signature58));
|
||||
arbitraryMetadataRequests.put(message.getId(), newEntry);
|
||||
|
||||
ArbitraryMetadataMessage arbitraryMetadataMessage = new ArbitraryMetadataMessage(entry.getValue().getSignature(), metadataFile);
|
||||
arbitraryMetadataMessage.setId(message.getId());
|
||||
if (!peer.sendMessage(arbitraryMetadataMessage)) {
|
||||
LOGGER.debug("Couldn't send metadata");
|
||||
continue;
|
||||
}
|
||||
LOGGER.debug("Sent metadata");
|
||||
|
||||
// Nothing left to do, so return to prevent any unnecessary forwarding from occurring
|
||||
LOGGER.debug("No need for any forwarding because metadata request is fully served");
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error(String.format("Repository issue while fetching arbitrary metadata"), e);
|
||||
}
|
||||
|
||||
// We may need to forward this request on
|
||||
boolean isBlocked = (transactionDataList == null || ListUtils.isNameBlocked(transactionData.getName()));
|
||||
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
|
||||
// In relay mode - so ask our other peers if they have it
|
||||
|
||||
PeerMessage peerMessage = peerMessageBySignature58.get(signature58);
|
||||
GetArbitraryMetadataMessage getArbitraryMetadataMessage = (GetArbitraryMetadataMessage) peerMessage.message;
|
||||
long requestTime = getArbitraryMetadataMessage.getRequestTime();
|
||||
int requestHops = getArbitraryMetadataMessage.getRequestHops() + 1;
|
||||
long totalRequestTime = nowBySignature58.get(signature58) - requestTime;
|
||||
|
||||
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
|
||||
// Relay request hasn't timed out yet, so can potentially be rebroadcast
|
||||
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
|
||||
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
|
||||
|
||||
byte[] signature = signatureBySignature58.get(signature58);
|
||||
Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops);
|
||||
relayGetArbitraryMetadataMessage.setId(getArbitraryMetadataMessage.getId());
|
||||
|
||||
Peer peer = peerMessage.peer;
|
||||
LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
|
||||
Network.getInstance().broadcast(
|
||||
broadcastPeer ->
|
||||
!broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
|
||||
broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage);
|
||||
|
||||
} else {
|
||||
// This relay request has reached the maximum number of allowed hops
|
||||
}
|
||||
} else {
|
||||
// This relay request has timed out
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
}
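
ArbitraryMetadataManager above (and TradeBot later in this diff) both switch from handling each network message inline to queueing it into a lock-guarded list and draining that list once per second from a single-threaded scheduler. The skeleton of that pattern, stripped of the metadata-specific lookups:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    class BatchedMessageProcessor {
        private final List<PeerMessage> messageList = new ArrayList<>();
        private final Object lock = new Object();
        private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

        BatchedMessageProcessor() {
            scheduler.scheduleAtFixedRate(this::processBatch, 60, 1, TimeUnit.SECONDS);
        }

        // called from network threads: enqueue and return immediately
        void onMessage(Peer peer, Message message) {
            synchronized (lock) {
                messageList.add(new PeerMessage(peer, message));
            }
        }

        private void processBatch() {
            List<PeerMessage> toProcess;
            synchronized (lock) {
                toProcess = new ArrayList<>(messageList);
                messageList.clear();
            }
            if (toProcess.isEmpty())
                return;
            // ... do the expensive work in bulk, e.g. one repository call for all signatures, then reply per message
        }
    }
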
src/main/java/org/qortal/controller/arbitrary/Follower.java (new file, 130 lines)
@ -0,0 +1,130 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.data.transaction.ArbitraryTransactionData;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.utils.ListUtils;
|
||||
import org.qortal.utils.NamedThreadFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.OptionalInt;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class Follower {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(Follower.class);
|
||||
|
||||
private ScheduledExecutorService service
|
||||
= Executors.newScheduledThreadPool(2, new NamedThreadFactory("Follower", Thread.NORM_PRIORITY));
|
||||
|
||||
private Follower() {
|
||||
|
||||
}
|
||||
|
||||
private static Follower instance;
|
||||
|
||||
public static Follower getInstance() {
|
||||
|
||||
if( instance == null ) {
|
||||
instance = new Follower();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
public void start() {
|
||||
|
||||
// fetch arbitrary transactions from followed names from the last 100 blocks every 2 minutes
|
||||
service.scheduleWithFixedDelay(() -> fetch(OptionalInt.of(100)), 10, 2, TimeUnit.MINUTES);
|
||||
|
||||
// fetch arbitrary transactions from followed names from any block every 24 hours
|
||||
service.scheduleWithFixedDelay(() -> fetch(OptionalInt.empty()), 4, 24, TimeUnit.HOURS);
|
||||
}
|
||||
|
||||
private void fetch(OptionalInt limit) {
|
||||
|
||||
try {
|
||||
// for each followed name, get arbitrary transactions, then examine those transactions before fetching
|
||||
for (String name : ListUtils.followedNames()) {
|
||||
|
||||
List<ArbitraryTransactionData> transactionsInReverseOrder;
|
||||
|
||||
// open database to get the transactions in reverse order for the followed name
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
List<ArbitraryTransactionData> latestArbitraryTransactionsByName
|
||||
= repository.getArbitraryRepository().getLatestArbitraryTransactionsByName(name);
|
||||
|
||||
if (limit.isPresent()) {
|
||||
final int blockHeightThreshold = repository.getBlockRepository().getBlockchainHeight() - limit.getAsInt();
|
||||
|
||||
transactionsInReverseOrder
|
||||
= latestArbitraryTransactionsByName.stream().filter(tx -> tx.getBlockHeight() > blockHeightThreshold)
|
||||
.collect(Collectors.toList());
|
||||
} else {
|
||||
transactionsInReverseOrder = latestArbitraryTransactionsByName;
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
transactionsInReverseOrder = new ArrayList<>(0);
|
||||
}
|
||||
|
||||
// collect processed transaction hashes, so we don't fetch outdated transactions
|
||||
Set<ArbitraryTransactionDataHashWrapper> processedTransactions = new HashSet<>();
|
||||
|
||||
ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance();
|
||||
|
||||
// for each arbitrary transaction for the followed name: process, evaluate, fetch
|
||||
for (ArbitraryTransactionData arbitraryTransaction : transactionsInReverseOrder) {
|
||||
|
||||
boolean examined = false;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
// if not processed
|
||||
if (!processedTransactions.contains(new ArbitraryTransactionDataHashWrapper(arbitraryTransaction))) {
|
||||
boolean isLocal = repository.getArbitraryRepository().isDataLocal(arbitraryTransaction.getSignature());
|
||||
|
||||
// if not local, then continue to evaluate
|
||||
if (!isLocal) {
|
||||
|
||||
// evaluate fetching status for this transaction on this node
|
||||
ArbitraryDataExamination examination = storageManager.shouldPreFetchData(repository, arbitraryTransaction);
|
||||
|
||||
// if the evaluation passed, then fetch
|
||||
examined = examination.isPass();
|
||||
}
|
||||
// if locally stored, then nothing needs to be done
|
||||
|
||||
// add to processed transactions
|
||||
processedTransactions.add(new ArbitraryTransactionDataHashWrapper(arbitraryTransaction));
|
||||
}
|
||||
}
|
||||
|
||||
// if passed examination for fetching, then fetch
|
||||
if (examined) {
|
||||
LOGGER.info("for {} on {}, fetching {}", name, arbitraryTransaction.getService(), arbitraryTransaction.getIdentifier());
|
||||
boolean fetched = ArbitraryDataFileListManager.getInstance().fetchArbitraryDataFileList(arbitraryTransaction);
|
||||
|
||||
LOGGER.info("fetched = " + fetched);
|
||||
}
|
||||
|
||||
// pause a second before moving on to another transaction
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
}
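
Follower is a new singleton that prefetches data published by followed names on two schedules (recent blocks every 2 minutes, full history every 24 hours). The diff does not show where start() is called from; wiring it in is presumably a one-liner at node startup along these lines:

    // Hypothetical call site - the actual startup hook is not part of this diff
    Follower.getInstance().start();
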
@ -0,0 +1,22 @@
|
||||
package org.qortal.controller.arbitrary;
|
||||
|
||||
import org.qortal.network.Peer;
|
||||
import org.qortal.network.message.Message;
|
||||
|
||||
public class PeerMessage {
|
||||
Peer peer;
|
||||
Message message;
|
||||
|
||||
public PeerMessage(Peer peer, Message message) {
|
||||
this.peer = peer;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
public Peer getPeer() {
|
||||
return peer;
|
||||
}
|
||||
|
||||
public Message getMessage() {
|
||||
return message;
|
||||
}
|
||||
}
|
@ -8,6 +8,7 @@ import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.api.model.crosschain.TradeBotCreateRequest;
|
||||
import org.qortal.controller.Controller;
|
||||
import org.qortal.controller.Synchronizer;
|
||||
import org.qortal.controller.arbitrary.PeerMessage;
|
||||
import org.qortal.controller.tradebot.AcctTradeBot.ResponseResult;
|
||||
import org.qortal.crosschain.*;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -37,7 +38,12 @@ import org.qortal.utils.NTP;
|
||||
import java.awt.TrayIcon.MessageType;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Performing cross-chain trading steps on behalf of user.
|
||||
@ -118,6 +124,9 @@ public class TradeBot implements Listener {
|
||||
private Map<String, Long> validTrades = new HashMap<>();
|
||||
|
||||
private TradeBot() {
|
||||
|
||||
tradePresenceMessageScheduler.scheduleAtFixedRate( this::processTradePresencesMessages, 60, 1, TimeUnit.SECONDS);
|
||||
|
||||
EventBus.INSTANCE.addListener(event -> TradeBot.getInstance().listen(event));
|
||||
}
|
||||
|
||||
@ -551,77 +560,139 @@ public class TradeBot implements Listener {
|
||||
}
|
||||
}
|
||||
|
||||
// List to collect messages
|
||||
private final List<PeerMessage> tradePresenceMessageList = new ArrayList<>();
|
||||
// Lock to synchronize access to the list
|
||||
private final Object tradePresenceMessageLock = new Object();
|
||||
|
||||
// Scheduled executor service to process messages every second
|
||||
private final ScheduledExecutorService tradePresenceMessageScheduler = Executors.newScheduledThreadPool(1);
|
||||
|
||||
public void onTradePresencesMessage(Peer peer, Message message) {
|
||||
TradePresencesMessage tradePresencesMessage = (TradePresencesMessage) message;
|
||||
|
||||
List<TradePresenceData> peersTradePresences = tradePresencesMessage.getTradePresences();
|
||||
synchronized (tradePresenceMessageLock) {
|
||||
tradePresenceMessageList.add(new PeerMessage(peer, message));
|
||||
}
|
||||
}
|
||||
|
||||
long now = NTP.getTime();
|
||||
// Timestamps before this are too far into the past
|
||||
long pastThreshold = now;
|
||||
// Timestamps after this are too far into the future
|
||||
long futureThreshold = now + PRESENCE_LIFETIME;
|
||||
public void processTradePresencesMessages() {
|
||||
|
||||
Map<ByteArray, Supplier<ACCT>> acctSuppliersByCodeHash = SupportedBlockchain.getAcctMap();
|
||||
try {
|
||||
List<PeerMessage> messagesToProcess;
|
||||
synchronized (tradePresenceMessageLock) {
|
||||
messagesToProcess = new ArrayList<>(tradePresenceMessageList);
|
||||
tradePresenceMessageList.clear();
|
||||
}
|
||||
|
||||
int newCount = 0;
|
||||
if( messagesToProcess.isEmpty() ) return;
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
for (TradePresenceData peersTradePresence : peersTradePresences) {
|
||||
long timestamp = peersTradePresence.getTimestamp();
|
||||
Map<Peer, List<TradePresenceData>> tradePresencesByPeer = new HashMap<>(messagesToProcess.size());
|
||||
|
||||
// Ignore if timestamp is out of bounds
|
||||
if (timestamp < pastThreshold || timestamp > futureThreshold) {
|
||||
if (timestamp < pastThreshold)
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too old vs {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too new vs {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
|
||||
// map all trade presences from the messages to their peer
|
||||
for( PeerMessage peerMessage : messagesToProcess ) {
|
||||
TradePresencesMessage tradePresencesMessage = (TradePresencesMessage) peerMessage.getMessage();
|
||||
|
||||
List<TradePresenceData> peersTradePresences = tradePresencesMessage.getTradePresences();
|
||||
|
||||
tradePresencesByPeer.put(peerMessage.getPeer(), peersTradePresences);
|
||||
}
|
||||
|
||||
long now = NTP.getTime();
|
||||
// Timestamps before this are too far into the past
|
||||
long pastThreshold = now;
|
||||
// Timestamps after this are too far into the future
|
||||
long futureThreshold = now + PRESENCE_LIFETIME;
|
||||
|
||||
Map<ByteArray, Supplier<ACCT>> acctSuppliersByCodeHash = SupportedBlockchain.getAcctMap();
|
||||
|
||||
int newCount = 0;
|
||||
|
||||
Map<String, List<Peer>> peersByAtAddress = new HashMap<>(tradePresencesByPeer.size());
|
||||
Map<String, TradePresenceData> tradePresenceByAtAddress = new HashMap<>(tradePresencesByPeer.size());
|
||||
|
||||
// for each batch of trade presence data from a peer, validate and populate the maps declared above
|
||||
for ( Map.Entry<Peer, List<TradePresenceData>> entry: tradePresencesByPeer.entrySet()) {
|
||||
|
||||
Peer peer = entry.getKey();
|
||||
|
||||
for( TradePresenceData peersTradePresence : entry.getValue() ) {
|
||||
// TradePresenceData peersTradePresence
|
||||
long timestamp = peersTradePresence.getTimestamp();
|
||||
|
||||
// Ignore if timestamp is out of bounds
|
||||
if (timestamp < pastThreshold || timestamp > futureThreshold) {
|
||||
if (timestamp < pastThreshold)
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too old vs {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is too new vs {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, pastThreshold
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
|
||||
|
||||
// Ignore if we've previously verified this timestamp+publickey combo or sent timestamp is older
|
||||
TradePresenceData existingTradeData = this.safeAllTradePresencesByPubkey.get(pubkeyByteArray);
|
||||
if (existingTradeData != null && timestamp <= existingTradeData.getTimestamp()) {
|
||||
if (timestamp == existingTradeData.getTimestamp())
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as we have verified timestamp {} before",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is older than latest {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, existingTradeData.getTimestamp()
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check timestamp signature
|
||||
byte[] timestampSignature = peersTradePresence.getSignature();
|
||||
byte[] timestampBytes = Longs.toByteArray(timestamp);
|
||||
byte[] publicKey = peersTradePresence.getPublicKey();
|
||||
if (!Crypto.verify(publicKey, timestampSignature, timestampBytes)) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as signature failed to verify",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
|
||||
continue;
|
||||
continue;
|
||||
}
|
||||
|
||||
peersByAtAddress.computeIfAbsent(peersTradePresence.getAtAddress(), address -> new ArrayList<>()).add(peer);
|
||||
tradePresenceByAtAddress.put(peersTradePresence.getAtAddress(), peersTradePresence);
|
||||
}
|
||||
}
|
||||
|
||||
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
|
||||
if( tradePresenceByAtAddress.isEmpty() ) return;
|
||||
|
||||
// Ignore if we've previously verified this timestamp+publickey combo or sent timestamp is older
|
||||
TradePresenceData existingTradeData = this.safeAllTradePresencesByPubkey.get(pubkeyByteArray);
|
||||
if (existingTradeData != null && timestamp <= existingTradeData.getTimestamp()) {
|
||||
if (timestamp == existingTradeData.getTimestamp())
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as we have verified timestamp {} before",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as timestamp {} is older than latest {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp, existingTradeData.getTimestamp()
|
||||
);
|
||||
List<ATData> atDataList;
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
atDataList = repository.getATRepository().fromATAddresses( new ArrayList<>(tradePresenceByAtAddress.keySet()) );
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
|
||||
return;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
Map<String, Supplier<ACCT>> supplierByAtAddress = new HashMap<>(atDataList.size());
|
||||
|
||||
// Check timestamp signature
|
||||
byte[] timestampSignature = peersTradePresence.getSignature();
|
||||
byte[] timestampBytes = Longs.toByteArray(timestamp);
|
||||
byte[] publicKey = peersTradePresence.getPublicKey();
|
||||
if (!Crypto.verify(publicKey, timestampSignature, timestampBytes)) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as signature failed to verify",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
List<ATData> validatedAtDataList = new ArrayList<>(atDataList.size());
|
||||
|
||||
continue;
|
||||
}
|
||||
// for each trade
|
||||
for( ATData atData : atDataList ) {
|
||||
|
||||
ATData atData = repository.getATRepository().fromATAddress(peersTradePresence.getAtAddress());
|
||||
TradePresenceData peersTradePresence = tradePresenceByAtAddress.get(atData.getATAddress());
|
||||
if (atData == null || atData.getIsFrozen() || atData.getIsFinished()) {
|
||||
if (atData == null)
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as AT doesn't exist",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
LOGGER.trace("Ignoring trade presence {} from peer as AT doesn't exist",
|
||||
peersTradePresence.getAtAddress()
|
||||
);
|
||||
else
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as AT is frozen or finished",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
LOGGER.trace("Ignoring trade presence {} from peer as AT is frozen or finished",
|
||||
peersTradePresence.getAtAddress()
|
||||
);
|
||||
|
||||
continue;
|
||||
@ -630,51 +701,87 @@ public class TradeBot implements Listener {
|
||||
ByteArray atCodeHash = ByteArray.wrap(atData.getCodeHash());
|
||||
Supplier<ACCT> acctSupplier = acctSuppliersByCodeHash.get(atCodeHash);
|
||||
if (acctSupplier == null) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as AT isn't a known ACCT?",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
LOGGER.trace("Ignoring trade presence {} from peer as AT isn't a known ACCT?",
|
||||
peersTradePresence.getAtAddress()
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
CrossChainTradeData tradeData = acctSupplier.get().populateTradeData(repository, atData);
|
||||
if (tradeData == null) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as trade data not found?",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
// Convert signer's public key to address form
|
||||
String signerAddress = peersTradePresence.getTradeAddress();
|
||||
|
||||
// Signer's public key (in address form) must match Bob's / Alice's trade public key (in address form)
|
||||
if (!signerAddress.equals(tradeData.qortalCreatorTradeAddress) && !signerAddress.equals(tradeData.qortalPartnerAddress)) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as signer isn't Alice or Bob?",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
// This is new to us
|
||||
this.allTradePresencesByPubkey.put(pubkeyByteArray, peersTradePresence);
|
||||
++newCount;
|
||||
|
||||
LOGGER.trace("Added trade presence {} from peer {} with timestamp {}",
|
||||
peersTradePresence.getAtAddress(), peer, timestamp
|
||||
);
|
||||
|
||||
EventBus.INSTANCE.notify(new TradePresenceEvent(peersTradePresence));
|
||||
validatedAtDataList.add(atData);
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
|
||||
}
|
||||
|
||||
if (newCount > 0) {
|
||||
LOGGER.debug("New trade presences: {}, all trade presences: {}", newCount, allTradePresencesByPubkey.size());
|
||||
rebuildSafeAllTradePresences();
|
||||
// populated data for each trade
|
||||
List<CrossChainTradeData> crossChainTradeDataList;
|
||||
|
||||
// validated trade data grouped by code (cross chain coin)
|
||||
Map<ByteArray, List<ATData>> atDataByCodeHash
|
||||
= validatedAtDataList.stream().collect(
|
||||
Collectors.groupingBy(data -> ByteArray.wrap(data.getCodeHash())));
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
|
||||
crossChainTradeDataList = new ArrayList<>();
|
||||
|
||||
// for each code (cross chain coin), get each trade, then populate trade data
|
||||
for( Map.Entry<ByteArray, List<ATData>> entry : atDataByCodeHash.entrySet() ) {
|
||||
|
||||
Supplier<ACCT> acctSupplier = acctSuppliersByCodeHash.get(entry.getKey());
|
||||
|
||||
crossChainTradeDataList.addAll(
|
||||
acctSupplier.get().populateTradeDataList(
|
||||
repository,
|
||||
entry.getValue()
|
||||
)
|
||||
.stream().filter( data -> data != null )
|
||||
.collect(Collectors.toList())
|
||||
);
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
|
||||
return;
|
||||
}
|
||||
|
||||
// for each populated trade data, validate and fire event
|
||||
for( CrossChainTradeData tradeData : crossChainTradeDataList ) {
|
||||
|
||||
List<Peer> peers = peersByAtAddress.get(tradeData.qortalAtAddress);
|
||||
|
||||
for( Peer peer : peers ) {
|
||||
|
||||
TradePresenceData peersTradePresence = tradePresenceByAtAddress.get(tradeData.qortalAtAddress);
|
||||
|
||||
// Convert signer's public key to address form
|
||||
String signerAddress = peersTradePresence.getTradeAddress();
|
||||
|
||||
// Signer's public key (in address form) must match Bob's / Alice's trade public key (in address form)
|
||||
if (!signerAddress.equals(tradeData.qortalCreatorTradeAddress) && !signerAddress.equals(tradeData.qortalPartnerAddress)) {
|
||||
LOGGER.trace("Ignoring trade presence {} from peer {} as signer isn't Alice or Bob?",
|
||||
peersTradePresence.getAtAddress(), peer
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
ByteArray pubkeyByteArray = ByteArray.wrap(peersTradePresence.getPublicKey());
|
||||
|
||||
// This is new to us
|
||||
this.allTradePresencesByPubkey.put(pubkeyByteArray, peersTradePresence);
|
||||
++newCount;
|
||||
|
||||
LOGGER.trace("Added trade presence {} from peer {} with timestamp {}",
|
||||
peersTradePresence.getAtAddress(), peer, tradeData.creationTimestamp
|
||||
);
|
||||
|
||||
EventBus.INSTANCE.notify(new TradePresenceEvent(peersTradePresence));
|
||||
}
|
||||
}
|
||||
|
||||
if (newCount > 0) {
|
||||
LOGGER.info("New trade presences: {}, all trade presences: {}", newCount, allTradePresencesByPubkey.size());
|
||||
rebuildSafeAllTradePresences();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error(e.getMessage(), e);
|
||||
}
|
||||
}
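
Because the old and new versions are interleaved above, the core of the rework is easier to see in isolation: presences are validated first, the surviving ATs are grouped by code hash so each ACCT implementation is invoked once per batch, and events are only fired after the trade data has been populated in bulk. Condensed, using the same types as the diff:

    // group validated ATs by code hash so each ACCT implementation is called once per batch
    Map<ByteArray, List<ATData>> atDataByCodeHash = validatedAtDataList.stream()
            .collect(Collectors.groupingBy(data -> ByteArray.wrap(data.getCodeHash())));

    List<CrossChainTradeData> crossChainTradeDataList = new ArrayList<>();
    try (final Repository repository = RepositoryManager.getRepository()) {
        for (Map.Entry<ByteArray, List<ATData>> entry : atDataByCodeHash.entrySet()) {
            Supplier<ACCT> acctSupplier = acctSuppliersByCodeHash.get(entry.getKey());
            crossChainTradeDataList.addAll(
                    acctSupplier.get().populateTradeDataList(repository, entry.getValue())
                            .stream().filter(data -> data != null).collect(Collectors.toList()));
        }
    } catch (DataException e) {
        LOGGER.error("Couldn't process TRADE_PRESENCES message due to repository issue", e);
        return;
    }

    // then, for each populated CrossChainTradeData, check the signer against the trade addresses
    // and notify a TradePresenceEvent for every peer that reported it
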
@ -6,6 +6,9 @@ import org.qortal.data.crosschain.CrossChainTradeData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
public interface ACCT {
|
||||
|
||||
public byte[] getCodeBytesHash();
|
||||
@ -16,8 +19,12 @@ public interface ACCT {
|
||||
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException;
|
||||
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository respository, List<ATData> atDataList) throws DataException;
|
||||
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException;
|
||||
|
||||
CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException;
|
||||
|
||||
public byte[] buildCancelMessage(String creatorQortalAddress);
|
||||
|
||||
public byte[] findSecretA(Repository repository, CrossChainTradeData crossChainTradeData) throws DataException;
|
||||
|
@ -1,5 +1,6 @@
|
||||
package org.qortal.crosschain;
|
||||
|
||||
import org.bitcoinj.core.Coin;
|
||||
import org.bitcoinj.core.Context;
|
||||
import org.bitcoinj.core.NetworkParameters;
|
||||
import org.bitcoinj.core.Transaction;
|
||||
@ -14,15 +15,21 @@ import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class Bitcoin extends Bitcoiny {
|
||||
|
||||
public static final String CURRENCY_CODE = "BTC";
|
||||
|
||||
private static final long MINIMUM_ORDER_AMOUNT = 100000; // 0.001 BTC minimum order, due to high fees
|
||||
// Locking fee to lock in a QORT for BTC. This is the default value that the user should reset to
// a value in line with the BTC fee market. This is 5 sats per byte (0.00005 BTC per 1000 bytes).
|
||||
private static final Coin DEFAULT_FEE_PER_KB = Coin.valueOf(5_000); // 0.00005 BTC per 1000 bytes
|
||||
|
||||
// Temporary values until a dynamic fee system is written.
|
||||
private static final long NEW_FEE_AMOUNT = 6_000L;
|
||||
private static final long MINIMUM_ORDER_AMOUNT = 100_000; // 0.001 BTC minimum order, due to high fees
|
||||
|
||||
// Default value until user resets fee to compete with the current market. This is a total value for a
// p2sh transaction of roughly 300 bytes at 5 sats per byte
|
||||
private static final long NEW_FEE_AMOUNT = 1_500L;
|
||||
|
||||
private static final long NON_MAINNET_FEE = 1000L; // enough for TESTNET3 and should be OK for REGTEST
|
||||
|
||||
@ -111,7 +118,7 @@ public class Bitcoin extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@ -173,14 +180,14 @@ public class Bitcoin extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = NEW_FEE_AMOUNT;
|
||||
private AtomicLong feeRequired = new AtomicLong(NEW_FEE_AMOUNT);
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
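
Swapping the plain long feeCeiling for an AtomicLong feeRequired makes the fee safe to read and update from multiple threads (API calls, trade-bot logic) without extra synchronization. The pattern on its own:

    import java.util.concurrent.atomic.AtomicLong;

    class FeeHolder {
        // illustrative default; each network in the diff supplies its own starting value
        private final AtomicLong feeRequired = new AtomicLong(1_500L);

        long getFeeRequired() { return feeRequired.get(); }

        void setFeeRequired(long fee) { feeRequired.set(fee); }
    }
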
public abstract NetworkParameters getParams();
|
||||
@ -196,7 +203,7 @@ public class Bitcoin extends Bitcoiny {
|
||||
// Constructors and instance
|
||||
|
||||
private Bitcoin(BitcoinNet bitcoinNet, BitcoinyBlockchainProvider blockchain, Context bitcoinjContext, String currencyCode) {
|
||||
super(blockchain, bitcoinjContext, currencyCode, bitcoinjContext.getFeePerKb());
|
||||
super(blockchain, bitcoinjContext, currencyCode, DEFAULT_FEE_PER_KB);
|
||||
this.bitcoinNet = bitcoinNet;
|
||||
|
||||
LOGGER.info(() -> String.format("Starting Bitcoin support using %s", this.bitcoinNet.name()));
|
||||
@ -242,14 +249,14 @@ public class Bitcoin extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.bitcoinNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.bitcoinNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.bitcoinNet.setFeeCeiling( fee );
|
||||
this.bitcoinNet.setFeeRequired( fee );
|
||||
}
|
||||
/**
|
||||
* Returns bitcoinj transaction sending <tt>amount</tt> to <tt>recipient</tt> using 20 sat/byte fee.
|
||||
|
@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
|
||||
import com.google.common.primitives.Bytes;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -608,7 +610,14 @@ public class BitcoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -617,13 +626,14 @@ public class BitcoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -636,8 +646,13 @@ public class BitcoinACCTv1 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -569,7 +571,14 @@ public class BitcoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -578,13 +587,14 @@ public class BitcoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -597,8 +607,13 @@ public class BitcoinACCTv3 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -864,9 +864,9 @@ public abstract class Bitcoiny extends AbstractBitcoinNetParams implements Forei
|
||||
} while (true);
|
||||
}
|
||||
|
||||
public abstract long getFeeCeiling();
|
||||
public abstract long getFeeRequired();
|
||||
|
||||
public abstract void setFeeCeiling(long fee);
|
||||
public abstract void setFeeRequired(long fee);
|
||||
|
||||
// UTXOProvider support
|
||||
|
||||
|
@ -90,7 +90,7 @@ public class BitcoinyTBD extends Bitcoiny {
|
||||
NetTBD netTBD
|
||||
= new NetTBD(
|
||||
bitcoinyTBDRequest.getNetworkName(),
|
||||
bitcoinyTBDRequest.getFeeCeiling(),
|
||||
bitcoinyTBDRequest.getFeeRequired(),
|
||||
networkParams,
|
||||
Collections.emptyList(),
|
||||
bitcoinyTBDRequest.getExpectedGenesisHash()
|
||||
@ -135,19 +135,19 @@ public class BitcoinyTBD extends Bitcoiny {
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) throws ForeignBlockchainException {
|
||||
|
||||
return this.netTBD.getFeeCeiling();
|
||||
return this.netTBD.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
public long getFeeRequired() {
|
||||
|
||||
return this.netTBD.getFeeCeiling();
|
||||
return this.netTBD.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.netTBD.setFeeCeiling( fee );
|
||||
this.netTBD.setFeeRequired( fee );
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -14,6 +14,7 @@ import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class Digibyte extends Bitcoiny {
|
||||
|
||||
@ -59,7 +60,7 @@ public class Digibyte extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@ -109,14 +110,14 @@ public class Digibyte extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = MAINNET_FEE;
|
||||
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public abstract NetworkParameters getParams();
|
||||
@ -178,13 +179,13 @@ public class Digibyte extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.digibyteNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.digibyteNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.digibyteNet.setFeeCeiling( fee );
|
||||
this.digibyteNet.setFeeRequired( fee );
|
||||
}
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -569,7 +571,14 @@ public class DigibyteACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -578,13 +587,14 @@ public class DigibyteACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -597,8 +607,13 @@ public class DigibyteACCTv3 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -13,6 +13,7 @@ import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class Dogecoin extends Bitcoiny {
|
||||
|
||||
@ -60,7 +61,7 @@ public class Dogecoin extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@ -110,14 +111,14 @@ public class Dogecoin extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = MAINNET_FEE;
|
||||
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public abstract NetworkParameters getParams();
|
||||
@ -179,13 +180,13 @@ public class Dogecoin extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.dogecoinNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.dogecoinNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.dogecoinNet.setFeeCeiling( fee );
|
||||
this.dogecoinNet.setFeeRequired( fee );
|
||||
}
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -566,7 +568,14 @@ public class DogecoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -575,13 +584,14 @@ public class DogecoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -594,8 +604,13 @@ public class DogecoinACCTv1 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -569,7 +571,14 @@ public class DogecoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -578,13 +587,14 @@ public class DogecoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -597,8 +607,13 @@ public class DogecoinACCTv3 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -14,6 +14,7 @@ import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class Litecoin extends Bitcoiny {
|
||||
|
||||
@ -63,7 +64,7 @@ public class Litecoin extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@ -116,14 +117,14 @@ public class Litecoin extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = MAINNET_FEE;
|
||||
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public abstract NetworkParameters getParams();
|
||||
@ -185,13 +186,13 @@ public class Litecoin extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.litecoinNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.litecoinNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.litecoinNet.setFeeCeiling( fee );
|
||||
this.litecoinNet.setFeeRequired( fee );
|
||||
}
|
||||
}
|
||||
|
@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
|
||||
import com.google.common.primitives.Bytes;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -559,7 +561,14 @@ public class LitecoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -568,13 +577,14 @@ public class LitecoinACCTv1 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -587,8 +597,13 @@ public class LitecoinACCTv1 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
|
||||
import com.google.common.primitives.Bytes;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -562,7 +564,14 @@ public class LitecoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -571,13 +580,14 @@ public class LitecoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -590,8 +600,13 @@ public class LitecoinACCTv3 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -3,18 +3,19 @@ package org.qortal.crosschain;
|
||||
import org.bitcoinj.core.NetworkParameters;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class NetTBD {
|
||||
|
||||
private String name;
|
||||
private long feeCeiling;
|
||||
private AtomicLong feeRequired;
|
||||
private NetworkParameters params;
|
||||
private Collection<ElectrumX.Server> servers;
|
||||
private String genesisHash;
|
||||
|
||||
public NetTBD(String name, long feeCeiling, NetworkParameters params, Collection<ElectrumX.Server> servers, String genesisHash) {
|
||||
public NetTBD(String name, long feeRequired, NetworkParameters params, Collection<ElectrumX.Server> servers, String genesisHash) {
|
||||
this.name = name;
|
||||
this.feeCeiling = feeCeiling;
|
||||
this.feeRequired = new AtomicLong(feeRequired);
|
||||
this.params = params;
|
||||
this.servers = servers;
|
||||
this.genesisHash = genesisHash;
|
||||
@ -25,14 +26,14 @@ public class NetTBD {
|
||||
return this.name;
|
||||
}
|
||||
|
||||
public long getFeeCeiling() {
|
||||
public long getFeeRequired() {
|
||||
|
||||
return feeCeiling;
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
|
||||
this.feeCeiling = feeCeiling;
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public NetworkParameters getParams() {
|
||||
|
@ -21,6 +21,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class PirateChain extends Bitcoiny {
|
||||
|
||||
@ -51,12 +52,7 @@ public class PirateChain extends Bitcoiny {
|
||||
public Collection<Server> getServers() {
|
||||
return Arrays.asList(
|
||||
// Servers chosen on NO BASIS WHATSOEVER from various sources!
|
||||
new Server("lightd.pirate.black", Server.ConnectionType.SSL, 443),
|
||||
new Server("wallet-arrr1.qortal.online", Server.ConnectionType.SSL, 443),
|
||||
new Server("wallet-arrr2.qortal.online", Server.ConnectionType.SSL, 443),
|
||||
new Server("wallet-arrr3.qortal.online", Server.ConnectionType.SSL, 443),
|
||||
new Server("wallet-arrr4.qortal.online", Server.ConnectionType.SSL, 443),
|
||||
new Server("wallet-arrr5.qortal.online", Server.ConnectionType.SSL, 443)
|
||||
new Server("lightd.pirate.black", Server.ConnectionType.SSL, 443)
|
||||
);
|
||||
}
|
||||
|
||||
@ -67,7 +63,7 @@ public class PirateChain extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@ -117,14 +113,14 @@ public class PirateChain extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = MAINNET_FEE;
|
||||
private AtomicLong feeRequired = new AtomicLong(MAINNET_FEE);
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public abstract NetworkParameters getParams();
|
||||
@ -186,14 +182,14 @@ public class PirateChain extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.pirateChainNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.pirateChainNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.pirateChainNet.setFeeCeiling( fee );
|
||||
this.pirateChainNet.setFeeRequired( fee );
|
||||
}
|
||||
/**
|
||||
* Returns confirmed balance, based on passed payment script.
|
||||
|
@ -4,6 +4,7 @@ import com.google.common.hash.HashCode;
|
||||
import com.google.common.primitives.Bytes;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -19,6 +20,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -580,7 +582,14 @@ public class PirateChainACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -589,13 +598,14 @@ public class PirateChainACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -608,8 +618,13 @@ public class PirateChainACCTv3 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -8,6 +8,7 @@ import org.bouncycastle.util.encoders.DecoderException;
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONException;
|
||||
import org.json.JSONObject;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.controller.PirateChainWalletController;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.settings.Settings;
|
||||
@ -67,8 +68,8 @@ public class PirateWallet {
|
||||
}
|
||||
|
||||
// Pick a random server
|
||||
PirateLightClient.Server server = this.getRandomServer();
|
||||
String serverUri = String.format("https://%s:%d/", server.hostname, server.port);
|
||||
ChainableServer server = PirateChain.getInstance().blockchainProvider.getCurrentServer();
|
||||
String serverUri = String.format("https://%s:%d/", server.getHostName(), server.getPort());
|
||||
|
||||
// Pirate library uses base64 encoding
|
||||
String entropy64 = Base64.toBase64String(this.entropyBytes);
|
||||
|
@ -14,6 +14,7 @@ import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class Ravencoin extends Bitcoiny {
|
||||
|
||||
@ -61,7 +62,7 @@ public class Ravencoin extends Bitcoiny {
|
||||
|
||||
@Override
|
||||
public long getP2shFee(Long timestamp) {
|
||||
return this.getFeeCeiling();
|
||||
return this.getFeeRequired();
|
||||
}
|
||||
},
|
||||
TEST3 {
|
||||
@ -111,14 +112,14 @@ public class Ravencoin extends Bitcoiny {
|
||||
}
|
||||
};
|
||||
|
||||
private long feeCeiling = MAINNET_FEE;
|
||||
private AtomicLong feeRequired = new AtomicLong( MAINNET_FEE );
|
||||
|
||||
public long getFeeCeiling() {
|
||||
return feeCeiling;
|
||||
public long getFeeRequired() {
|
||||
return feeRequired.get();
|
||||
}
|
||||
|
||||
public void setFeeCeiling(long feeCeiling) {
|
||||
this.feeCeiling = feeCeiling;
|
||||
public void setFeeRequired(long feeRequired) {
|
||||
this.feeRequired.set(feeRequired);
|
||||
}
|
||||
|
||||
public abstract NetworkParameters getParams();
|
||||
@ -180,13 +181,13 @@ public class Ravencoin extends Bitcoiny {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFeeCeiling() {
|
||||
return this.ravencoinNet.getFeeCeiling();
|
||||
public long getFeeRequired() {
|
||||
return this.ravencoinNet.getFeeRequired();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFeeCeiling(long fee) {
|
||||
public void setFeeRequired(long fee) {
|
||||
|
||||
this.ravencoinNet.setFeeCeiling( fee );
|
||||
this.ravencoinNet.setFeeRequired( fee );
|
||||
}
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.ciyam.at.*;
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.api.resource.CrossChainUtils;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.at.QortalFunctionCode;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -21,6 +22,7 @@ import org.qortal.utils.BitTwiddling;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.OptionalLong;
|
||||
|
||||
import static org.ciyam.at.OpCode.calcOffset;
|
||||
|
||||
@ -569,7 +571,14 @@ public class RavencoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATData atData) throws DataException {
|
||||
ATStateData atStateData = repository.getATRepository().getLatestATState(atData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<CrossChainTradeData> populateTradeDataList(Repository repository, List<ATData> atDataList) throws DataException {
|
||||
List<CrossChainTradeData> crossChainTradeDataList = CrossChainUtils.populateTradeDataList(repository, this, atDataList);
|
||||
|
||||
return crossChainTradeDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -578,13 +587,14 @@ public class RavencoinACCTv3 implements ACCT {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, ATStateData atStateData) throws DataException {
|
||||
ATData atData = repository.getATRepository().fromATAddress(atStateData.getATAddress());
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData);
|
||||
return populateTradeData(repository, atData.getCreatorPublicKey(), atData.getCreation(), atStateData, OptionalLong.empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns CrossChainTradeData with useful info extracted from AT.
|
||||
*/
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData) throws DataException {
|
||||
@Override
|
||||
public CrossChainTradeData populateTradeData(Repository repository, byte[] creatorPublicKey, long creationTimestamp, ATStateData atStateData, OptionalLong optionalBalance) throws DataException {
|
||||
byte[] addressBytes = new byte[25]; // for general use
|
||||
String atAddress = atStateData.getATAddress();
|
||||
|
||||
@ -597,8 +607,13 @@ public class RavencoinACCTv3 implements ACCT {
|
||||
tradeData.qortalCreator = Crypto.toAddress(creatorPublicKey);
|
||||
tradeData.creationTimestamp = creationTimestamp;
|
||||
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
if(optionalBalance.isPresent()) {
|
||||
tradeData.qortBalance = optionalBalance.getAsLong();
|
||||
}
|
||||
else {
|
||||
Account atAccount = new Account(repository, atAddress);
|
||||
tradeData.qortBalance = atAccount.getConfirmedBalance(Asset.QORT);
|
||||
}
|
||||
|
||||
byte[] stateData = atStateData.getStateData();
|
||||
ByteBuffer dataByteBuffer = ByteBuffer.wrap(stateData);
|
||||
|
@ -100,7 +100,7 @@ public class AES {
// Prepend the output stream with the 16 byte initialization vector
outputStream.write(iv.getIV());

byte[] buffer = new byte[1024];
byte[] buffer = new byte[65536];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
byte[] output = cipher.update(buffer, 0, bytesRead);
@ -138,7 +138,7 @@ public class AES {
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));

byte[] buffer = new byte[64];
byte[] buffer = new byte[65536];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
byte[] output = cipher.update(buffer, 0, bytesRead);
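Both hunks only widen the streaming buffer (from 1 KiB and 64 bytes up to 64 KiB), which reduces the number of Cipher.update calls and stream reads on large payloads. A self-contained sketch of the same update-loop style; the algorithm string and key handling here are illustrative, not taken from the class above:

    import javax.crypto.Cipher;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;
    import javax.crypto.spec.IvParameterSpec;
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.security.SecureRandom;

    public class AesStreamSketch {
        // Streams input through the cipher in 64 KiB chunks, as in the diff above.
        static void pump(InputStream in, OutputStream out, Cipher cipher) throws Exception {
            byte[] buffer = new byte[65536];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                byte[] output = cipher.update(buffer, 0, bytesRead);
                if (output != null)
                    out.write(output);
            }
            byte[] tail = cipher.doFinal();
            if (tail != null)
                out.write(tail);
        }

        public static void main(String[] args) throws Exception {
            SecretKey key = KeyGenerator.getInstance("AES").generateKey();
            byte[] iv = new byte[16];
            new SecureRandom().nextBytes(iv);

            Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
            cipher.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(iv));

            ByteArrayOutputStream encrypted = new ByteArrayOutputStream();
            pump(new ByteArrayInputStream("hello".getBytes()), encrypted, cipher);
            System.out.println(encrypted.size() + " encrypted bytes");
        }
    }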
57
src/main/java/org/qortal/data/crosschain/ForeignFeeData.java
Normal file
@ -0,0 +1,57 @@
package org.qortal.data.crosschain;

import org.json.JSONObject;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;

// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
public class ForeignFeeData {

private String blockchain;

@XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
private long fee;

protected ForeignFeeData() {
/* JAXB */
}

public ForeignFeeData(String blockchain,
long fee) {
this.blockchain = blockchain;
this.fee = fee;
}

public String getBlockchain() {
return this.blockchain;
}

public long getFee() {
return this.fee;
}

public JSONObject toJson() {
JSONObject jsonObject = new JSONObject();
jsonObject.put("blockchain", this.getBlockchain());
jsonObject.put("fee", this.getFee());
return jsonObject;
}

public static ForeignFeeData fromJson(JSONObject json) {
return new ForeignFeeData(
json.isNull("blockchain") ? null : json.getString("blockchain"),
json.isNull("fee") ? null : json.getLong("fee")
);
}

@Override
public String toString() {
return "ForeignFeeData{" +
"blockchain='" + blockchain + '\'' +
", fee=" + fee +
'}';
}
}
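ForeignFeeData above is a plain JAXB/JSON carrier. A short usage sketch of the round trip it supports; the blockchain name and fee value are made up for the example:

    import org.json.JSONObject;
    import org.qortal.data.crosschain.ForeignFeeData;

    public class ForeignFeeDataRoundTrip {
        public static void main(String[] args) {
            // Build, serialize, then parse back; values here are illustrative only.
            ForeignFeeData original = new ForeignFeeData("Litecoin", 1000L);
            JSONObject json = original.toJson();
            ForeignFeeData parsed = ForeignFeeData.fromJson(json);
            System.out.println(json);   // e.g. {"fee":1000,"blockchain":"Litecoin"} (key order not guaranteed)
            System.out.println(parsed); // ForeignFeeData{blockchain='Litecoin', fee=1000}
        }
    }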
@ -0,0 +1,90 @@
|
||||
package org.qortal.data.crosschain;
|
||||
|
||||
import org.json.JSONObject;
|
||||
import org.qortal.data.account.MintingAccountData;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import java.util.Objects;
|
||||
|
||||
// All properties to be converted to JSON via JAXB
|
||||
@XmlAccessorType(XmlAccessType.FIELD)
|
||||
public class ForeignFeeDecodedData {
|
||||
|
||||
protected long timestamp;
|
||||
protected byte[] data;
|
||||
protected String atAddress;
|
||||
protected Integer fee;
|
||||
|
||||
// Constructors
|
||||
|
||||
// necessary for JAXB serialization
|
||||
protected ForeignFeeDecodedData() {
|
||||
}
|
||||
|
||||
public ForeignFeeDecodedData(long timestamp, byte[] data, String atAddress, Integer fee) {
|
||||
this.timestamp = timestamp;
|
||||
this.data = data;
|
||||
this.atAddress = atAddress;
|
||||
this.fee = fee;
|
||||
}
|
||||
|
||||
public long getTimestamp() {
|
||||
return this.timestamp;
|
||||
}
|
||||
|
||||
public byte[] getData() {
|
||||
return this.data;
|
||||
}
|
||||
|
||||
public String getAtAddress() {
|
||||
return atAddress;
|
||||
}
|
||||
|
||||
public Integer getFee() {
|
||||
return this.fee;
|
||||
}
|
||||
|
||||
// Comparison
|
||||
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
ForeignFeeDecodedData that = (ForeignFeeDecodedData) o;
|
||||
return timestamp == that.timestamp && Objects.equals(atAddress, that.atAddress) && Objects.equals(fee, that.fee);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(timestamp, atAddress, fee);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ForeignFeeDecodedData{" +
|
||||
"timestamp=" + timestamp +
|
||||
", atAddress='" + atAddress + '\'' +
|
||||
", fee=" + fee +
|
||||
'}';
|
||||
}
|
||||
|
||||
public JSONObject toJson() {
|
||||
JSONObject jsonObject = new JSONObject();
|
||||
jsonObject.put("data", Base58.encode(this.data));
|
||||
jsonObject.put("atAddress", this.atAddress);
|
||||
jsonObject.put("timestamp", this.timestamp);
|
||||
jsonObject.put("fee", this.fee);
|
||||
return jsonObject;
|
||||
}
|
||||
|
||||
public static ForeignFeeDecodedData fromJson(JSONObject json) {
|
||||
return new ForeignFeeDecodedData(
|
||||
json.isNull("timestamp") ? null : json.getLong("timestamp"),
|
||||
json.isNull("data") ? null : Base58.decode(json.getString("data")),
|
||||
json.isNull("atAddress") ? null : json.getString("atAddress"),
|
||||
json.isNull("fee") ? null : json.getInt("fee"));
|
||||
}
|
||||
}
|
@ -0,0 +1,69 @@
|
||||
package org.qortal.data.crosschain;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import java.util.Objects;
|
||||
|
||||
// All properties to be converted to JSON via JAXB
|
||||
@XmlAccessorType(XmlAccessType.FIELD)
|
||||
public class ForeignFeeEncodedData {
|
||||
|
||||
protected long timestamp;
|
||||
protected String data;
|
||||
protected String atAddress;
|
||||
protected Integer fee;
|
||||
|
||||
// Constructors
|
||||
|
||||
// necessary for JAXB serialization
|
||||
protected ForeignFeeEncodedData() {
|
||||
}
|
||||
|
||||
public ForeignFeeEncodedData(long timestamp, String data, String atAddress, Integer fee) {
|
||||
this.timestamp = timestamp;
|
||||
this.data = data;
|
||||
this.atAddress = atAddress;
|
||||
this.fee = fee;
|
||||
}
|
||||
|
||||
public long getTimestamp() {
|
||||
return this.timestamp;
|
||||
}
|
||||
|
||||
public String getData() {
|
||||
return this.data;
|
||||
}
|
||||
|
||||
public String getAtAddress() {
|
||||
return atAddress;
|
||||
}
|
||||
|
||||
public Integer getFee() {
|
||||
return this.fee;
|
||||
}
|
||||
|
||||
// Comparison
|
||||
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
ForeignFeeEncodedData that = (ForeignFeeEncodedData) o;
|
||||
return timestamp == that.timestamp && Objects.equals(atAddress, that.atAddress) && Objects.equals(fee, that.fee);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(timestamp, atAddress, fee);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ForeignFeeDecodedData{" +
|
||||
"timestamp=" + timestamp +
|
||||
", atAddress='" + atAddress + '\'' +
|
||||
", fee=" + fee +
|
||||
'}';
|
||||
}
|
||||
}
|
@ -0,0 +1,29 @@
|
||||
package org.qortal.data.crosschain;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
|
||||
@XmlAccessorType(XmlAccessType.FIELD)
|
||||
public class UnsignedFeeEvent {
|
||||
|
||||
private boolean positive;
|
||||
|
||||
private String address;
|
||||
|
||||
public UnsignedFeeEvent() {
|
||||
}
|
||||
|
||||
public UnsignedFeeEvent(boolean positive, String address) {
|
||||
|
||||
this.positive = positive;
|
||||
this.address = address;
|
||||
}
|
||||
|
||||
public boolean isPositive() {
|
||||
return positive;
|
||||
}
|
||||
|
||||
public String getAddress() {
|
||||
return address;
|
||||
}
|
||||
}
|
@ -67,6 +67,11 @@ public class NameData {
this(name, reducedName, owner, data, registered, null, false, null, reference, creationGroupId);
}

// Typically used for name summary
public NameData(String name, String owner) {
this(name, null, owner, null, 0L, null, false, null, null, 0);
}

// Getters / setters

public String getName() {
30
src/main/java/org/qortal/event/FeeWaitingEvent.java
Normal file
30
src/main/java/org/qortal/event/FeeWaitingEvent.java
Normal file
@ -0,0 +1,30 @@
|
||||
package org.qortal.event;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
|
||||
@XmlAccessorType(XmlAccessType.FIELD)
|
||||
public class FeeWaitingEvent implements Event{
|
||||
|
||||
private boolean positive;
|
||||
|
||||
private String address;
|
||||
|
||||
public FeeWaitingEvent() {
|
||||
}
|
||||
|
||||
public FeeWaitingEvent(boolean positive, String address) {
|
||||
|
||||
this.positive = positive;
|
||||
this.address = address;
|
||||
|
||||
}
|
||||
|
||||
public boolean isPositive() {
|
||||
return positive;
|
||||
}
|
||||
|
||||
public String getAddress() {
|
||||
return address;
|
||||
}
|
||||
}
|
@ -0,0 +1,4 @@
|
||||
package org.qortal.event;
|
||||
|
||||
public class LockingFeeUpdateEvent implements Event{
|
||||
}
|
15
src/main/java/org/qortal/event/RequiredFeeUpdateEvent.java
Normal file
15
src/main/java/org/qortal/event/RequiredFeeUpdateEvent.java
Normal file
@ -0,0 +1,15 @@
|
||||
package org.qortal.event;
|
||||
|
||||
import org.qortal.crosschain.Bitcoiny;
|
||||
|
||||
public class RequiredFeeUpdateEvent implements Event{
|
||||
private final Bitcoiny bitcoiny;
|
||||
|
||||
public RequiredFeeUpdateEvent(Bitcoiny bitcoiny) {
|
||||
this.bitcoiny = bitcoiny;
|
||||
}
|
||||
|
||||
public Bitcoiny getBitcoiny() {
|
||||
return bitcoiny;
|
||||
}
|
||||
}
|
@ -983,7 +983,7 @@ public class Network {
if (maxThreadsForMessageType != null) {
Integer threadCount = threadsPerMessageType.get(message.getType());
if (threadCount != null && threadCount >= maxThreadsForMessageType) {
LOGGER.trace("Discarding {} message as there are already {} active threads", message.getType().name(), threadCount);
LOGGER.warn("Discarding {} message as there are already {} active threads", message.getType().name(), threadCount);
return;
}
}
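Only the log level changes here (trace to warn), so discarded messages are visible in default logs. For context, the check being logged is a per-message-type thread cap; a minimal standalone sketch of that kind of gate, with hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    // Illustration of a per-type concurrency gate similar in spirit to the check above.
    class PerTypeThrottleSketch {
        private final Map<String, Integer> activeThreadsPerType = new HashMap<>();
        private final int maxThreadsPerType;

        PerTypeThrottleSketch(int maxThreadsPerType) {
            this.maxThreadsPerType = maxThreadsPerType;
        }

        // Returns true if the caller may handle this message type, false if it should be discarded.
        synchronized boolean tryAcquire(String messageType) {
            int current = activeThreadsPerType.getOrDefault(messageType, 0);
            if (current >= maxThreadsPerType)
                return false;
            activeThreadsPerType.put(messageType, current + 1);
            return true;
        }

        synchronized void release(String messageType) {
            activeThreadsPerType.merge(messageType, -1, Integer::sum);
        }
    }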
@ -640,10 +640,13 @@ public class Peer {
return false;

try {
this.outputBuffer = ByteBuffer.wrap(message.toBytes());
byte[] messageBytes = message.toBytes();

this.outputBuffer = ByteBuffer.wrap(messageBytes);
this.outputMessageType = message.getType().name();
this.outputMessageId = message.getId();

LOGGER.trace("[{}] Sending {} message with ID {} to peer {}",
this.peerConnectionId, this.outputMessageType, this.outputMessageId, this);

@ -662,12 +665,22 @@ public class Peer {
// If output byte buffer is not null, send from that
int bytesWritten = this.socketChannel.write(outputBuffer);

LOGGER.trace("[{}] Sent {} bytes of {} message with ID {} to peer {} ({} total)", this.peerConnectionId,
bytesWritten, this.outputMessageType, this.outputMessageId, this, outputBuffer.limit());
int zeroSendCount = 0;

// If we've sent 0 bytes then socket buffer is full so we need to wait until it's empty again
if (bytesWritten == 0) {
return true;
while (bytesWritten == 0) {
if (zeroSendCount > 9) {
LOGGER.debug("Socket write stuck for too long, returning");
return true;
}
try {
Thread.sleep(10); // 10MS CPU Sleep to try and give it time to flush the socket
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false; // optional, if you want to signal shutdown
}
zeroSendCount++;
bytesWritten = this.socketChannel.write(outputBuffer);
}

// If we then exhaust the byte buffer, set it to null (otherwise loop and try to send more)
@ -723,13 +736,18 @@ public class Peer {
* @return <code>true</code> if message successfully sent; <code>false</code> otherwise
*/
public boolean sendMessageWithTimeout(Message message, int timeout) {

return PeerSendManagement.getInstance().getOrCreateSendManager(this).queueMessage(message, timeout);
}

public boolean sendMessageWithTimeoutNow(Message message, int timeout) {
if (!this.socketChannel.isOpen()) {
return false;
}

try {
// Queue message, to be picked up by ChannelWriteTask and then peer.writeChannel()
LOGGER.trace("[{}] Queuing {} message with ID {} to peer {}", this.peerConnectionId,
LOGGER.debug("[{}] Queuing {} message with ID {} to peer {}", this.peerConnectionId,
message.getType().name(), message.getId(), this);

// Check message properly constructed
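The second hunk above replaces an immediate return on a zero-byte write with a short bounded retry before giving up. A compact standalone sketch of that bounded-retry idea for a non-blocking SocketChannel; the retry limit and sleep interval mirror the diff, but the method itself is illustrative:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SocketChannel;

    public class BoundedWriteRetrySketch {
        // Tries to push the buffer through a non-blocking channel, pausing briefly
        // whenever the socket's send buffer is full (write() returns 0).
        static boolean writeWithRetry(SocketChannel channel, ByteBuffer buffer) throws IOException, InterruptedException {
            int zeroWrites = 0;
            while (buffer.hasRemaining()) {
                int bytesWritten = channel.write(buffer);
                if (bytesWritten == 0) {
                    if (++zeroWrites > 9)
                        return false; // still stuck after ~100ms; let the caller decide what to do
                    Thread.sleep(10); // give the OS a moment to drain the send buffer
                } else {
                    zeroWrites = 0;
                }
            }
            return true;
        }
    }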
55
src/main/java/org/qortal/network/PeerSendManagement.java
Normal file
@ -0,0 +1,55 @@
package org.qortal.network;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeerSendManagement {

private static final Logger LOGGER = LogManager.getLogger(PeerSendManagement.class);

private final Map<String, PeerSendManager> peerSendManagers = new ConcurrentHashMap<>();

public PeerSendManager getOrCreateSendManager(Peer peer) {
return peerSendManagers.computeIfAbsent(peer.toString(), p -> new PeerSendManager(peer));
}

private PeerSendManagement() {

ScheduledExecutorService cleaner = Executors.newSingleThreadScheduledExecutor();

cleaner.scheduleAtFixedRate(() -> {
long idleCutoff = TimeUnit.MINUTES.toMillis(2);
Iterator<Map.Entry<String, PeerSendManager>> iterator = peerSendManagers.entrySet().iterator();

while (iterator.hasNext()) {
Map.Entry<String, PeerSendManager> entry = iterator.next();

PeerSendManager manager = entry.getValue();

if (manager.isIdle(idleCutoff)) {
iterator.remove(); // SAFE removal during iteration
manager.shutdown();
LOGGER.debug("Cleaned up PeerSendManager for peer {}", entry.getKey());
}
}
}, 0, 5, TimeUnit.MINUTES);
}

private static PeerSendManagement instance;

public static PeerSendManagement getInstance() {

if( instance == null ) {
instance = new PeerSendManagement();
}

return instance;
}
}
138
src/main/java/org/qortal/network/PeerSendManager.java
Normal file
138
src/main/java/org/qortal/network/PeerSendManager.java
Normal file
@ -0,0 +1,138 @@
package org.qortal.network;

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.network.message.Message;

public class PeerSendManager {

	private static final Logger LOGGER = LogManager.getLogger(PeerSendManager.class);

	private static final int MAX_FAILURES = 15;
	private static final int MAX_MESSAGE_ATTEMPTS = 2;
	private static final int RETRY_DELAY_MS = 100;
	private static final long MAX_QUEUE_DURATION_MS = 20_000;
	private static final long COOLDOWN_DURATION_MS = 20_000;

	private final Peer peer;
	private final BlockingQueue<TimedMessage> queue = new LinkedBlockingQueue<>();
	private final ExecutorService executor;
	private final AtomicInteger failureCount = new AtomicInteger(0);
	private static final AtomicInteger threadCount = new AtomicInteger(1);

	private volatile boolean coolingDown = false;
	private volatile long lastUsed = System.currentTimeMillis();

	public PeerSendManager(Peer peer) {
		this.peer = peer;
		this.executor = Executors.newSingleThreadExecutor(r -> {
			Thread t = new Thread(r);
			t.setName("PeerSendManager-" + peer.getResolvedAddress().getHostString() + "-" + threadCount.getAndIncrement());
			return t;
		});
		start();
	}

	private void start() {
		executor.submit(() -> {
			while (!Thread.currentThread().isInterrupted()) {
				try {
					TimedMessage timedMessage = queue.take();
					long age = System.currentTimeMillis() - timedMessage.timestamp;

					if (age > MAX_QUEUE_DURATION_MS) {
						LOGGER.debug("Dropping stale message {} ({}ms old)", timedMessage.message.getId(), age);
						continue;
					}

					Message message = timedMessage.message;
					int timeout = timedMessage.timeout;
					boolean success = false;

					for (int attempt = 1; attempt <= MAX_MESSAGE_ATTEMPTS; attempt++) {
						try {
							if (peer.sendMessageWithTimeoutNow(message, timeout)) {
								success = true;
								failureCount.set(0); // reset on success
								break;
							}
						} catch (Exception e) {
							LOGGER.debug("Attempt {} failed for message {} to peer {}: {}", attempt, message.getId(), peer, e.getMessage());
						}

						Thread.sleep(RETRY_DELAY_MS);
					}

					if (!success) {
						int totalFailures = failureCount.incrementAndGet();
						LOGGER.debug("Failed to send message {} to peer {}. Total failures: {}", message.getId(), peer, totalFailures);

						if (totalFailures >= MAX_FAILURES) {
							LOGGER.debug("Peer {} exceeded failure limit ({}). Disconnecting...", peer, totalFailures);
							peer.disconnect("Too many message send failures");
							coolingDown = true;
							queue.clear();

							try {
								Thread.sleep(COOLDOWN_DURATION_MS);
							} catch (InterruptedException e) {
								Thread.currentThread().interrupt();
								return;
							} finally {
								coolingDown = false;
								failureCount.set(0);
							}
						}
					}

					Thread.sleep(50); // small throttle
				} catch (InterruptedException e) {
					Thread.currentThread().interrupt();
					break;
				} catch (Exception e) {
					LOGGER.error("Unexpected error in PeerSendManager for peer {}: {}", peer, e.getMessage(), e);
				}
			}
		});
	}

	public boolean queueMessage(Message message, int timeout) {
		if (coolingDown) {
			LOGGER.debug("In cooldown, ignoring message {}", message.getId());

			return false;
		}

		lastUsed = System.currentTimeMillis();
		if (!queue.offer(new TimedMessage(message, timeout))) {
			LOGGER.debug("Send queue full, dropping message {}", message.getId());

			return false;
		}

		return true;
	}

	public boolean isIdle(long cutoffMillis) {
		return System.currentTimeMillis() - lastUsed > cutoffMillis;
	}

	public void shutdown() {
		queue.clear();
		executor.shutdownNow();
	}

	private static class TimedMessage {
		final Message message;
		final long timestamp;
		final int timeout;

		TimedMessage(Message message, int timeout) {
			this.message = message;
			this.timestamp = System.currentTimeMillis();
			this.timeout = timeout;
		}
	}
}
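PeerSendManager above gives each peer its own single-threaded, retrying send queue with staleness and cool-down handling. As a hedged usage sketch only (the `peer` and `message` variables are assumed to exist elsewhere; none of this code is part of the commit):

	// Hypothetical usage sketch, not part of this diff
	PeerSendManager sendManager = new PeerSendManager(peer);

	// Queue a message with a 10-second send timeout; returns false while cooling down or if the queue rejects it
	boolean queued = sendManager.queueMessage(message, 10_000);

	// A caller might periodically reap managers that have been idle for, say, five minutes
	if (sendManager.isIdle(5 * 60 * 1000L)) {
		sendManager.shutdown();
	}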
@ -0,0 +1,43 @@
package org.qortal.network.message;

import org.qortal.data.crosschain.ForeignFeeDecodedData;
import org.qortal.utils.ForeignFeesMessageUtils;

import java.nio.ByteBuffer;
import java.util.List;

/**
 * For sending foreign fee info to a remote peer.
 */
public class ForeignFeesMessage extends Message {

	public static final long MIN_PEER_VERSION = 0x300060000L; // 3.6.0

	private List<ForeignFeeDecodedData> foreignFees;

	public ForeignFeesMessage(List<ForeignFeeDecodedData> foreignFeeDecodedData) {
		super(MessageType.FOREIGN_FEES);

		this.dataBytes = ForeignFeesMessageUtils.fromDataToSendBytes(foreignFeeDecodedData);
		this.checksumBytes = Message.generateChecksum(this.dataBytes);
	}

	private ForeignFeesMessage(int id, List<ForeignFeeDecodedData> foreignFees) {
		super(id, MessageType.FOREIGN_FEES);

		this.foreignFees = foreignFees;
	}

	public List<ForeignFeeDecodedData> getForeignFees() {
		return this.foreignFees;
	}

	public static Message fromByteBuffer(int id, ByteBuffer bytes) throws MessageException {
		List<ForeignFeeDecodedData> foreignFeeDecodedData = ForeignFeesMessageUtils.fromSendBytesToData(bytes);

		return new ForeignFeesMessage(id, foreignFeeDecodedData);
	}

}
@ -0,0 +1,46 @@
package org.qortal.network.message;

import org.qortal.data.crosschain.ForeignFeeDecodedData;
import org.qortal.utils.ForeignFeesMessageUtils;

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;

public class GetForeignFeesMessage extends Message {

	private static final Map<Long, Map<Byte, byte[]>> EMPTY_ONLINE_ACCOUNTS = Collections.emptyMap();
	private final List<ForeignFeeDecodedData> foreignFeeDecodedData;

	public GetForeignFeesMessage(List<ForeignFeeDecodedData> foreignFeeDecodedData) {
		super(MessageType.GET_FOREIGN_FEES);

		this.foreignFeeDecodedData = foreignFeeDecodedData;

		// If we don't have ANY foreign fee data then it's an easier construction...
		if (foreignFeeDecodedData.isEmpty()) {
			this.dataBytes = EMPTY_DATA_BYTES;
			return;
		}

		this.dataBytes = ForeignFeesMessageUtils.fromDataToGetBytes(foreignFeeDecodedData);
		this.checksumBytes = Message.generateChecksum(this.dataBytes);
	}

	private GetForeignFeesMessage(int id, List<ForeignFeeDecodedData> foreignFeeDecodedData) {
		super(id, MessageType.GET_FOREIGN_FEES);

		this.foreignFeeDecodedData = foreignFeeDecodedData;
	}

	public List<ForeignFeeDecodedData> getForeignFeeData() {
		return foreignFeeDecodedData;
	}

	public static Message fromByteBuffer(int id, ByteBuffer bytes) {

		return new GetForeignFeesMessage(id, ForeignFeesMessageUtils.fromGetBytesToData(bytes));
	}

}
@ -79,7 +79,10 @@ public enum MessageType {
	GET_NAME(182, GetNameMessage::fromByteBuffer),

	TRANSACTIONS(190, TransactionsMessage::fromByteBuffer),
	GET_ACCOUNT_TRANSACTIONS(191, GetAccountTransactionsMessage::fromByteBuffer);
	GET_ACCOUNT_TRANSACTIONS(191, GetAccountTransactionsMessage::fromByteBuffer),

	FOREIGN_FEES( 200, ForeignFeesMessage::fromByteBuffer),
	GET_FOREIGN_FEES( 201, GetForeignFeesMessage::fromByteBuffer);

	public final int value;
	public final MessageProducer fromByteBufferMethod;
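For context only, a hedged sketch of how a handler might pair the two new message types, following the usual Qortal request/response pattern; the method name and the lookupForeignFees() helper are hypothetical and not part of this commit:

	// Hypothetical handler sketch; lookupForeignFees() is a placeholder for wherever foreign fee data is tracked
	private void onNetworkGetForeignFeesMessage(Peer peer, Message message) {
		GetForeignFeesMessage getForeignFeesMessage = (GetForeignFeesMessage) message;

		List<ForeignFeeDecodedData> reply = lookupForeignFees(getForeignFeesMessage.getForeignFeeData());

		ForeignFeesMessage foreignFeesMessage = new ForeignFeesMessage(reply);
		foreignFeesMessage.setId(message.getId());

		if (!peer.sendMessage(foreignFeesMessage))
			peer.disconnect("failed to send foreign fees");
	}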
@ -31,8 +31,28 @@ public class ChannelWriteTask implements Task {
	@Override
	public void perform() throws InterruptedException {
		try {
			boolean isSocketClogged = peer.writeChannel();

			boolean isSocketClogged;
			int clogCounter = 0;
			do {
				isSocketClogged = peer.writeChannel();

				if (clogCounter > 9) {
					LOGGER.warn("10 Socket Clogs - GIVING UP");
					break;
				}
				if (isSocketClogged) {
					LOGGER.debug(
						"socket is clogged: peer = {} {}, retrying",
						peer.getPeerData().getAddress().toString(),
						Thread.currentThread().getName()
					);
					Thread.sleep(1000);
					clogCounter++;
				}

			} while( isSocketClogged );

			// Tell Network that we've finished
			Network.getInstance().notifyChannelNotWriting(socketChannel);

@ -49,4 +69,4 @@ public class ChannelWriteTask implements Task {
				peer.disconnect("I/O error");
			}
		}
	}
}
@ -14,6 +14,8 @@ public interface ATRepository {
	/** Returns ATData using AT's address or null if none found */
	public ATData fromATAddress(String atAddress) throws DataException;

	public List<ATData> fromATAddresses(List<String> atAddresses) throws DataException;

	/** Returns where AT with passed address exists in repository */
	public boolean exists(String atAddress) throws DataException;

@ -62,6 +64,8 @@ public interface ATRepository {
	 */
	public ATStateData getLatestATState(String atAddress) throws DataException;

	public List<ATStateData> getLatestATStates(List<String> collect) throws DataException;

	/**
	 * Returns final ATStateData for ATs matching codeHash (required)
	 * and specific data segment value (optional).
@ -130,6 +130,8 @@ public interface AccountRepository {
	 */
	public AccountBalanceData getBalance(String address, long assetId) throws DataException;

	public List<AccountBalanceData> getBalances(List<String> addresses, long assetId) throws DataException;

	/** Returns all account balances for given assetID, optionally excluding zero balances. */
	public List<AccountBalanceData> getAssetBalances(long assetId, Boolean excludeZero) throws DataException;

@ -3,6 +3,7 @@ package org.qortal.repository;
import org.qortal.data.naming.NameData;

import java.util.List;
import java.util.Optional;

public interface NameRepository {

@ -34,10 +35,17 @@ public interface NameRepository {
		return getNamesByOwner(address, null, null, null);
	}

	public int setPrimaryName(String address, String primaryName) throws DataException;

	public void removePrimaryName(String address) throws DataException;

	public Optional<String> getPrimaryName(String address) throws DataException;

	public int clearPrimaryNames() throws DataException;

	public List<String> getRecentNames(long startTimestamp) throws DataException;

	public void save(NameData nameData) throws DataException;

	public void delete(String name) throws DataException;

}
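A hedged sketch of how the new primary-name methods might be called from repository-level code; the variable names are illustrative and not part of this commit:

	// Hypothetical sketch - resolve an address's primary name, falling back to its oldest registered name
	NameRepository nameRepository = repository.getNameRepository();

	Optional<String> primaryName = nameRepository.getPrimaryName(address);
	if (primaryName.isEmpty()) {
		List<NameData> owned = nameRepository.getNamesByOwner(address);
		if (!owned.isEmpty())
			nameRepository.setPrimaryName(address, owned.get(0).getName());
	}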
@ -1,6 +1,7 @@
package org.qortal.repository;

import java.io.IOException;
import java.sql.Connection;
import java.util.concurrent.TimeoutException;

public interface Repository extends AutoCloseable {
@ -62,4 +63,5 @@ public interface Repository extends AutoCloseable {

	public static void attemptRecovery(String connectionUrl, String name) throws DataException {}

	public Connection getConnection();
}
@ -18,6 +18,8 @@ public interface TransactionRepository {

	public TransactionData fromSignature(byte[] signature) throws DataException;

	public List<TransactionData> fromSignatures(List<byte[]> signatures) throws DataException;

	public TransactionData fromReference(byte[] reference) throws DataException;

	public TransactionData fromHeightAndSequence(int height, int sequence) throws DataException;
@ -351,4 +353,5 @@ public interface TransactionRepository {

	public void delete(TransactionData transactionData) throws DataException;

}
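The new fromSignatures() method allows one batched query in place of a loop of fromSignature() calls; a hedged caller sketch (variables illustrative, not part of this commit):

	// Hypothetical sketch - batch-fetch several transactions by signature
	List<byte[]> signatures = List.of(signatureA, signatureB); // assumed to be known signatures
	List<TransactionData> transactions = repository.getTransactionRepository().fromSignatures(signatures);

	for (TransactionData transactionData : transactions)
		LOGGER.debug("fetched transaction type {}", transactionData.getType());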
@ -15,8 +15,12 @@ import org.qortal.utils.ByteArray;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import org.qortal.data.account.AccountData;

@ -76,6 +80,63 @@ public class HSQLDBATRepository implements ATRepository {
		}
	}

	@Override
	public List<ATData> fromATAddresses(List<String> atAddresses) throws DataException {
		String sql = "SELECT creator, created_when, version, asset_id, code_bytes, code_hash, "
				+ "is_sleeping, sleep_until_height, is_finished, had_fatal_error, "
				+ "is_frozen, frozen_balance, sleep_until_message_timestamp, AT_address "
				+ "FROM ATs "
				+ "WHERE AT_address IN ("
				+ String.join(", ", Collections.nCopies(atAddresses.size(), "?"))
				+ ")"
				;

		List<ATData> list;
		try (ResultSet resultSet = this.repository.checkedExecute(sql, atAddresses.toArray(new String[atAddresses.size()]))) {
			if (resultSet == null) {
				return new ArrayList<>(0);
			}

			list = new ArrayList<>(atAddresses.size());

			do {
				byte[] creatorPublicKey = resultSet.getBytes(1);
				long created = resultSet.getLong(2);
				int version = resultSet.getInt(3);
				long assetId = resultSet.getLong(4);
				byte[] codeBytes = resultSet.getBytes(5); // Actually BLOB
				byte[] codeHash = resultSet.getBytes(6);
				boolean isSleeping = resultSet.getBoolean(7);

				Integer sleepUntilHeight = resultSet.getInt(8);
				if (sleepUntilHeight == 0 && resultSet.wasNull())
					sleepUntilHeight = null;

				boolean isFinished = resultSet.getBoolean(9);
				boolean hadFatalError = resultSet.getBoolean(10);
				boolean isFrozen = resultSet.getBoolean(11);

				Long frozenBalance = resultSet.getLong(12);
				if (frozenBalance == 0 && resultSet.wasNull())
					frozenBalance = null;

				Long sleepUntilMessageTimestamp = resultSet.getLong(13);
				if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull())
					sleepUntilMessageTimestamp = null;

				String atAddress = resultSet.getString(14);

				list.add(new ATData(atAddress, creatorPublicKey, created, version, assetId, codeBytes, codeHash,
						isSleeping, sleepUntilHeight, isFinished, hadFatalError, isFrozen, frozenBalance,
						sleepUntilMessageTimestamp));
			} while ( resultSet.next());

			return list;
		} catch (SQLException e) {
			throw new DataException("Unable to fetch AT from repository", e);
		}
	}

	@Override
	public boolean exists(String atAddress) throws DataException {
		try {
@ -403,6 +464,56 @@ public class HSQLDBATRepository implements ATRepository {
		}
	}

	@Override
	public List<ATStateData> getLatestATStates(List<String> atAddresses) throws DataException{
		String sql = "SELECT height, state_data, state_hash, fees, is_initial, sleep_until_message_timestamp, AT_address "
				+ "FROM ATStates "
				+ "JOIN ATStatesData USING (AT_address, height) "
				+ "WHERE ATStates.AT_address IN ("
				+ String.join(", ", Collections.nCopies(atAddresses.size(), "?"))
				+ ")";

		List<ATStateData> stateDataList;

		try (ResultSet resultSet = this.repository.checkedExecute(sql, atAddresses.toArray(new String[atAddresses.size()]))) {
			if (resultSet == null)
				return new ArrayList<>(0);

			stateDataList = new ArrayList<>();

			do {
				int height = resultSet.getInt(1);
				byte[] stateData = resultSet.getBytes(2); // Actually BLOB
				byte[] stateHash = resultSet.getBytes(3);
				long fees = resultSet.getLong(4);
				boolean isInitial = resultSet.getBoolean(5);

				Long sleepUntilMessageTimestamp = resultSet.getLong(6);
				if (sleepUntilMessageTimestamp == 0 && resultSet.wasNull())
					sleepUntilMessageTimestamp = null;

				String atAddress = resultSet.getString(7);
				stateDataList.add(new ATStateData(atAddress, height, stateData, stateHash, fees, isInitial, sleepUntilMessageTimestamp));
			} while( resultSet.next());
		} catch (SQLException e) {
			throw new DataException("Unable to fetch latest AT state from repository", e);
		}

		Map<String, List<ATStateData>> stateDataByAtAddress
				= stateDataList.stream()
						.collect(Collectors.groupingBy(ATStateData::getATAddress));

		List<ATStateData> latestForEachAtAddress
				= stateDataByAtAddress.values().stream()
						.map(list -> list.stream()
								.max(Comparator.comparing(ATStateData::getHeight))
								.orElse(null))
						.filter(obj -> obj != null)
						.collect(Collectors.toList());

		return latestForEachAtAddress;
	}

	@Override
	public List<ATStateData> getMatchingFinalATStates(byte[] codeHash, byte[] buyerPublicKey, byte[] sellerPublicKey, Boolean isFinished,
			Integer dataByteOffset, Long expectedValue, Integer minimumFinalHeight,
@ -407,6 +407,39 @@ public class HSQLDBAccountRepository implements AccountRepository {
		}
	}

	@Override
	public List<AccountBalanceData> getBalances(List<String> addresses, long assetId) throws DataException {

		StringBuffer sql = new StringBuffer();
		sql.append("SELECT balance, account, asset_id FROM AccountBalances ");
		sql.append("WHERE account IN (");
		sql.append(String.join(", ", Collections.nCopies(addresses.size(), "?")));
		sql.append(")");

		try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), addresses.toArray(new String[addresses.size()]))) {
			if (resultSet == null)
				return new ArrayList<>(0);

			List<AccountBalanceData> balances = new ArrayList<>(addresses.size());
			do {
				long balance = resultSet.getLong(1);
				String address = resultSet.getString(2);
				Long assetIdResult = resultSet.getLong(3);

				if( assetIdResult != assetId ) {
					LOGGER.warn("assetIdResult = " + assetIdResult);
					continue;
				}

				balances.add(new AccountBalanceData(address, assetId, balance) );
			} while( resultSet.next());

			return balances;
		} catch (SQLException e) {
			throw new DataException("Unable to fetch account balance from repository", e);
		}
	}

	@Override
	public List<AccountBalanceData> getAssetBalances(long assetId, Boolean excludeZero) throws DataException {
		StringBuilder sql = new StringBuilder(1024);
@ -468,7 +468,7 @@ public class HSQLDBCacheUtils {

				Thread.currentThread().setName(DB_CACHE_TIMER_TASK);

				try (final HSQLDBRepository respository = (HSQLDBRepository) Controller.REPOSITORY_FACTORY.getRepository()) {
				try (final Repository respository = RepositoryManager.getRepository()) {
					fillCache(ArbitraryResourceCache.getInstance(), respository);
				}
				catch( DataException e ) {
@ -611,7 +611,7 @@ public class HSQLDBCacheUtils {
	private static int recordCurrentBalances(ConcurrentHashMap<Integer, List<AccountBalanceData>> balancesByHeight) {
		int currentHeight;

		try (final HSQLDBRepository repository = (HSQLDBRepository) Controller.REPOSITORY_FACTORY.getRepository()) {
		try (final Repository repository = RepositoryManager.getRepository()) {

			// get current balances
			List<AccountBalanceData> accountBalances = getAccountBalances(repository);
@ -675,7 +675,7 @@ public class HSQLDBCacheUtils {
	 * @param cache the cache to fill
	 * @param repository the data source to fill the cache with
	 */
	public static void fillCache(ArbitraryResourceCache cache, HSQLDBRepository repository) {
	public static void fillCache(ArbitraryResourceCache cache, Repository repository) {

		try {
			// ensure all data is committed in, before we query it
@ -713,7 +713,7 @@ public class HSQLDBCacheUtils {
	 *
	 * @throws SQLException
	 */
	private static void fillNamepMap(ConcurrentHashMap<String, Integer> levelByName, HSQLDBRepository repository ) throws SQLException {
	private static void fillNamepMap(ConcurrentHashMap<String, Integer> levelByName, Repository repository ) throws SQLException {

		StringBuilder sql = new StringBuilder(512);

@ -721,7 +721,7 @@ public class HSQLDBCacheUtils {
		sql.append("FROM NAMES ");
		sql.append("INNER JOIN ACCOUNTS on owner = account ");

		Statement statement = repository.connection.createStatement();
		Statement statement = repository.getConnection().createStatement();

		ResultSet resultSet = statement.executeQuery(sql.toString());

@ -744,7 +744,7 @@ public class HSQLDBCacheUtils {
	 * @return the resources
	 * @throws SQLException
	 */
	private static List<ArbitraryResourceData> getResources( HSQLDBRepository repository) throws SQLException {
	private static List<ArbitraryResourceData> getResources( Repository repository) throws SQLException {

		List<ArbitraryResourceData> resources = new ArrayList<>();

@ -756,7 +756,7 @@ public class HSQLDBCacheUtils {
		sql.append("LEFT JOIN ArbitraryMetadataCache USING (service, name, identifier) WHERE name IS NOT NULL");

		List<ArbitraryResourceData> arbitraryResources = new ArrayList<>();
		Statement statement = repository.connection.createStatement();
		Statement statement = repository.getConnection().createStatement();

		ResultSet resultSet = statement.executeQuery(sql.toString());

@ -822,7 +822,7 @@ public class HSQLDBCacheUtils {
		return resources;
	}

	public static List<AccountBalanceData> getAccountBalances(HSQLDBRepository repository) {
	public static List<AccountBalanceData> getAccountBalances(Repository repository) {

		StringBuilder sql = new StringBuilder();

@ -836,7 +836,7 @@ public class HSQLDBCacheUtils {
		LOGGER.info( "Getting account balances ...");

		try {
			Statement statement = repository.connection.createStatement();
			Statement statement = repository.getConnection().createStatement();

			ResultSet resultSet = statement.executeQuery(sql.toString());

@ -1,5 +1,8 @@
package org.qortal.repository.hsqldb;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.block.BlockChain;
import org.qortal.data.chat.ActiveChats;
import org.qortal.data.chat.ActiveChats.DirectChat;
import org.qortal.data.chat.ActiveChats.GroupChat;
@ -18,6 +21,8 @@ import static org.qortal.data.chat.ChatMessage.Encoding;

public class HSQLDBChatRepository implements ChatRepository {

	private static final Logger LOGGER = LogManager.getLogger(HSQLDBChatRepository.class);

	protected HSQLDBRepository repository;

	public HSQLDBChatRepository(HSQLDBRepository repository) {
@ -35,13 +40,23 @@ public class HSQLDBChatRepository implements ChatRepository {

		StringBuilder sql = new StringBuilder(1024);

		String tableName;

		// if the PrimaryTable is available, then use it
		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
			tableName = "PrimaryNames";
		}
		else {
			tableName = "Names";
		}

		sql.append("SELECT created_when, tx_group_id, Transactions.reference, creator, "
				+ "sender, SenderNames.name, recipient, RecipientNames.name, "
				+ "chat_reference, data, is_text, is_encrypted, signature "
				+ "FROM ChatTransactions "
				+ "JOIN Transactions USING (signature) "
				+ "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
				+ "LEFT OUTER JOIN Names AS RecipientNames ON RecipientNames.owner = recipient ");
				+ "LEFT OUTER JOIN " + tableName + " AS SenderNames ON SenderNames.owner = sender "
				+ "LEFT OUTER JOIN " + tableName + " AS RecipientNames ON RecipientNames.owner = recipient ");

		// WHERE clauses

@ -142,10 +157,21 @@ public class HSQLDBChatRepository implements ChatRepository {

	@Override
	public ChatMessage toChatMessage(ChatTransactionData chatTransactionData, Encoding encoding) throws DataException {

		String tableName;

		// if the PrimaryTable is available, then use it
		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
			tableName = "PrimaryNames";
		}
		else {
			tableName = "Names";
		}

		String sql = "SELECT SenderNames.name, RecipientNames.name "
				+ "FROM ChatTransactions "
				+ "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
				+ "LEFT OUTER JOIN Names AS RecipientNames ON RecipientNames.owner = recipient "
				+ "LEFT OUTER JOIN " + tableName + " AS SenderNames ON SenderNames.owner = sender "
				+ "LEFT OUTER JOIN " + tableName + " AS RecipientNames ON RecipientNames.owner = recipient "
				+ "WHERE signature = ?";

		try (ResultSet resultSet = this.repository.checkedExecute(sql, chatTransactionData.getSignature())) {
@ -184,6 +210,16 @@ public class HSQLDBChatRepository implements ChatRepository {
	}

	private List<GroupChat> getActiveGroupChats(String address, Encoding encoding, Boolean hasChatReference) throws DataException {
		String tableName;

		// if the PrimaryTable is available, then use it
		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
			tableName = "PrimaryNames";
		}
		else {
			tableName = "Names";
		}

		// Find groups where address is a member and potential latest message details
		String groupsSql = "SELECT group_id, group_name, latest_timestamp, sender, sender_name, signature, data "
				+ "FROM GroupMembers "
@ -192,7 +228,7 @@ public class HSQLDBChatRepository implements ChatRepository {
				+ "SELECT created_when AS latest_timestamp, sender, name AS sender_name, signature, data "
				+ "FROM ChatTransactions "
				+ "JOIN Transactions USING (signature) "
				+ "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
				+ "LEFT OUTER JOIN " + tableName + " AS SenderNames ON SenderNames.owner = sender "
				// NOTE: We need to qualify "Groups.group_id" here to avoid "General error" bug in HSQLDB v2.5.0
				+ "WHERE tx_group_id = Groups.group_id AND type = " + TransactionType.CHAT.value + " ";

@ -236,7 +272,7 @@ public class HSQLDBChatRepository implements ChatRepository {
		String grouplessSql = "SELECT created_when, sender, SenderNames.name, signature, data "
				+ "FROM ChatTransactions "
				+ "JOIN Transactions USING (signature) "
				+ "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
				+ "LEFT OUTER JOIN " + tableName + " AS SenderNames ON SenderNames.owner = sender "
				+ "WHERE tx_group_id = 0 "
				+ "AND recipient IS NULL ";

@ -276,6 +312,16 @@ public class HSQLDBChatRepository implements ChatRepository {
	}

	private List<DirectChat> getActiveDirectChats(String address, Boolean hasChatReference) throws DataException {
		String tableName;

		// if the PrimaryTable is available, then use it
		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {
			tableName = "PrimaryNames";
		}
		else {
			tableName = "Names";
		}

		// Find chat messages involving address
		String directSql = "SELECT other_address, name, latest_timestamp, sender, sender_name "
				+ "FROM ("
@ -289,7 +335,7 @@ public class HSQLDBChatRepository implements ChatRepository {
				+ "SELECT created_when AS latest_timestamp, sender, name AS sender_name "
				+ "FROM ChatTransactions "
				+ "NATURAL JOIN Transactions "
				+ "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
				+ "LEFT OUTER JOIN " + tableName + " AS SenderNames ON SenderNames.owner = sender "
				+ "WHERE (sender = other_address AND recipient = ?) "
				+ "OR (sender = ? AND recipient = other_address) ";

@ -305,7 +351,7 @@ public class HSQLDBChatRepository implements ChatRepository {
		directSql += "ORDER BY created_when DESC "
				+ "LIMIT 1"
				+ ") AS LatestMessages "
				+ "LEFT OUTER JOIN Names ON owner = other_address";
				+ "LEFT OUTER JOIN " + tableName + " ON owner = other_address";

		Object[] bindParams = new Object[] { address, address, address, address };

@ -1053,6 +1053,12 @@ public class HSQLDBDatabaseUpdates {
				stmt.execute("UPDATE Accounts SET blocks_minted_penalty = -5000000 WHERE blocks_minted_penalty < 0");
				break;

			case 50:
				// Primary name for a Qortal Address, 0-1 for any address
				stmt.execute("CREATE TABLE PrimaryNames (owner QortalAddress, name RegisteredName, "
						+ "PRIMARY KEY (owner), FOREIGN KEY (name) REFERENCES Names (name) ON DELETE CASCADE)");
				break;

			default:
				// nothing to do
				return false;
@ -8,6 +8,7 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

public class HSQLDBNameRepository implements NameRepository {

@ -267,7 +268,7 @@ public class HSQLDBNameRepository implements NameRepository {
		StringBuilder sql = new StringBuilder(512);

		sql.append("SELECT name, reduced_name, data, registered_when, updated_when, "
				+ "is_for_sale, sale_price, reference, creation_group_id FROM Names WHERE owner = ? ORDER BY name");
				+ "is_for_sale, sale_price, reference, creation_group_id FROM Names WHERE owner = ? ORDER BY registered_when");

		if (reverse != null && reverse)
			sql.append(" DESC");
@ -333,6 +334,55 @@ public class HSQLDBNameRepository implements NameRepository {
		}
	}

	@Override
	public void removePrimaryName(String address) throws DataException {
		try {
			this.repository.delete("PrimaryNames", "owner = ?", address);
		} catch (SQLException e) {
			throw new DataException("Unable to delete primary name from repository", e);
		}
	}

	@Override
	public Optional<String> getPrimaryName(String address) throws DataException {
		String sql = "SELECT name FROM PrimaryNames WHERE owner = ?";

		List<String> names = new ArrayList<>();

		try (ResultSet resultSet = this.repository.checkedExecute(sql, address)) {
			if (resultSet == null)
				return Optional.empty();

			String name = resultSet.getString(1);

			return Optional.of(name);
		} catch (SQLException e) {
			throw new DataException("Unable to fetch recent names from repository", e);
		}
	}

	@Override
	public int setPrimaryName(String address, String primaryName) throws DataException {

		String sql = "INSERT INTO PrimaryNames (owner, name) VALUES (?, ?) ON DUPLICATE KEY UPDATE name = ?";

		try{
			return this.repository.executeCheckedUpdate(sql, address, primaryName, primaryName);
		} catch (SQLException e) {
			throw new DataException("Unable to set primary name", e);
		}
	}

	@Override
	public int clearPrimaryNames() throws DataException {

		try {
			return this.repository.delete("PrimaryNames");
		} catch (SQLException e) {
			throw new DataException("Unable to clear primary names from repository", e);
		}
	}

	@Override
	public void save(NameData nameData) throws DataException {
		HSQLDBSaver saveHelper = new HSQLDBSaver("Names");
@ -174,6 +174,11 @@ public class HSQLDBRepository implements Repository {

	// Transaction COMMIT / ROLLBACK / savepoints

	@Override
	public Connection getConnection() {
		return this.connection;
	}

	@Override
	public void saveChanges() throws DataException {
		long beforeQuery = this.slowQueryThreshold == null ? 0 : System.currentTimeMillis();
@ -155,6 +155,58 @@ public class HSQLDBTransactionRepository implements TransactionRepository {
		}
	}

	@Override
	public List<TransactionData> fromSignatures(List<byte[]> signatures) throws DataException {
		StringBuffer sql = new StringBuffer();

		sql.append("SELECT type, reference, creator, created_when, fee, tx_group_id, block_height, approval_status, approval_height, signature ");
		sql.append("FROM Transactions WHERE signature IN (");
		sql.append(String.join(", ", Collections.nCopies(signatures.size(), "?")));
		sql.append(")");

		List<TransactionData> list;
		try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), signatures.toArray(new byte[0][]))) {
			if (resultSet == null) {
				return new ArrayList<>(0);
			}

			list = new ArrayList<>(signatures.size());

			do {
				TransactionType type = TransactionType.valueOf(resultSet.getInt(1));

				byte[] reference = resultSet.getBytes(2);
				byte[] creatorPublicKey = resultSet.getBytes(3);
				long timestamp = resultSet.getLong(4);

				Long fee = resultSet.getLong(5);
				if (fee == 0 && resultSet.wasNull())
					fee = null;

				int txGroupId = resultSet.getInt(6);

				Integer blockHeight = resultSet.getInt(7);
				if (blockHeight == 0 && resultSet.wasNull())
					blockHeight = null;

				ApprovalStatus approvalStatus = ApprovalStatus.valueOf(resultSet.getInt(8));
				Integer approvalHeight = resultSet.getInt(9);
				if (approvalHeight == 0 && resultSet.wasNull())
					approvalHeight = null;

				byte[] signature = resultSet.getBytes(10);

				BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, creatorPublicKey, fee, approvalStatus, blockHeight, approvalHeight, signature);

				list.add( fromBase(type, baseTransactionData) );
			} while( resultSet.next());

			return list;
		} catch (SQLException e) {
			throw new DataException("Unable to fetch transactions from repository", e);
		}
	}

	@Override
	public TransactionData fromReference(byte[] reference) throws DataException {
		String sql = "SELECT type, signature, creator, created_when, fee, tx_group_id, block_height, approval_status, approval_height "
@ -213,7 +213,7 @@ public class Settings {
	public long recoveryModeTimeout = 9999999999999L;

	/** Minimum peer version number required in order to sync with them */
	private String minPeerVersion = "4.6.5";
	private String minPeerVersion = "5.0.0";
	/** Whether to allow connections with peers below minPeerVersion
	 * If true, we won't sync with them but they can still sync with us, and will show in the peers list
	 * If false, sync will be blocked both ways, and they will not appear in the peers list */
@ -756,17 +756,17 @@ public class Settings {
	private void setAdditionalDefaults() {
		// Populate defaults for maxThreadsPerMessageType. If any are specified in settings.json, they will take priority.
		maxThreadsPerMessageType.add(new ThreadLimit("ARBITRARY_DATA_FILE", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_ARBITRARY_DATA_FILE", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_ARBITRARY_DATA_FILE", 15));
		maxThreadsPerMessageType.add(new ThreadLimit("ARBITRARY_DATA", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_ARBITRARY_DATA", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("ARBITRARY_DATA_FILE_LIST", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_ARBITRARY_DATA_FILE_LIST", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("ARBITRARY_DATA_FILE_LIST", 50));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_ARBITRARY_DATA_FILE_LIST", 50));
		maxThreadsPerMessageType.add(new ThreadLimit("ARBITRARY_SIGNATURES", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("ARBITRARY_METADATA", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_ARBITRARY_METADATA", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_TRANSACTION", 10));
		maxThreadsPerMessageType.add(new ThreadLimit("TRANSACTION_SIGNATURES", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("TRADE_PRESENCES", 5));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_ARBITRARY_METADATA", 100));
		maxThreadsPerMessageType.add(new ThreadLimit("GET_TRANSACTION", 50));
		maxThreadsPerMessageType.add(new ThreadLimit("TRANSACTION_SIGNATURES", 50));
		maxThreadsPerMessageType.add(new ThreadLimit("TRADE_PRESENCES", 50));
	}

	// Getters / setters
@ -16,6 +16,7 @@ import org.qortal.utils.Unicode;

import java.util.Collections;
import java.util.List;
import java.util.Optional;

public class BuyNameTransaction extends Transaction {

@ -48,6 +49,15 @@ public class BuyNameTransaction extends Transaction {

	@Override
	public ValidationResult isValid() throws DataException {
		Optional<String> buyerPrimaryName = this.getBuyer().getPrimaryName();
		if( buyerPrimaryName.isPresent() ) {

			NameData nameData = repository.getNameRepository().fromName(buyerPrimaryName.get());
			if (nameData.isForSale()) {
				return ValidationResult.NOT_SUPPORTED;
			}
		}

		String name = this.buyNameTransactionData.getName();

		// Check seller address is valid
@ -79,7 +89,7 @@ public class BuyNameTransaction extends Transaction {
			return ValidationResult.BUYER_ALREADY_OWNER;

		// If accounts are only allowed one registered name then check for this
		if (BlockChain.getInstance().oneNamePerAccount()
		if (BlockChain.getInstance().oneNamePerAccount(this.repository.getBlockRepository().getBlockchainHeight())
				&& !this.repository.getNameRepository().getNamesByOwner(buyer.getAddress()).isEmpty())
			return ValidationResult.MULTIPLE_NAMES_FORBIDDEN;

@ -92,7 +102,7 @@ public class BuyNameTransaction extends Transaction {
			return ValidationResult.INVALID_AMOUNT;

		// Check buyer has enough funds
		if (buyer.getConfirmedBalance(Asset.QORT) < this.buyNameTransactionData.getFee())
		if (buyer.getConfirmedBalance(Asset.QORT) < this.buyNameTransactionData.getFee() + this.buyNameTransactionData.getAmount())
			return ValidationResult.NO_BALANCE;

		return ValidationResult.OK;
@ -117,6 +127,25 @@ public class BuyNameTransaction extends Transaction {

		// Save transaction with updated "name reference" pointing to previous transaction that changed name
		this.repository.getTransactionRepository().save(this.buyNameTransactionData);

		// if multiple names feature is activated, then check the buyer and seller's primary name status
		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {

			Account seller = new Account(this.repository, this.buyNameTransactionData.getSeller());
			Optional<String> sellerPrimaryName = seller.getPrimaryName();

			// if the seller sold their primary name, then remove their primary name
			if (sellerPrimaryName.isPresent() && sellerPrimaryName.get().equals(buyNameTransactionData.getName())) {
				seller.removePrimaryName();
			}

			Account buyer = new Account(this.repository, this.getBuyer().getAddress());

			// if the buyer had no primary name, then set the primary name to the name bought
			if( buyer.getPrimaryName().isEmpty() ) {
				buyer.setPrimaryName(this.buyNameTransactionData.getName());
			}
		}
	}

	@Override
@ -127,6 +156,24 @@ public class BuyNameTransaction extends Transaction {

		// Save this transaction, with previous "name reference"
		this.repository.getTransactionRepository().save(this.buyNameTransactionData);
		}

		// if multiple names feature is activated, then check the buyer and seller's primary name status
		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {

			Account seller = new Account(this.repository, this.buyNameTransactionData.getSeller());

			// if the seller lost their primary name, then set their primary name back
			if (seller.getPrimaryName().isEmpty()) {
				seller.setPrimaryName(this.buyNameTransactionData.getName());
			}

			Account buyer = new Account(this.repository, this.getBuyer().getAddress());
			Optional<String> buyerPrimaryName = buyer.getPrimaryName();

			// if the buyer bought their primary, then remove it
			if( buyerPrimaryName.isPresent() && this.buyNameTransactionData.getName().equals(buyerPrimaryName.get()) ) {
				buyer.removePrimaryName();
			}
		}
	}
}
@ -2,10 +2,12 @@ package org.qortal.transaction;

import com.google.common.base.Utf8;
import org.qortal.account.Account;
import org.qortal.api.resource.TransactionsResource;
import org.qortal.asset.Asset;
import org.qortal.block.BlockChain;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.crypto.Crypto;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.RegisterNameTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.naming.Name;
@ -15,6 +17,7 @@ import org.qortal.utils.Unicode;

import java.util.Collections;
import java.util.List;
import java.util.Optional;

public class RegisterNameTransaction extends Transaction {

@ -54,6 +57,15 @@ public class RegisterNameTransaction extends Transaction {
		Account registrant = getRegistrant();
		String name = this.registerNameTransactionData.getName();

		Optional<String> registrantPrimaryName = registrant.getPrimaryName();
		if( registrantPrimaryName.isPresent() ) {

			NameData nameData = repository.getNameRepository().fromName(registrantPrimaryName.get());
			if (nameData.isForSale()) {
				return ValidationResult.NOT_SUPPORTED;
			}
		}

		int blockchainHeight = this.repository.getBlockRepository().getBlockchainHeight();
		final int start = BlockChain.getInstance().getSelfSponsorshipAlgoV2Height() - 1180;
		final int end = BlockChain.getInstance().getSelfSponsorshipAlgoV3Height();
@ -94,7 +106,7 @@ public class RegisterNameTransaction extends Transaction {
			return ValidationResult.NAME_ALREADY_REGISTERED;

		// If accounts are only allowed one registered name then check for this
		if (BlockChain.getInstance().oneNamePerAccount()
		if (BlockChain.getInstance().oneNamePerAccount(this.repository.getBlockRepository().getBlockchainHeight())
				&& !this.repository.getNameRepository().getNamesByOwner(getRegistrant().getAddress()).isEmpty())
			return ValidationResult.MULTIPLE_NAMES_FORBIDDEN;

@ -117,6 +129,16 @@ public class RegisterNameTransaction extends Transaction {
		// Register Name
		Name name = new Name(this.repository, this.registerNameTransactionData);
		name.register();

		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {

			Account account = new Account(this.repository, this.getCreator().getAddress());

			// if there is no primary name established, then the new registered name is the primary name
			if (account.getPrimaryName().isEmpty()) {
				account.setPrimaryName(this.registerNameTransactionData.getName());
			}
		}
	}

	@Override
@ -45,6 +45,12 @@ public class SellNameTransaction extends Transaction {
	public ValidationResult isValid() throws DataException {
		String name = this.sellNameTransactionData.getName();

		// if the account has more than one name, then they cannot sell their primary name
		if( this.repository.getNameRepository().getNamesByOwner(this.getOwner().getAddress()).size() > 1 &&
				this.getOwner().getPrimaryName().get().equals(name) ) {
			return ValidationResult.NOT_SUPPORTED;
		}

		// Check name size bounds
		int nameLength = Utf8.encodedLength(name);
		if (nameLength < 1 || nameLength > Name.MAX_NAME_SIZE)
@ -3,6 +3,7 @@ package org.qortal.transaction;
import com.google.common.base.Utf8;
import org.qortal.account.Account;
import org.qortal.asset.Asset;
import org.qortal.block.BlockChain;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.crypto.Crypto;
import org.qortal.data.naming.NameData;
@ -49,6 +50,12 @@ public class UpdateNameTransaction extends Transaction {
	public ValidationResult isValid() throws DataException {
		String name = this.updateNameTransactionData.getName();

		// if the account has more than one name, then they cannot update their primary name
		if( this.repository.getNameRepository().getNamesByOwner(this.getOwner().getAddress()).size() > 1 &&
				this.getOwner().getPrimaryName().get().equals(name) ) {
			return ValidationResult.NOT_SUPPORTED;
		}

		// Check name size bounds
		int nameLength = Utf8.encodedLength(name);
		if (nameLength < Name.MIN_NAME_SIZE || nameLength > Name.MAX_NAME_SIZE)
@ -152,6 +159,16 @@ public class UpdateNameTransaction extends Transaction {

		// Save this transaction, now with updated "name reference" to previous transaction that changed name
		this.repository.getTransactionRepository().save(this.updateNameTransactionData);

		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {

			Account account = new Account(this.repository, this.getCreator().getAddress());

			// if updating the primary name, then set primary name to new name
			if( account.getPrimaryName().isEmpty() || account.getPrimaryName().get().equals(this.updateNameTransactionData.getName())) {
				account.setPrimaryName(this.updateNameTransactionData.getNewName());
			}
		}
	}

	@Override
@ -167,6 +184,16 @@ public class UpdateNameTransaction extends Transaction {

		// Save this transaction, with previous "name reference"
		this.repository.getTransactionRepository().save(this.updateNameTransactionData);

		if( this.repository.getBlockRepository().getBlockchainHeight() > BlockChain.getInstance().getMultipleNamesPerAccountHeight()) {

			Account account = new Account(this.repository, this.getCreator().getAddress());

			// if the primary name is the new updated name, then it needs to be set back to the previous name
			if (account.getPrimaryName().isPresent() && account.getPrimaryName().get().equals(this.updateNameTransactionData.getNewName())) {
				account.setPrimaryName(this.updateNameTransactionData.getName());
			}
		}
	}

}
@ -53,7 +53,7 @@ public class ArbitraryIndexUtils {
				try {
					fillCache(IndexCache.getInstance());
				} catch (IOException | DataException e) {
					LOGGER.error(e.getMessage(), e);
					LOGGER.warn(e.getMessage());
				}
			}
		};
@ -111,6 +111,8 @@ public class ArbitraryIndexUtils {

				indexDetails.add( new ArbitraryDataIndexDetail(indexResource.name, rank, indices.get(rank - 1), indexResource.identifier ));
			}
		} catch (MissingDataException e) {
			LOGGER.warn( e.getMessage() );
		} catch (InvalidFormatException e) {
			LOGGER.debug("invalid format, skipping: " + indexResource);
		} catch (UnrecognizedPropertyException e) {
@ -131,16 +133,12 @@ public class ArbitraryIndexUtils {
				)
			);

		LOGGER.info("processed indices by term: count = " + indicesByTerm.size());

		// lock, clear old, load new
		synchronized( IndexCache.getInstance().getIndicesByTerm() ) {
			IndexCache.getInstance().getIndicesByTerm().clear();
			IndexCache.getInstance().getIndicesByTerm().putAll(indicesByTerm);
		}

		LOGGER.info("loaded indices by term");

		LOGGER.debug("processing indices by issuer ...");
		Map<String, List<ArbitraryDataIndexDetail>> indicesByIssuer
			= indexDetails.stream().collect(
@ -154,15 +152,11 @@ public class ArbitraryIndexUtils {
				)
			);

		LOGGER.info("processed indices by issuer: count = " + indicesByIssuer.size());

		// lock, clear old, load new
		synchronized( IndexCache.getInstance().getIndicesByIssuer() ) {
			IndexCache.getInstance().getIndicesByIssuer().clear();
			IndexCache.getInstance().getIndicesByIssuer().putAll(indicesByIssuer);
		}

		LOGGER.info("loaded indices by issuer");
	}
}

@ -199,7 +193,7 @@ public class ArbitraryIndexUtils {
		}
	}

	public static String getJson(String name, String identifier) throws IOException {
	public static String getJson(String name, String identifier) throws IOException, MissingDataException {

		try {
			ArbitraryDataReader arbitraryDataReader
@ -217,11 +211,10 @@ public class ArbitraryIndexUtils {
				} catch (MissingDataException e) {
					if (attempts > maxAttempts) {
						// Give up after 5 attempts
						throw new IOException("Data unavailable. Please try again later.");
						throw e;
					}
				}
			}
			Thread.sleep(3000L);
		}

		java.nio.file.Path outputPath = arbitraryDataReader.getFilePath();
@ -48,6 +48,24 @@ public class ArbitraryTransactionUtils {
		}
	}

	public static List<ArbitraryTransactionData> fetchTransactionDataList(final Repository repository, final List<byte[]> signature) {
		try {
			List<TransactionData> transactions = repository.getTransactionRepository().fromSignatures(signature);

			List<ArbitraryTransactionData> list
				= transactions.stream()
					.filter( transaction -> transaction instanceof ArbitraryTransactionData )
					.map( transactionData -> (ArbitraryTransactionData) transactionData)
					.collect(Collectors.toList());

			return list;

		} catch (DataException e) {
			LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
			return null;
		}
	}

	public static ArbitraryTransactionData fetchLatestPut(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
		if (arbitraryTransactionData == null) {
			return null;
@ -6,6 +6,7 @@ import org.qortal.settings.Settings;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;
@ -232,31 +233,37 @@ public class FilesystemUtils {
	}

	public static byte[] getSingleFileContents(Path path, Integer maxLength) throws IOException {
		byte[] data = null;
		// TODO: limit the file size that can be loaded into memory

		// If the path is a file, read the contents directly
		if (path.toFile().isFile()) {
			int fileSize = (int)path.toFile().length();
			maxLength = maxLength != null ? Math.min(maxLength, fileSize) : fileSize;
			data = FilesystemUtils.readFromFile(path.toString(), 0, maxLength);
		}

		// Or if it's a directory, only load file contents if there is a single file inside it
		else if (path.toFile().isDirectory()) {
		Path filePath = null;

		if (Files.isRegularFile(path)) {
			filePath = path;
		} else if (Files.isDirectory(path)) {
			String[] files = ArrayUtils.removeElement(path.toFile().list(), ".qortal");
			if (files.length == 1) {
				Path filePath = Paths.get(path.toString(), files[0]);
				if (filePath.toFile().isFile()) {
					int fileSize = (int)filePath.toFile().length();
					maxLength = maxLength != null ? Math.min(maxLength, fileSize) : fileSize;
					data = FilesystemUtils.readFromFile(filePath.toString(), 0, maxLength);
				}
				filePath = path.resolve(files[0]);
			}
		}

		return data;

		if (filePath == null || !Files.exists(filePath)) {
			return null;
		}

		long fileSize = Files.size(filePath);
		int length = (maxLength != null) ? Math.min(maxLength, (int) Math.min(fileSize, Integer.MAX_VALUE)) : (int) Math.min(fileSize, Integer.MAX_VALUE);

		try (InputStream in = Files.newInputStream(filePath)) {
			byte[] buffer = new byte[length];
			int bytesRead = in.read(buffer);
			if (bytesRead < length) {
				// Resize buffer to actual read size
				byte[] trimmed = new byte[bytesRead];
				System.arraycopy(buffer, 0, trimmed, 0, bytesRead);
				return trimmed;
			}
			return buffer;
		}
	}

	/**
	 * isSingleFileResource
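A hedged sketch of calling the rewritten helper above; the path value is illustrative and not part of this commit:

	// Hypothetical usage sketch - read at most 1 KiB from a single-file resource directory
	Path resourcePath = Paths.get("/tmp/qortal-resource"); // illustrative path
	byte[] contents = FilesystemUtils.getSingleFileContents(resourcePath, 1024);
	if (contents == null) {
		// path missing, or the directory contains more than one file besides .qortal
	}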
Some files were not shown because too many files have changed in this diff.