Mirror of https://github.com/Qortal/altcoinj.git (synced 2025-02-12 10:15:52 +00:00)

commit 68a614a33b (parent 534cec9791)

nonetty: Fix a couple of unit test bugs.

Remove references to netty that still appeared in comments/POM.
@@ -136,7 +136,6 @@
 <urn>com.h2database:h2:1.3.167:jar:null:compile:d3867d586f087e53eb12fc65e5693d8ee9a5da17</urn>
 <urn>com.lambdaworks:scrypt:1.3.3:jar:null:compile:06d6813de41e177189e1722717979b4fb5454b1d</urn>
 <urn>com.madgag:sc-light-jdk15on:1.47.0.2:jar:null:compile:d5c98671cc97fa0d928be1c7eb5edd3fb95d3234</urn>
-<urn>io.netty:netty:3.6.3.Final:jar:null:compile:1eebfd2f79dd72c44d09d9917c549c60322462b8</urn>
 <urn>net.jcip:jcip-annotations:1.0:jar:null:compile:afba4942caaeaf46aab0b976afd57cc7c181467e</urn>
 <urn>net.sf.jopt-simple:jopt-simple:4.3:jar:null:compile:88ffca34311a6564a98f14820431e17b4382a069</urn>
 <urn>org.slf4j:slf4j-api:1.7.5:jar:null:compile:6b262da268f8ad9eff941b25503a9198f0a0ac93</urn>
@@ -495,10 +495,6 @@ public class PeerGroup extends AbstractIdleService implements TransactionBroadca
         } finally {
             lock.unlock();
         }
-        // Don't do connectTo whilst holding the PeerGroup lock because this can trigger some amazingly deep stacks
-        // and potentially circular deadlock in the case of immediate failure (eg, attempt to access IPv6 node from
-        // a non-v6 capable machine). It doesn't relay control immediately to the netty boss thread as you may expect.
-        //
         // This method eventually constructs a Peer and puts it into pendingPeers. If the connection fails to establish,
         // handlePeerDeath will be called, which will potentially call this method again to replace the dead or failed
         // connection.
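The comment removed above explained why the connect call must not run while the PeerGroup lock is held: an immediately failing connection can re-enter the same code path, producing very deep stacks and, with other locks involved, circular deadlock. A minimal sketch of that pattern, assuming a ReentrantLock guards the shared peer lists; connectToAnyPeerSketch and openConnection are hypothetical names, not the real PeerGroup methods:

import java.util.concurrent.locks.ReentrantLock;

class LockThenConnectSketch {
    private final ReentrantLock lock = new ReentrantLock();

    void connectToAnyPeerSketch() {
        lock.lock();
        try {
            // Choose an address and update shared bookkeeping under the lock.
        } finally {
            lock.unlock();
        }
        // Open the connection only after releasing the lock: if it fails
        // immediately, the failure handler may call this method again, and
        // doing that while still holding the lock is what the removed
        // comment warned can lead to deep stacks and circular deadlock.
        openConnection();
    }

    private void openConnection() {
        // Placeholder for the actual connect logic.
    }
}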
@@ -764,7 +760,6 @@ public class PeerGroup extends AbstractIdleService implements TransactionBroadca
         int newSize = -1;
         lock.lock();
         try {
-            // Runs on a netty worker thread for every peer that is newly connected. Peer is not locked at this point.
             // Sets up the newly connected peer so it can do everything it needs to.
             log.info("{}: New peer", peer);
             pendingPeers.remove(peer);
@@ -933,10 +928,6 @@ public class PeerGroup extends AbstractIdleService implements TransactionBroadca
     }

     protected void handlePeerDeath(final Peer peer) {
-        // This can run on any Netty worker thread. Because connectToAnyPeer() must run unlocked to avoid circular
-        // deadlock, this method must run largely unlocked too. Some members are thread-safe and others aren't, so
-        // we synchronize only the parts that need it.
-
         // Peer deaths can occur during startup if a connect attempt after peer discovery aborts immediately.
         final State state = state();
         if (state != State.RUNNING && state != State.STARTING) return;
@@ -16,16 +16,16 @@

 package com.google.bitcoin.networkabstraction;

+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.Socket;
 import java.net.SocketAddress;
 import java.nio.ByteBuffer;
 import java.util.Set;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-
-import org.slf4j.LoggerFactory;

 import static com.google.common.base.Preconditions.checkState;

@@ -95,7 +95,7 @@ public class BlockingClient implements MessageWriteTarget {
             }
         } catch (Exception e) {
             if (!vCloseRequested)
-                log.error("Error trying to open/read from connection", e);
+                log.error("Error trying to open/read from connection: " + serverAddress, e);
         } finally {
             try {
                 socket.close();
@@ -16,18 +16,15 @@

 package com.google.bitcoin.networkabstraction;

+import com.google.common.util.concurrent.AbstractExecutionThreadService;
+import org.slf4j.LoggerFactory;
+
 import java.io.IOException;
 import java.net.SocketAddress;
 import java.nio.channels.*;
 import java.nio.channels.spi.SelectorProvider;
 import java.util.*;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Lock;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.AbstractExecutionThreadService;
-import org.slf4j.LoggerFactory;

 /**
  * A class which manages a set of client connections. Uses Java NIO to select network events and processes them in a
@@ -149,6 +146,7 @@ public class NioClientManager extends AbstractExecutionThreadService implements
             newConnectionChannels.offer(new SocketChannelAndParser(sc, parser));
             selector.wakeup();
         } catch (IOException e) {
+            log.error("Could not connect to " + serverAddress);
             throw new RuntimeException(e); // This should only happen if we are, eg, out of system resources
         }
     }
@@ -32,8 +32,8 @@ import org.junit.runners.Parameterized;
 import java.math.BigInteger;
 import java.net.InetSocketAddress;
 import java.util.*;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;

 import static org.junit.Assert.*;
@@ -70,23 +70,20 @@ public class PeerGroupTest extends TestWithPeerGroup {

     @Test
     public void listener() throws Exception {
-        final AtomicInteger connectedPeers = new AtomicInteger(0);
-        final AtomicInteger disconnectedPeers = new AtomicInteger(0);
+        final BlockingQueue<Peer> connectedPeers = new LinkedBlockingQueue<Peer>();
+        final BlockingQueue<Peer> disconnectedPeers = new LinkedBlockingQueue<Peer>();
         final SettableFuture<Void> firstDisconnectFuture = SettableFuture.create();
         final SettableFuture<Void> secondDisconnectFuture = SettableFuture.create();
         final Map<Peer, AtomicInteger> peerToMessageCount = new HashMap<Peer, AtomicInteger>();
         AbstractPeerEventListener listener = new AbstractPeerEventListener() {
             @Override
             public void onPeerConnected(Peer peer, int peerCount) {
-                connectedPeers.incrementAndGet();
+                connectedPeers.add(peer);
             }

             @Override
             public void onPeerDisconnected(Peer peer, int peerCount) {
-                if (disconnectedPeers.incrementAndGet() == 1)
-                    firstDisconnectFuture.set(null);
-                else
-                    secondDisconnectFuture.set(null);
+                disconnectedPeers.add(peer);
             }

             @Override
@@ -106,54 +103,50 @@ public class PeerGroupTest extends TestWithPeerGroup {

         // Create a couple of peers.
         InboundMessageQueuer p1 = connectPeer(1);
-        Threading.waitForUserCode();
-        assertEquals(1, connectedPeers.get());
         InboundMessageQueuer p2 = connectPeer(2);
-        Threading.waitForUserCode();
-        assertEquals(2, connectedPeers.get());
+        connectedPeers.take();
+        connectedPeers.take();

         pingAndWait(p1);
         pingAndWait(p2);
         Threading.waitForUserCode();
-        assertEquals(0, disconnectedPeers.get());
+        assertEquals(0, disconnectedPeers.size());

         p1.close();
-        firstDisconnectFuture.get();
-        assertEquals(1, disconnectedPeers.get());
+        disconnectedPeers.take();
+        assertEquals(0, disconnectedPeers.size());
         p2.close();
-        secondDisconnectFuture.get();
-        assertEquals(2, disconnectedPeers.get());
+        disconnectedPeers.take();
+        assertEquals(0, disconnectedPeers.size());

         assertTrue(peerGroup.removeEventListener(listener));
         assertFalse(peerGroup.removeEventListener(listener));
     }

     @Test
-    public void peerDiscoveryPolling() throws Exception {
+    public void peerDiscoveryPolling() throws InterruptedException {
         // Check that if peer discovery fails, we keep trying until we have some nodes to talk with.
-        final Semaphore sem = new Semaphore(0);
-        final boolean[] result = new boolean[1];
-        result[0] = false;
+        final CountDownLatch latch = new CountDownLatch(1);
+        final AtomicBoolean result = new AtomicBoolean();
         peerGroup.addPeerDiscovery(new PeerDiscovery() {
             public InetSocketAddress[] getPeers(long unused, TimeUnit unused2) throws PeerDiscoveryException {
-                if (result[0] == false) {
+                if (!result.getAndSet(true)) {
                     // Pretend we are not connected to the internet.
-                    result[0] = true;
                     throw new PeerDiscoveryException("test failure");
                 } else {
                     // Return a bogus address.
-                    sem.release();
-                    return new InetSocketAddress[]{new InetSocketAddress("localhost", 0)};
+                    latch.countDown();
+                    return new InetSocketAddress[]{new InetSocketAddress("localhost", 1)};
                 }
             }
             public void shutdown() {
             }
         });
         peerGroup.startAndWait();
-        sem.acquire();
+        latch.await();
         // Check that we did indeed throw an exception. If we got here it means we threw and then PeerGroup tried
         // again a bit later.
-        assertTrue(result[0]);
+        assertTrue(result.get());
     }

     @Test
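The test changes above replace counter-and-future bookkeeping with a BlockingQueue and a CountDownLatch, so the test blocks until an event actually arrives instead of asserting on counters that another thread may not have updated yet. A small self-contained sketch of that waiting pattern, assuming events are delivered from a background thread; the class and event names here are illustrative placeholders, not bitcoinj API:

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

class EventWaitSketch {
    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<String> connected = new LinkedBlockingQueue<String>();
        final CountDownLatch discoveryRan = new CountDownLatch(1);
        final AtomicBoolean failedOnce = new AtomicBoolean();

        // Simulate a listener thread reporting one connection and one discovery attempt.
        new Thread(new Runnable() {
            public void run() {
                if (!failedOnce.getAndSet(true)) {
                    // First attempt "fails"; the real test throws PeerDiscoveryException here
                    // and relies on PeerGroup to retry.
                }
                connected.add("peer-1");   // like connectedPeers.add(peer) in the listener
                discoveryRan.countDown();  // like latch.countDown() in the discovery stub
            }
        }).start();

        connected.take();      // blocks until the event arrives: no sleeps, no racy counters
        discoveryRan.await();  // same idea expressed with a latch
        System.out.println("saw connect, failedOnce=" + failedOnce.get());
    }
}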