Split index.ts into multiple scripts in scripts/ and detect last known block when pulling events
This commit is contained in:
81
packages/pipeline/src/scripts/merge_v2_events.ts
Normal file
81
packages/pipeline/src/scripts/merge_v2_events.ts
Normal file
@@ -0,0 +1,81 @@
|
||||
import { web3Factory } from '@0x/dev-utils';
|
||||
import 'reflect-metadata';
|
||||
import { Connection, createConnection } from 'typeorm';
|
||||
|
||||
import { ExchangeEventsSource } from '../data_sources/contract-wrappers/exchange_events';
|
||||
import { ExchangeFillEvent } from '../entities/ExchangeFillEvent';
|
||||
import { deployConfig } from '../ormconfig';
|
||||
import { parseExchangeEvents } from '../parsers/events';
|
||||
|
||||
let connection: Connection;
|
||||
|
||||
(async () => {
|
||||
connection = await createConnection(deployConfig);
|
||||
await getExchangeEventsAsync();
|
||||
await mergeExchangeEventsAsync();
|
||||
console.log('Exiting process');
|
||||
process.exit(0);
|
||||
})();
|
||||
|
||||
// TODO(albrow): Separately: Errors do not appear to be handled correctly. If you use the
|
||||
// wrong rpcUrl it just returns early with no error.
|
||||
async function getExchangeEventsAsync(): Promise<void> {
|
||||
console.log('Getting event logs...');
|
||||
const provider = web3Factory.getRpcProvider({
|
||||
rpcUrl: 'https://mainnet.infura.io',
|
||||
});
|
||||
const eventsRepository = connection.getRepository(ExchangeFillEvent);
|
||||
const exchangeEvents = new ExchangeEventsSource(provider, 1);
|
||||
const eventLogs = await exchangeEvents.getFillEventsAsync();
|
||||
console.log('Parsing events...');
|
||||
const events = parseExchangeEvents(eventLogs);
|
||||
console.log(`Retrieved and parsed ${events.length} total events.`);
|
||||
console.log('Saving events...');
|
||||
for (const event of events) {
|
||||
await eventsRepository.save(event);
|
||||
}
|
||||
await eventsRepository.save(events);
|
||||
console.log('Saved events.');
|
||||
}
|
||||
|
||||
// SQL that copies rows from the V2 `exchange_fill_event` table into the
// legacy `events_raw` table, mapping camelCase columns to the legacy
// snake_case schema. The event_type is hard-coded to 'LogFill' and error_id
// to null; amounts are cast to numeric(78) to hold full uint256 values.
// ON CONFLICT ... DO NOTHING makes the merge idempotent, so re-running the
// script never duplicates rows already present in events_raw.
const insertEventsRawQuery = `INSERT INTO events_raw (
    event_type,
    error_id,
    order_hash,
    maker,
    maker_amount,
    maker_fee,
    maker_token,
    taker,
    taker_amount,
    taker_fee,
    taker_token,
    txn_hash,
    fee_recipient,
    block_number,
    log_index
)
(
    SELECT
        'LogFill',
        null,
        "orderHash",
        "makerAddress",
        "makerAssetFilledAmount"::numeric(78),
        "makerFeePaid"::numeric(78),
        "makerTokenAddress",
        "takerAddress",
        "takerAssetFilledAmount"::numeric(78),
        "takerFeePaid"::numeric(78),
        "takerTokenAddress",
        "transactionHash",
        "feeRecipientAddress",
        "blockNumber",
        "logIndex"
    FROM exchange_fill_event
) ON CONFLICT (order_hash, txn_hash, log_index) DO NOTHING`;
|
||||
/**
 * Runs the idempotent merge of V2 fill events into the legacy events_raw
 * table by executing `insertEventsRawQuery` against the module-level
 * `connection` (which must already be initialized).
 */
async function mergeExchangeEventsAsync(): Promise<void> {
    console.log('Merging results into events_raw...');
    await connection.query(insertEventsRawQuery);
}
60
packages/pipeline/src/scripts/pull_missing_events.ts
Normal file
60
packages/pipeline/src/scripts/pull_missing_events.ts
Normal file
@@ -0,0 +1,60 @@
|
||||
import { web3Factory } from '@0x/dev-utils';
|
||||
import { Web3ProviderEngine } from '@0x/subproviders';
|
||||
import R = require('ramda');
|
||||
import 'reflect-metadata';
|
||||
import { Connection, createConnection, Repository } from 'typeorm';
|
||||
|
||||
import { ExchangeEventsSource } from '../data_sources/contract-wrappers/exchange_events';
|
||||
import { ExchangeFillEvent } from '../entities/ExchangeFillEvent';
|
||||
import { deployConfig } from '../ormconfig';
|
||||
import { parseExchangeEvents } from '../parsers/events';
|
||||
|
||||
// Block number when the Exchange contract was deployed to mainnet; nothing
// before this block can contain Exchange events, so it is the earliest
// possible starting point for a scan.
const EXCHANGE_START_BLOCK = 6271590;
// Number of blocks before the last known block to rewind when updating fill
// events, so recently-scanned blocks are re-checked on each run.
const START_BLOCK_OFFSET = 1000;
// Number of events to save per DB request when batching inserts.
const BATCH_SAVE_SIZE = 1000;
|
||||
let connection: Connection;
|
||||
|
||||
(async () => {
|
||||
connection = await createConnection(deployConfig);
|
||||
const provider = web3Factory.getRpcProvider({
|
||||
rpcUrl: 'https://mainnet.infura.io',
|
||||
});
|
||||
await getExchangeEventsAsync(provider);
|
||||
process.exit(0);
|
||||
})();
|
||||
|
||||
async function getExchangeEventsAsync(provider: Web3ProviderEngine): Promise<void> {
|
||||
console.log('Checking existing event logs...');
|
||||
const eventsRepository = connection.getRepository(ExchangeFillEvent);
|
||||
const startBlock = await getStartBlockAsync(eventsRepository);
|
||||
console.log(`Getting event logs starting at ${startBlock}...`);
|
||||
const exchangeEvents = new ExchangeEventsSource(provider, 1);
|
||||
const eventLogs = await exchangeEvents.getFillEventsAsync(startBlock);
|
||||
console.log('Parsing events...');
|
||||
const events = parseExchangeEvents(eventLogs);
|
||||
console.log(`Retrieved and parsed ${events.length} total events.`);
|
||||
console.log('Saving events...');
|
||||
// Split the events into batches of size BATCH_SAVE_SIZE and save each batch
|
||||
// in a single request. This reduces round-trip latency to the DB. We need
|
||||
// to batch this way because saving an extremely large number of events in a
|
||||
// single request causes problems.
|
||||
for (const eventsBatch of R.splitEvery(BATCH_SAVE_SIZE, events)) {
|
||||
await eventsRepository.save(eventsBatch);
|
||||
}
|
||||
const totalEvents = await eventsRepository.count();
|
||||
console.log(`Done saving events. There are now ${totalEvents} total events.`);
|
||||
}
|
||||
|
||||
async function getStartBlockAsync(eventsRepository: Repository<ExchangeFillEvent>): Promise<number> {
|
||||
const fillEventCount = await eventsRepository.count();
|
||||
if (fillEventCount === 0) {
|
||||
console.log('No existing fill events found.');
|
||||
return EXCHANGE_START_BLOCK;
|
||||
}
|
||||
const queryResult = await connection.query(
|
||||
'SELECT "blockNumber" FROM exchange_fill_event ORDER BY "blockNumber" DESC LIMIT 1',
|
||||
);
|
||||
const lastKnownBlock = queryResult[0].blockNumber;
|
||||
return lastKnownBlock - START_BLOCK_OFFSET;
|
||||
}
|
31
packages/pipeline/src/scripts/update_relayer_info.ts
Normal file
31
packages/pipeline/src/scripts/update_relayer_info.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
import 'reflect-metadata';
|
||||
import { Connection, createConnection } from 'typeorm';
|
||||
|
||||
import { RelayerRegistrySource } from '../data_sources/relayer-registry';
|
||||
import { Relayer } from '../entities/Relayer';
|
||||
import { deployConfig } from '../ormconfig';
|
||||
import { parseRelayers } from '../parsers/relayer_registry';
|
||||
|
||||
// NOTE(albrow): We need to manually update this URL for now. Fix this when we
// have the relayer-registry behind semantic versioning.
// Pinned to a specific commit of 0x-relayer-registry so the script's input is
// reproducible until proper versioning exists.
const RELAYER_REGISTRY_URL =
    'https://raw.githubusercontent.com/0xProject/0x-relayer-registry/4701c85677d161ea729a466aebbc1826c6aa2c0b/relayers.json';
||||
|
||||
let connection: Connection;
|
||||
|
||||
(async () => {
|
||||
connection = await createConnection(deployConfig);
|
||||
await getRelayers();
|
||||
process.exit(0);
|
||||
})();
|
||||
|
||||
async function getRelayers(): Promise<void> {
|
||||
console.log('Getting latest relayer info...');
|
||||
const relayerRepository = connection.getRepository(Relayer);
|
||||
const relayerSource = new RelayerRegistrySource(RELAYER_REGISTRY_URL);
|
||||
const relayersResp = await relayerSource.getRelayerInfoAsync();
|
||||
const relayers = parseRelayers(relayersResp);
|
||||
console.log('Saving relayer info...');
|
||||
await relayerRepository.save(relayers);
|
||||
console.log('Done saving relayer info.');
|
||||
}
|
Reference in New Issue
Block a user