* feat: analyze profits - closes #19
* fix: poetry lock
* feat: add save to csv option
* fix: update dockerfile to resolve deps automatically at build-time
* feat: add profit by day
* feat: add failures saving
* fix: launch script
* feat: get rpc url from env

Signed-off-by: Arthurim <arthurbdauphine@gmail.com>
Signed-off-by: Luca Georges Francois <luca@quartz.technology>
Co-authored-by: Luca Georges Francois <luca@quartz.technology>
parent 880992362b, commit bfbf1cc379
.gitignore (vendored) | 3
@@ -25,3 +25,6 @@ cache
 
 # pycharm
 .idea
+
+# generated csvs
+*.csv
Dockerfile | 2
@@ -18,7 +18,7 @@ WORKDIR /app/
 USER flashbot
 
 RUN poetry config virtualenvs.create false \
-    && poetry install
+    && rm poetry.lock ; poetry lock && poetry install
 
 COPY --chown=flashbot . /app
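Note on the Dockerfile change: deleting the copied poetry.lock and re-locking during the image build makes dependency resolution happen at build time, so a committed lockfile that has drifted out of sync with pyproject.toml no longer breaks the build; the tradeoff is that builds are no longer pinned to the committed lock.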
cli.py | 23
@@ -6,6 +6,8 @@ from datetime import datetime
 
 import click
 import dramatiq
+from profit_analysis.analysis import analyze_profit, get_profit_by
+from profit_analysis.column_names import BLOCK_KEY
 
 from mev_inspect.concurrency import coro
 from mev_inspect.crud.prices import write_prices
@@ -47,6 +49,27 @@ async def inspect_block_command(block_number: int, rpc: str):
     )
 
 
+@cli.command()
+@click.argument("block_from", type=int)
+@click.argument("block_to", type=int)
+@click.argument("save_to_csv", type=bool)
+@coro
+async def analyze_profit_command(
+    block_from: int, block_to: int, save_to_csv: bool = False
+):
+    inspect_db_session = get_inspect_session()
+    profit = analyze_profit(inspect_db_session, block_from, block_to, save_to_csv)
+    print(" -------------------------------------------------------------------")
+    print(" Profit By Block")
+    print(get_profit_by(profit, BLOCK_KEY, save_to_csv))
+    print(" -------------------------------------------------------------------")
+    print(" Profit By Day")
+    print(get_profit_by(profit, "date", save_to_csv))
+    print(" -------------------------------------------------------------------")
+    print(" Profit By Category")
+    print(get_profit_by(profit, "category", save_to_csv))
+
+
 @cli.command()
 @click.argument("block_number", type=int)
 @click.option("--rpc", default=lambda: os.environ.get(RPC_URL_ENV, ""))
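With the analyze-profit entry point registered in pyproject.toml (see the hunk further down), the new command can be run directly. A hypothetical invocation over an illustrative block range, with CSV output enabled:

poetry run analyze-profit 34500000 34500100 True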
launch_analysis.py (new file) | 20
@@ -0,0 +1,20 @@
import sys

from profit_analysis.analysis import analyze_profit, get_profit_by
from profit_analysis.column_names import BLOCK_KEY

from mev_inspect.db import get_inspect_session

block_from = int(sys.argv[1])
block_to = int(sys.argv[2])
inspect_db_session = get_inspect_session()
profit = analyze_profit(inspect_db_session, block_from, block_to, True)
print(" -------------------------------------------------------------------")
print(" Profit By Block")
print(get_profit_by(profit, BLOCK_KEY, True))
print(" -------------------------------------------------------------------")
print(" Profit By Day")
print(get_profit_by(profit, "date", True))
print(" -------------------------------------------------------------------")
print(" Profit By Category")
print(get_profit_by(profit, "category", True))
mev | 10
@@ -68,6 +68,14 @@ case "$1" in
         kubectl exec -ti deploy/mev-inspect -- \
             poetry run inspect-many-blocks $after_block_number $before_block_number
         ;;
+    analyze-profit)
+        after_block_number=$2
+        before_block_number=$3
+        save_to_csv=$4
+        echo "Analyzing profit from block $after_block_number to $before_block_number"
+        kubectl exec -ti deploy/mev-inspect -- \
+            poetry run analyze-profit $after_block_number $before_block_number $save_to_csv
+        ;;
     test)
         shift
         echo "Running tests"
@@ -122,7 +130,7 @@ case "$1" in
         kubectl exec -ti deploy/mev-inspect -- $@
         ;;
     *)
-        echo "Usage: "$1" {db|backfill|inspect|test}"
+        echo "Usage: "$1" {db|backfill|inspect|test|analyze-profit}"
         exit 1
 esac
mev_inspect/crud/read.py (new file) | 29
@@ -0,0 +1,29 @@
import pandas as pd


def read_from_db_all_into_dataframe(db_session, table, columns, where_clause):
    """
    Reads all relevant rows from the DB as a df
    :param db_session:
    :param table:
    :param columns:
    :param where_clause:
    :return:
    """
    query = "SELECT " + columns + " FROM " + table
    if where_clause != "":
        query += " WHERE " + where_clause
    result = db_session.execute(query)
    return result


def read_profit_from_to(db_session, block_from, block_to):
    where_clause = (
        "block_number>=" + str(block_from) + " AND " + "block_number<=" + str(block_to)
    )
    profit = read_from_db_all_into_dataframe(
        db_session, "total_profit_by_block", "*", where_clause
    )
    profit = pd.DataFrame(profit.fetchall())
    profit = profit.drop(["id"], axis=1)
    return profit
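A side note: read_from_db_all_into_dataframe builds its SQL by string concatenation. That is workable here because the block bounds arrive as integers from the CLI, but bound parameters are the safer pattern. A minimal sketch using SQLAlchemy's text(), not part of this commit; it assumes db_session is the SQLAlchemy session that get_inspect_session appears to provide:

import pandas as pd
from sqlalchemy import text


def read_profit_from_to_bound(db_session, block_from, block_to):
    # Same query as read_profit_from_to, with bound parameters instead of concatenation.
    query = text(
        "SELECT * FROM total_profit_by_block"
        " WHERE block_number >= :block_from AND block_number <= :block_to"
    )
    result = db_session.execute(
        query, {"block_from": block_from, "block_to": block_to}
    )
    return pd.DataFrame(result.fetchall(), columns=result.keys())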
poetry.lock (generated) | 1694
File diff suppressed because it is too large.
profit_analysis/__init__.py (new file) | 0
profit_analysis/analysis.py (new file) | 262
@@ -0,0 +1,262 @@
import datetime
import os

import pandas as pd
import web3
from profit_analysis.block_utils import add_block_timestamp
from profit_analysis.coingecko import (
    add_cg_ids,
    get_address_to_coingecko_ids_mapping,
    get_coingecko_historical_prices,
)
from profit_analysis.column_names import (
    AMOUNT_DEBT_KEY,
    AMOUNT_RECEIVED_KEY,
    CG_ID_DEBT_KEY,
    CG_ID_RECEIVED_KEY,
    DECIMAL_DEBT_KEY,
    PRICE_DEBT_KEY,
    PRICE_KEY,
    PRICE_RECEIVED_KEY,
    TIMESTAMP_KEY,
    TOKEN_DEBT_KEY,
    TOKEN_RECEIVED_KEY,
)
from profit_analysis.constants import DATA_PATH
from profit_analysis.token_utils import get_decimals

from mev_inspect.crud.read import read_profit_from_to

"""
Steps:
1. given block_from and block_to, read the profit
"""

WETH_TOKEN_ADDRESS = "0x7ceB23fD6bC0adD59E62ac25578270cFf1b9f619"
PD_DATETIME_FORMAT = "datetime64[ns]"


def analyze_profit(inspect_db_session, block_from, block_to, save_to_csv=False):
    profit = read_profit_from_to(inspect_db_session, block_from, block_to)
    w3 = create_web3()
    profit = add_block_timestamp(w3, profit)
    profit = add_cg_ids(profit)
    profit = get_usd_profit(profit, save_to_csv)
    print(profit)
    return profit


def get_usd_profit(profit, save_to_csv=False):
    """
    For each token involved in MEV transactions, fetches its price at the time of the
    transaction and computes the USD profit of each MEV transaction.

    :param profit: pd.DataFrame, with columns = ['block_number', 'timestamp', 'transaction_hash',
                                                 'token_debt', 'amount_debt', 'cg_id_debt',
                                                 'token_received', 'amount_received', 'cg_id_received']
    :param save_to_csv: bool, whether to save the analysed profits to csv or not
    :return: pd.DataFrame, with columns = ['block_number', 'timestamp', 'date', 'transaction_hash',
                                           'amount_received', 'token_received', 'price_received',
                                           'amount_debt', 'token_debt', 'price_debt',
                                           'profit_usd']
    """
    tokens = profit[CG_ID_RECEIVED_KEY].unique()
    mapping = get_address_to_coingecko_ids_mapping()
    profit_with_price_tokens = pd.DataFrame()
    failures = {}
    for token in tokens:
        print("Processing", token)
        try:
            profit_by_received_token = pd.DataFrame(
                profit.loc[profit[CG_ID_RECEIVED_KEY] == token]
            )
            profit_by_received_token[TIMESTAMP_KEY] = pd.to_datetime(
                profit_by_received_token[TIMESTAMP_KEY], format="%Y-%m-%d %H:%M:%S"
            )

            dates = pd.to_datetime(profit_by_received_token[TIMESTAMP_KEY].unique())
            # @TODO: What is an optimal value here?
            # looks like sometimes there is no price for hours???
            offset_minutes = 30
            date_min = int(
                (dates.min() - datetime.timedelta(minutes=offset_minutes)).timestamp()
            )
            date_max = int(
                (dates.max() + datetime.timedelta(minutes=offset_minutes)).timestamp()
            )

            # get received token prices
            token_prices = get_coingecko_historical_prices(date_min, date_max, token)
            token_prices = token_prices.rename(columns={PRICE_KEY: PRICE_RECEIVED_KEY})
            token_prices[TOKEN_RECEIVED_KEY] = token

            # get received token decimals
            decimals = get_decimals(
                profit_by_received_token[TOKEN_RECEIVED_KEY].values[0]
            )

            # get debt tokens prices
            debt_tokens_prices = pd.DataFrame()
            for cg_id_debt in (
                profit_by_received_token[CG_ID_DEBT_KEY].astype(str).unique().tolist()
            ):
                if cg_id_debt != "nan":
                    debt_token_prices = get_coingecko_historical_prices(
                        date_min, date_max, cg_id_debt
                    )
                    debt_token_prices[CG_ID_DEBT_KEY] = cg_id_debt
                    debt_token = mapping.loc[
                        mapping[CG_ID_DEBT_KEY] == cg_id_debt, TOKEN_DEBT_KEY
                    ].values[0]
                    debt_token_prices[TOKEN_DEBT_KEY] = debt_token
                    debt_tokens_prices = pd.concat(
                        [debt_tokens_prices, debt_token_prices]
                    )
            debt_tokens_prices = debt_tokens_prices.rename(
                columns={PRICE_KEY: PRICE_DEBT_KEY}
            )

            # get debt tokens decimals
            debt_tokens_decimals = pd.DataFrame(
                columns=[TOKEN_DEBT_KEY, DECIMAL_DEBT_KEY]
            )
            for debt_token in (
                profit_by_received_token[TOKEN_DEBT_KEY].astype(str).unique().tolist()
            ):
                if debt_token != "":
                    debt_token_decimals = get_decimals(debt_token)
                    debt_tokens_decimals = pd.concat(
                        [
                            debt_tokens_decimals,
                            pd.DataFrame(
                                [[debt_token, debt_token_decimals]],
                                columns=[TOKEN_DEBT_KEY, DECIMAL_DEBT_KEY],
                            ),
                        ]
                    )
            profit_by_received_token = profit_by_received_token.merge(
                debt_tokens_decimals, on=TOKEN_DEBT_KEY, how="outer"
            )
            profit_by_received_token.loc[
                pd.isna(profit_by_received_token[AMOUNT_DEBT_KEY]), AMOUNT_DEBT_KEY
            ] = 0

            # apply decimals
            profit_by_received_token[AMOUNT_RECEIVED_KEY] = pd.to_numeric(
                profit_by_received_token[AMOUNT_RECEIVED_KEY]
            ).div(10**decimals)
            profit_by_received_token[AMOUNT_DEBT_KEY] = pd.to_numeric(
                profit_by_received_token[AMOUNT_DEBT_KEY]
            )

            # set up timestamps for merge
            token_prices[TIMESTAMP_KEY] = pd.to_datetime(token_prices[TIMESTAMP_KEY])

            # merge received token prices
            profit_with_price_token = pd.merge_asof(
                profit_by_received_token.astype({TIMESTAMP_KEY: PD_DATETIME_FORMAT})
                .sort_values(TIMESTAMP_KEY)
                .convert_dtypes(),
                token_prices[[TIMESTAMP_KEY, PRICE_RECEIVED_KEY]]
                .astype({TIMESTAMP_KEY: PD_DATETIME_FORMAT})
                .sort_values(TIMESTAMP_KEY)
                .convert_dtypes(),
                direction="nearest",
                on=TIMESTAMP_KEY,
            )

            if len(debt_tokens_prices) > 0:
                debt_tokens_prices[TIMESTAMP_KEY] = pd.to_datetime(
                    debt_tokens_prices[TIMESTAMP_KEY]
                )
                # merge debt token prices
                profit_with_price_token = pd.merge_asof(
                    profit_with_price_token.astype({TIMESTAMP_KEY: PD_DATETIME_FORMAT})
                    .sort_values(TIMESTAMP_KEY)
                    .convert_dtypes(),
                    debt_tokens_prices[[TIMESTAMP_KEY, PRICE_DEBT_KEY]]
                    .astype({TIMESTAMP_KEY: PD_DATETIME_FORMAT})
                    .sort_values(TIMESTAMP_KEY)
                    .convert_dtypes(),
                    direction="nearest",
                    on=TIMESTAMP_KEY,
                    by=TOKEN_DEBT_KEY,
                )
                category = "liquidation"
            else:
                category = "arbitrage"
                profit_with_price_token[PRICE_DEBT_KEY] = 0

            profit_with_price_token["category"] = category
            profit_with_price_tokens = pd.concat(
                [profit_with_price_tokens, profit_with_price_token]
            )
        except Exception as e:
            # @TODO: save into list to add later
            print(" Failed for token=", token)
            print(e)
            failures[token] = e
    print("Finished processing all tokens")
    profit_with_price_tokens[PRICE_DEBT_KEY] = profit_with_price_tokens[
        PRICE_DEBT_KEY
    ].fillna(value=0)
    profit_with_price_tokens[AMOUNT_DEBT_KEY] = profit_with_price_tokens[
        AMOUNT_DEBT_KEY
    ].fillna(value=0)
    profit_with_price_tokens["profit_usd"] = (
        profit_with_price_tokens[AMOUNT_RECEIVED_KEY]
        * profit_with_price_tokens[PRICE_RECEIVED_KEY]
        - profit_with_price_tokens[AMOUNT_DEBT_KEY]
        * profit_with_price_tokens[PRICE_DEBT_KEY]
    )
    profit_with_price_tokens = profit_with_price_tokens.reset_index(drop=True)
    profit_with_price_tokens["date"] = profit_with_price_tokens[
        TIMESTAMP_KEY
    ].dt.normalize()
    if save_to_csv:
        profit_with_price_tokens.to_csv(DATA_PATH + "usd_profit.csv", index=False)
        pd.DataFrame(failures.items(), columns=["token", "error"]).to_csv(
            DATA_PATH + "analyze_profit_failures.csv", index=False
        )
    return profit_with_price_tokens[
        [
            "block_number",
            "timestamp",
            "date",
            "transaction_hash",
            "amount_received",
            "token_received",
            "price_received",
            "amount_debt",
            "token_debt",
            "price_debt",
            "profit_usd",
            "category",
        ]
    ]


def get_profit_by(profit_with_price_tokens, col, save_to_csv=False):
    profit_by_block = (
        profit_with_price_tokens.groupby([col])
        .agg({"profit_usd": ["sum", "mean", "median", "count"]})
        .reset_index()
    )
    profit_by_block.columns = profit_by_block.columns.droplevel(0)
    profit_by_block.rename(columns={"": col}, inplace=True)
    if save_to_csv:
        file_name = DATA_PATH + "profit_by_" + col + ".csv"
        print(file_name)
        profit_by_block.to_csv(file_name, index=False)
    return profit_by_block


def create_web3():
    web3_rpc_url = os.environ.get("RPC_URL")
    w3_provider = web3.Web3(web3.Web3.HTTPProvider(web3_rpc_url))
    w3_provider.middleware_onion.inject(web3.middleware.geth_poa_middleware, layer=0)
    if w3_provider.isConnected():
        return w3_provider
    else:
        raise Exception("Failed to connect")
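The pricing join above relies on pd.merge_asof with direction="nearest", which pairs each transaction with the price sample closest in time (both frames must be sorted on the merge key). A minimal, self-contained sketch with made-up values:

import pandas as pd

trades = pd.DataFrame(
    {
        "timestamp": pd.to_datetime(["2022-12-01 10:02", "2022-12-01 10:29"]),
        "amount_received": [1.5, 0.4],
    }
)
prices = pd.DataFrame(
    {
        "timestamp": pd.to_datetime(["2022-12-01 10:00", "2022-12-01 10:30"]),
        "price_received": [1200.0, 1210.0],
    }
)
merged = pd.merge_asof(
    trades.sort_values("timestamp"),
    prices.sort_values("timestamp"),
    on="timestamp",
    direction="nearest",
)
# The 10:02 trade picks up the 10:00 price sample; the 10:29 trade picks up 10:30.
print(merged)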
profit_analysis/block_utils.py (new file) | 21
@@ -0,0 +1,21 @@
import datetime

import pandas as pd
from profit_analysis.column_names import BLOCK_KEY, TIMESTAMP_KEY


def add_block_timestamp(w3, profit_by_block):
    block_timestamp = pd.DataFrame(
        profit_by_block[BLOCK_KEY].unique(), columns=[BLOCK_KEY]
    )
    block_timestamp[TIMESTAMP_KEY] = block_timestamp[BLOCK_KEY].apply(
        lambda x: get_block_timestamp(w3, x)
    )
    return profit_by_block.merge(block_timestamp, on=BLOCK_KEY)


def get_block_timestamp(w3, block):
    block_info = w3.eth.get_block(int(block))
    ts = block_info[TIMESTAMP_KEY]
    dt = datetime.datetime.fromtimestamp(ts)
    return dt
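A quick sanity check of the conversion in get_block_timestamp, using an illustrative payload instead of a live RPC call (w3.eth.get_block returns the block's unix timestamp in seconds under the "timestamp" key):

import datetime

block_info = {"timestamp": 1671408000}  # hypothetical block payload
# Note: fromtimestamp converts to the local timezone of the analysis machine,
# while block timestamps are UTC; 1671408000 is 2022-12-19 00:00:00 UTC.
print(datetime.datetime.fromtimestamp(block_info["timestamp"]))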
profit_analysis/coingecko.py (new file) | 71
@@ -0,0 +1,71 @@
import pandas as pd
import pycoingecko as pg
from profit_analysis.column_names import (
    CG_ID_DEBT_KEY,
    CG_ID_KEY,
    CG_ID_RECEIVED_KEY,
    PRICE_KEY,
    TIMESTAMP_KEY,
    TOKEN_DEBT_KEY,
    TOKEN_KEY,
    TOKEN_RECEIVED_KEY,
)
from profit_analysis.constants import DATA_PATH

TRAILING_ZEROS = "000000000000000000000000"


def get_address_to_coingecko_ids_mapping():
    token_cg_ids = pd.read_csv(DATA_PATH + "address_to_coingecko_ids.csv")
    token_cg_ids[TOKEN_DEBT_KEY] = token_cg_ids[TOKEN_KEY].astype(str)
    token_cg_ids[CG_ID_RECEIVED_KEY] = token_cg_ids[CG_ID_KEY]
    token_cg_ids[CG_ID_DEBT_KEY] = token_cg_ids[CG_ID_KEY]
    token_cg_ids[TOKEN_RECEIVED_KEY] = token_cg_ids[TOKEN_KEY].astype(str)
    return token_cg_ids


def add_cg_ids(profit_by_block):
    token_cg_ids = get_address_to_coingecko_ids_mapping()
    token_cg_ids[TOKEN_DEBT_KEY] = token_cg_ids[TOKEN_DEBT_KEY].str.lower()
    token_cg_ids[TOKEN_RECEIVED_KEY] = token_cg_ids[TOKEN_RECEIVED_KEY].str.lower()
    profit_by_block[TOKEN_RECEIVED_KEY] = (
        profit_by_block[TOKEN_RECEIVED_KEY]
        .map(lambda x: x.replace(TRAILING_ZEROS, ""))
        .str.lower()
    )
    profit_by_block[TOKEN_DEBT_KEY] = (
        profit_by_block[TOKEN_DEBT_KEY]
        .map(lambda x: x.replace(TRAILING_ZEROS, ""))
        .str.lower()
    )
    profit_by_block = profit_by_block.merge(
        token_cg_ids[[TOKEN_DEBT_KEY, CG_ID_DEBT_KEY]], on=TOKEN_DEBT_KEY, how="left"
    )
    profit_by_block = profit_by_block.merge(
        token_cg_ids[[TOKEN_RECEIVED_KEY, CG_ID_RECEIVED_KEY]], how="left"
    )
    return profit_by_block[
        [
            "block_number",
            "timestamp",
            "transaction_hash",
            "token_debt",
            "amount_debt",
            "cg_id_debt",
            "token_received",
            "amount_received",
            "cg_id_received",
        ]
    ]


def get_coingecko_historical_prices(start, end, token):
    cg = pg.CoinGeckoAPI()
    token_prices = cg.get_coin_market_chart_range_by_id(
        id=token, vs_currency="usd", from_timestamp=start, to_timestamp=end
    )["prices"]
    token_prices = pd.DataFrame(token_prices, columns=[TIMESTAMP_KEY, PRICE_KEY])
    token_prices[TIMESTAMP_KEY] = pd.to_datetime(
        pd.to_numeric(token_prices[TIMESTAMP_KEY]), unit="ms"
    )
    return token_prices[[TIMESTAMP_KEY, PRICE_KEY]]
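Why replacing TRAILING_ZEROS (24 hex characters, i.e. 12 bytes) recovers an address: the profit table appears to store token addresses left-padded to a full 32-byte word, while a plain address is 20 bytes. A sketch with a hypothetical padded value, built from the WETH address in the mapping CSV:

TRAILING_ZEROS = "000000000000000000000000"  # 24 hex zeros = 12 padding bytes

padded = "0x" + TRAILING_ZEROS + "7ceb23fd6bc0add59e62ac25578270cff1b9f619"
print(padded.replace(TRAILING_ZEROS, ""))
# -> 0x7ceb23fd6bc0add59e62ac25578270cff1b9f619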
profit_analysis/column_names.py (new file) | 14
@@ -0,0 +1,14 @@
TOKEN_KEY = "token"
TIMESTAMP_KEY = "timestamp"
BLOCK_KEY = "block_number"
TOKEN_RECEIVED_KEY = "token_received"
TOKEN_DEBT_KEY = "token_debt"
CG_ID_KEY = "cg_id"
CG_ID_DEBT_KEY = "cg_id_debt"
CG_ID_RECEIVED_KEY = "cg_id_received"
AMOUNT_RECEIVED_KEY = "amount_received"
AMOUNT_DEBT_KEY = "amount_debt"
PRICE_DEBT_KEY = "price_debt"
PRICE_RECEIVED_KEY = "price_received"
PRICE_KEY = "price"
DECIMAL_DEBT_KEY = "decimal_debt"
profit_analysis/constants.py (new file) | 1
@@ -0,0 +1 @@
DATA_PATH = "resources/"
profit_analysis/token_utils.py (new file) | 15
@@ -0,0 +1,15 @@
import pandas as pd
from profit_analysis.column_names import TOKEN_KEY
from profit_analysis.constants import DATA_PATH


def get_decimals(token_address):
    decimals_mapping = pd.read_csv(DATA_PATH + "address_to_decimals.csv")
    decimals_mapping[TOKEN_KEY] = decimals_mapping[TOKEN_KEY].str.lower()
    decimals = decimals_mapping.loc[
        decimals_mapping[TOKEN_KEY] == token_address.lower(), "decimals"
    ].values
    if len(decimals) > 0:
        return decimals[0]
    else:
        raise Exception("No Decimals for token=", token_address)
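get_decimals feeds the division by 10**decimals in analysis.py, which turns raw on-chain integer amounts into token units. A worked example using the usd-coin row (6 decimals) from resources/address_to_decimals.csv; the raw amount is hypothetical:

raw_amount = 2_500_000  # integer amount as stored on-chain
decimals = 6
print(raw_amount / 10**decimals)  # -> 2.5 tokens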
pyproject.toml | 2
@@ -16,6 +16,7 @@ dramatiq = {extras = ["redis"], version = "^1.12.1"}
 pycoingecko = "^2.2.0"
 boto3 = "^1.20.48"
 aiohttp-retry = "^2.4.6"
+pandas = "^1.5.2"
 
 [tool.poetry.dev-dependencies]
 pre-commit = "^2.13.0"
@@ -46,6 +47,7 @@ fetch-range = 'cli:fetch_range'
 s3-export = 'cli:s3_export'
 enqueue-s3-export = 'cli:enqueue_s3_export'
 enqueue-many-s3-exports = 'cli:enqueue_many_s3_exports'
+analyze-profit = 'cli:analyze_profit_command'
 
 [tool.black]
 exclude = '''
resources/address_to_coingecko_ids.csv (new file) | 23
@@ -0,0 +1,23 @@
token,cg_id
0x7ceB23fD6bC0adD59E62ac25578270cFf1b9f619,weth
0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee,ethereum
0x2260fac5e5542a773aa44fbcfedf7c193bc2c599,wrapped-bitcoin
0x514910771af9ca656af840dff83e8264ecf986ca,chainlink
0x0bc529c00c6401aef6d220be8c6ea1667f6ad93e,yearn-finance
0xD6DF932A45C0f255f85145f286eA0b292B21C90B,aave
0x1f9840a85d5af5bf1d1762f925bdaddc4201f984,uniswap
0x2791Bca1f2de4661ED88A30C99A7a9449Aa84174,usd-coin
0x6b175474e89094c44da98b954eedeac495271d0f,dai
0x408e41876cccdc0f92210600ef50372656052a38,republic-protocol
0x39aa39c021dfbae8fac545936693ac917d5e7563,compound-usd-coin
0x5d3a536e4d6dbd6114cc1ead35777bab948e3643,cdai
0x4ddc2d193948926d02f9b1fe9e1daa0718270ed5,compound-ether
0xc11b1268c1a384e55c48c2391d8d480264a3a7f4,compound-wrapped-btc
0x0d500B1d8E8eF31E21C99d1Db9A6444d3ADf1270,matic-network
0x430EF9263E76DAE63c84292C3409D61c598E9682,vulcan-forged
0xc2132D05D31c914a87C6611C10748AEb04B58e8F,tether
0x22a31bD4cB694433B6de19e0aCC2899E553e9481,mmfinance
0xE6469Ba6D2fD6130788E0eA9C0a0515900563b59,terrausd-wormhole
0x6f8a06447Ff6FcF75d803135a7de15CE88C1d4ec,shiba-inu
0x831753DD7087CaC61aB5644b308642cc1c33Dc13,quick
0x723b17718289a91af252d616de2c77944962d122,gaia-everworld
resources/address_to_decimals.csv (new file) | 18
@@ -0,0 +1,18 @@
token,decimals
0x7ceB23fD6bC0adD59E62ac25578270cFf1b9f619,18
0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee,18
0x2260fac5e5542a773aa44fbcfedf7c193bc2c599,18
0x514910771af9ca656af840dff83e8264ecf986ca,18
0x0bc529c00c6401aef6d220be8c6ea1667f6ad93e,18
0xD6DF932A45C0f255f85145f286eA0b292B21C90B,18
0x1f9840a85d5af5bf1d1762f925bdaddc4201f984,18
0x2791Bca1f2de4661ED88A30C99A7a9449Aa84174,6
0x39aa39c021dfbae8fac545936693ac917d5e7563,8
0x0d500B1d8E8eF31E21C99d1Db9A6444d3ADf1270,18
0x430EF9263E76DAE63c84292C3409D61c598E9682,18
0xc2132D05D31c914a87C6611C10748AEb04B58e8F,6
0x22a31bD4cB694433B6de19e0aCC2899E553e9481,18
0xE6469Ba6D2fD6130788E0eA9C0a0515900563b59,6
0x6f8a06447Ff6FcF75d803135a7de15CE88C1d4ec,18
0x831753DD7087CaC61aB5644b308642cc1c33Dc13,18
0x723b17718289a91af252d616de2c77944962d122,18
scripts/launch_analysis.sh (new file) | 27
@@ -0,0 +1,27 @@
#!/bin/bash
# This is a script to analyze MEV profits
# Input the pod id of mev-inspect (can be found on your Tilt interface)
# TODO: How to extract the mev-inspect pod id to copy the csv files?
mevInspectPoolId="mev-inspect-759f8dc6f7-2nnzl"
# Input the starting and ending blocks you want to run the profit analysis for
blockFrom=$((34500000))
blockTo=$((34800000))
window=$((100))
reps=$(((${blockTo}-${blockFrom})/${window}))
echo "${reps}"
for i in $(seq 0 1 $reps)
do
    from=$(($blockFrom + $i*$window))
    to=$(($blockFrom + ($i+1)*$window))
    echo "--"
    echo "rep= $i/$reps"
    echo "from= $from"
    echo "to= $to"
    ./mev inspect-many $from $to
done
./mev analyze-profit $blockFrom $blockTo True
declare -a file_names=("profit_by_date.csv" "profit_by_block_number.csv" "profit_by_category.csv")
for fname in "${file_names[@]}"
do
    kubectl cp $mevInspectPoolId:resources/$fname $fname;
done
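With these defaults, reps = (34800000 - 34500000) / 100 = 3000, so the loop issues about three thousand inspect-many calls over 100-block windows (seq 0 1 $reps is inclusive of both ends, so the last window runs one step past blockTo) before the final analyze-profit pass and the kubectl cp of the three generated CSVs.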