From acc7bc67a08bb8fd3f8f6c7d4d7b2d10bc3962e9 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Mon, 29 Apr 2024 08:57:56 +1000 Subject: [PATCH 01/17] add_sei (devnet) initial commit --- .env.example | 1 + fastlane_bot/config/network.py | 41 ++++++++++++++++++- fastlane_bot/config/selectors.py | 1 + .../sei/solidly_v2_event_mappings.csv | 1 + .../blockchain_data/sei/static_pool_data.csv | 3 ++ .../data/blockchain_data/sei/tokens.csv | 5 +++ .../sei/uniswap_v2_event_mappings.csv | 3 ++ .../sei/uniswap_v3_event_mappings.csv | 1 + fastlane_bot/data/multichain_addresses.csv | 2 + main.py | 2 +- 10 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 fastlane_bot/data/blockchain_data/sei/solidly_v2_event_mappings.csv create mode 100644 fastlane_bot/data/blockchain_data/sei/static_pool_data.csv create mode 100644 fastlane_bot/data/blockchain_data/sei/tokens.csv create mode 100644 fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv create mode 100644 fastlane_bot/data/blockchain_data/sei/uniswap_v3_event_mappings.csv diff --git a/.env.example b/.env.example index 5f1f4a5ae..5cb8f2b48 100644 --- a/.env.example +++ b/.env.example @@ -3,6 +3,7 @@ export ETH_PRIVATE_KEY_BE_CAREFUL="0x123-USE-YOUR-OWN-PRIVATE-KEY-HERE" export WEB3_FANTOM="FANTOM-API-KEY-HERE" // "public" can be used in place of a paid API key export WEB3_MANTLE="MANTLE-API-KEY-HERE" export WEB3_LINEA="LINEA-API-KEY-HERE" // +export WEB3_SEI="SEI-API-KEY-HERE" // #******** For Development - not required to run bot ********# export ETHERSCAN_TOKEN="ONLY_REQUIRED_IN_DEV" diff --git a/fastlane_bot/config/network.py b/fastlane_bot/config/network.py index 0bed6b542..be8952671 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -276,6 +276,7 @@ class ConfigNetwork(ConfigBase): NETWORK_FANTOM = S.NETWORK_FANTOM NETWORK_MANTLE = S.NETWORK_MANTLE NETWORK_LINEA = S.NETWORK_LINEA + NETWORK_SEI = S.NETWORK_SEI # FLAGS ####################################################################################### @@ -317,7 +318,9 @@ def new(cls, network=None): elif network == cls.NETWORK_MANTLE: return _ConfigNetworkMantle(_direct=False) elif network == cls.NETWORK_LINEA: - return _ConfigNetworkLinea(_direct=False) + return _ConfigNetworkLinea(_direct=False) + elif network == cls.NETWORK_SEI: + return _ConfigNetworkSei(_direct=False) elif network == cls.NETWORK_TENDERLY: return _ConfigNetworkTenderly(_direct=False) else: @@ -777,6 +780,42 @@ class _ConfigNetworkLinea(ConfigNetwork): # Add any exchanges unique to the chain here CHAIN_SPECIFIC_EXCHANGES = [] +class _ConfigNetworkSei(ConfigNetwork): + """ + Fastlane bot config -- network [Base Mainnet] + """ + + NETWORK = S.NETWORK_SEI + NETWORK_ID = "1" # TODO + NETWORK_NAME = "sei" + DEFAULT_PROVIDER = S.PROVIDER_ALCHEMY + RPC_ENDPOINT = "https://evm-rpc.arctic-1.seinetwork.io/" # TODO currently Sei devnet + WEB3_ALCHEMY_PROJECT_ID = os.environ.get("WEB3_SEI") + + network_df = get_multichain_addresses(network=NETWORK_NAME) + FASTLANE_CONTRACT_ADDRESS = "0xC7Dd38e64822108446872c5C2105308058c5C55C" #TODO - UPDATE WITH Mainnet + MULTICALL_CONTRACT_ADDRESS = "0x1E05037b9c4fEFaF3c45CD6F4F2C3197e6A43cD8" # previously 0xcA11bde05977b3631167028862bE2a173976CA11 + + CARBON_CONTROLLER_ADDRESS = "0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA" #TODO - UPDATE WITH Mainnet + CARBON_CONTROLLER_VOUCHER = "0xe4816658ad10bF215053C533cceAe3f59e1f1087" #TODO - UPDATE WITH Mainnet + + NATIVE_GAS_TOKEN_ADDRESS = 
"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE" + WRAPPED_GAS_TOKEN_ADDRESS = "0x26841a0A5D958B128209F4ea9a1DD7E61558c330" # TODO confirm for Mainnet + NATIVE_GAS_TOKEN_SYMBOL = "SEI" + WRAPPED_GAS_TOKEN_SYMBOL = "WSEI" + STABLECOIN_ADDRESS = "0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C" #TODO USDC on devnet + + IS_INJECT_POA_MIDDLEWARE = False + # Balancer + BALANCER_VAULT_ADDRESS = "0x7ccBebeb88696f9c8b061f1112Bb970158e29cA5" # # TODO Jellyswap on devnet + + CHAIN_FLASHLOAN_TOKENS = { + "0x26841a0A5D958B128209F4ea9a1DD7E61558c330": "WSEI", #TODO confirm for Mainnet + "0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C": "USDC", #TODO confirm for Mainnet + } + # Add any exchanges unique to the chain here + CHAIN_SPECIFIC_EXCHANGES = [] + class _ConfigNetworkTenderly(ConfigNetwork): """ Fastlane bot config -- network [Ethereum Tenderly] diff --git a/fastlane_bot/config/selectors.py b/fastlane_bot/config/selectors.py index d910f52be..791810fea 100644 --- a/fastlane_bot/config/selectors.py +++ b/fastlane_bot/config/selectors.py @@ -18,6 +18,7 @@ NETWORK_CANTO = "canto" NETWORK_FANTOM = "fantom" NETWORK_LINEA = "linea" +NETWORK_SEI = "sei" NETWORK_MANTLE = "mantle" NETWORK_SCROLL = "scroll" NETWORK_BSC = "binance_smart_chain" diff --git a/fastlane_bot/data/blockchain_data/sei/solidly_v2_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/solidly_v2_event_mappings.csv new file mode 100644 index 000000000..2785f2805 --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/solidly_v2_event_mappings.csv @@ -0,0 +1 @@ +exchange,address diff --git a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv new file mode 100644 index 000000000..09177afa2 --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv @@ -0,0 +1,3 @@ +cid,strategy_id,last_updated,last_updated_block,descr,pair_name,exchange_name,fee,fee_float,address,anchor,tkn0_address,tkn1_address,tkn0_decimals,tkn1_decimals,exchange_id,tkn0_symbol,tkn1_symbol,timestamp,tkn0_balance,tkn1_balance,liquidity,sqrt_price_q96,tick,tick_spacing,exchange,pool_type,tkn0_weight,tkn1_weight,tkn2_address,tkn2_decimals,tkn2_symbol,tkn2_balance,tkn2_weight,tkn3_address,tkn3_decimals,tkn3_symbol,tkn3_balance,tkn3_weight,tkn4_address,tkn4_decimals,tkn4_symbol,tkn4_balance,tkn4_weight,tkn5_address,tkn5_decimals,tkn5_symbol,tkn5_balance,tkn5_weight,tkn6_address,tkn6_decimals,tkn6_symbol,tkn6_balance,tkn6_weight,tkn7_address,tkn7_decimals,tkn7_symbol,tkn7_balance,tkn7_weight +0x1422169ab760ea6994358267b7d3783e8e7fa55c6a74b365b3fd3d17cbf4c6f1,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,dragonswap,0.003,0.003,0x01A34Dfa104F020FEE739268679338169945D5B1,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0x7b75109369ACb528d9fa989E227812a6589712b9,18,18,3,WSEI,DSWAP,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +0xbfd9612b2cb8035908dff18c040f64de75999cefd1020b5ce8a2e533c2ecd5dc,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,dragonswap,0.003,0.003,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,18,6,3,WSEI,USDC,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, diff --git 
a/fastlane_bot/data/blockchain_data/sei/tokens.csv b/fastlane_bot/data/blockchain_data/sei/tokens.csv new file mode 100644 index 000000000..d831bd4a1 --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/tokens.csv @@ -0,0 +1,5 @@ +address,decimals,symbol +0x26841a0A5D958B128209F4ea9a1DD7E61558c330,18,WSEI +0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,6,USDC +0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,18,WSEI +0x7b75109369ACb528d9fa989E227812a6589712b9,18,DSWAP diff --git a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv new file mode 100644 index 000000000..f0165604a --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv @@ -0,0 +1,3 @@ +exchange,address +dragonswap,0x01A34Dfa104F020FEE739268679338169945D5B1 +dragonswap,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732 \ No newline at end of file diff --git a/fastlane_bot/data/blockchain_data/sei/uniswap_v3_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/uniswap_v3_event_mappings.csv new file mode 100644 index 000000000..2785f2805 --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/uniswap_v3_event_mappings.csv @@ -0,0 +1 @@ +exchange,address diff --git a/fastlane_bot/data/multichain_addresses.csv b/fastlane_bot/data/multichain_addresses.csv index c1ba30702..235346cb8 100644 --- a/fastlane_bot/data/multichain_addresses.csv +++ b/fastlane_bot/data/multichain_addresses.csv @@ -135,3 +135,5 @@ sushiswap_v3,thundercore,uniswap_v3,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0 pancakeswap_v3,zkevm,uniswap_v3,0x0BFbCF9fa4f9C56B0F40a671Ad40E0805A091865,0x1b81D678ffb9C0263b24A97847620C99d213eB14,,, pancakeswap_v3,zksync,uniswap_v3,0x1BB72E0CbbEA93c08f535fc7856E0338D7F7a8aB,0xD70C70AD87aa8D45b8D59600342FB3AEe76E3c68,,, xfai_v0,linea,solidly_v2,0xa5136eAd459F0E61C99Cec70fe8F5C24cF3ecA26,0xD538be6e9026C13D130C9e17d509E69C8Bb0eF33,,222864, +carbon_v1,sei,carbon_v1,0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA,0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA,,17658678, +dragonswap,sei,uniswap_v2,0x5D370a6189F89603FaB67e9C68383e63F7B6A262,0x2346d3A6fb18Ff3ae590Ea31d9e41E6AB8c9f5EB,,1008775, diff --git a/main.py b/main.py index 42410df32..6ae2a767d 100644 --- a/main.py +++ b/main.py @@ -708,7 +708,7 @@ def _run_async_update_with_retries(mgr, current_block, max_retries=5): "--blockchain", default="ethereum", help="A blockchain from the list. 
Blockchains not in this list do not have a deployed Fast Lane contract and are not supported.", - choices=["ethereum", "coinbase_base", "fantom", "mantle", "linea"], + choices=["ethereum", "coinbase_base", "fantom", "mantle", "linea", "sei"], ) parser.add_argument( "--pool_data_update_frequency", From a3cf2cc9324da2f31aab792bea67867275108344 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Wed, 1 May 2024 21:55:18 +1000 Subject: [PATCH 02/17] update wrapped address --- fastlane_bot/config/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastlane_bot/config/network.py b/fastlane_bot/config/network.py index be8952671..cfec97aa3 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -800,7 +800,7 @@ class _ConfigNetworkSei(ConfigNetwork): CARBON_CONTROLLER_VOUCHER = "0xe4816658ad10bF215053C533cceAe3f59e1f1087" #TODO - UPDATE WITH Mainnet NATIVE_GAS_TOKEN_ADDRESS = "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE" - WRAPPED_GAS_TOKEN_ADDRESS = "0x26841a0A5D958B128209F4ea9a1DD7E61558c330" # TODO confirm for Mainnet + WRAPPED_GAS_TOKEN_ADDRESS = "0x57eE725BEeB991c70c53f9642f36755EC6eb2139" # TODO confirm for Mainnet NATIVE_GAS_TOKEN_SYMBOL = "SEI" WRAPPED_GAS_TOKEN_SYMBOL = "WSEI" STABLECOIN_ADDRESS = "0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C" #TODO USDC on devnet From ed0c578378f8b1b999584a710b32247e1d9b3699 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Wed, 1 May 2024 22:28:31 +1000 Subject: [PATCH 03/17] Update tokens.csv --- fastlane_bot/data/blockchain_data/sei/tokens.csv | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fastlane_bot/data/blockchain_data/sei/tokens.csv b/fastlane_bot/data/blockchain_data/sei/tokens.csv index d831bd4a1..af2dc29ce 100644 --- a/fastlane_bot/data/blockchain_data/sei/tokens.csv +++ b/fastlane_bot/data/blockchain_data/sei/tokens.csv @@ -3,3 +3,6 @@ address,decimals,symbol 0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,6,USDC 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,18,WSEI 0x7b75109369ACb528d9fa989E227812a6589712b9,18,DSWAP +0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE,18,SEI +0x9e7A8e558Ce582511f4104465a886b7bEfBC146b,18,JLY +0x57eE725BEeB991c70c53f9642f36755EC6eb2139,18,WSEI From 4bc3b6ee476c4c6c11de3252d6495d907f3e12e1 Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Mon, 11 Dec 2023 17:20:19 +0000 Subject: [PATCH 04/17] CPC v3.5 (as_dicts, as_df, minrw, price estimate) (cherry picked from commit 825ff7c605becacc1e5be762d368f627fd8af63f) --- fastlane_bot/tools/cpc.py | 60 ++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/fastlane_bot/tools/cpc.py b/fastlane_bot/tools/cpc.py index 1e74c5eb5..29f18289e 100644 --- a/fastlane_bot/tools/cpc.py +++ b/fastlane_bot/tools/cpc.py @@ -8,8 +8,8 @@ NOTE: this class is not part of the API of the Carbon protocol, and you must expect breaking changes even in minor version updates. Use at your own risk. 
""" -__VERSION__ = "3.4" -__DATE__ = "23/Jan/2024" +__VERSION__ = "3.5" +__DATE__ = "22/Apr/2023" from dataclasses import dataclass, field, asdict, InitVar from .simplepair import SimplePair as Pair @@ -836,7 +836,7 @@ def from_univ3(cls, Pmarg, uniL, uniPa, uniPb, pair, cid, fee, descr, params=Non constr="uv3", params=params, ) - + SOLIDLY_PRICE_SPREAD = 0.06 # 0.06 gives pretty good results for m=2.6 @classmethod def from_solidly( @@ -982,6 +982,9 @@ def from_solidly( print("[cpc::from_solidly] returning curve directly is deprecated; prepare to accept a list of curves in the future") return result + # minimun range width (pa/pb-1) for carbon curves and sqrt thereof + CARBON_MIN_RANGEWIDTH = 1e-6 + @classmethod def from_carbon( cls, @@ -999,6 +1002,7 @@ def from_carbon( descr=None, params=None, isdydx=True, + minrw=None, ): """ constructor: from a single Carbon order (see class docstring for other parameters) (1) @@ -1011,6 +1015,7 @@ def from_carbon( :B: alternative to pa, pb: B = sqrt(pb) in dy/dy :tkny: token y :isdydx: if True prices in dy/dx, if False in quote direction of the pair + :minrw: minimum perc width (pa/pb-1) of range (default CARBON_MIN_RANGEWIDTH) NOTE 1: that ALL parameters are mandatory, except that EITHER pa, bp OR A, B must be given but not both; we do not correct for incorrect assignment of @@ -1028,7 +1033,10 @@ def from_carbon( # assert not fee is None, "fee must not be None" # assert not cid is None, "cid must not be None" # assert not descr is None, "descr must not be None" - + + if minrw is None: + minrw = cls.CARBON_MIN_RANGEWIDTH + # if yint is None: # yint = y assert y <= yint, "y must be <= yint" @@ -1067,15 +1075,21 @@ def from_carbon( if not tkny == tknq: pa, pb = 1 / pa, 1 / pb - # zero-width ranges are somewhat extended for numerical stability + # small and zero-width ranges are extended for numerical stability pa0, pb0 = pa, pb + if pa/pb-1 < minrw: + pa = pb = sqrt(pa*pb) + assert pa == pb, "just making sure" if pa == pb: - pa *= 1.0000001 - pb /= 1.0000001 + # pa *= 1.0000001 + # pb /= 1.0000001 + rw_multiplier = sqrt(1+minrw) + pa *= rw_multiplier + pb /= rw_multiplier # validation - if not pa > pb: - raise cls.CPCValidationError(f"pa > pb required ({pa}, {pb})") + if not pa/pb - 1 >= minrw*0.99: + raise cls.CPCValidationError(f"pa +> pb required ({pa}, {pb}, {pa/pb-1}, {minrw})") # finally set A, B A = sqrt(pa) - sqrt(pb) @@ -1094,7 +1108,7 @@ def from_carbon( yasym_times_A = yint * B kappa_times_A = yint**2 / A - params0 = dict(y=y, yint=yint, A=A0, B=B, pa=pa0, pb=pb0) + params0 = dict(y=y, yint=yint, A=A0, B=B, pa=pa0, pb=pb0, minrw=minrw) if params is None: params = AttrDict(params0) else: @@ -1805,13 +1819,15 @@ def scale(self, tkn): """returns the scale of tkn""" return self.tokenscale.scale(tkn) - def asdicts(self): + def as_dicts(self): """returns list of dictionaries representing the curves""" return [c.asdict() for c in self.curves] - - def asdf(self): + asdicts = as_dicts # legacy name + + def as_df(self): """returns pandas dataframe representing the curves""" return pd.DataFrame.from_dict(self.asdicts()).set_index("cid") + asdf = as_df # legacy name @classmethod def from_dicts(cls, dicts, *, tokenscale=None): @@ -2531,7 +2547,7 @@ def xystats(self, curves=None): PE_DATA = "data" def price_estimate( - self, *, tknq=None, tknb=None, pair=None, result=None, raiseonerror=True + self, *, tknq=None, tknb=None, pair=None, result=None, raiseonerror=True, verbose=False ): """ calculates price estimate in the reference token as base token @@ 
-2544,6 +2560,7 @@ def price_estimate( :PE_PAIR: slashpair :PE_CURVES: curves :PE_DATA: prices, weights + :verbose: whether to print some progress :returns: price (quote per base) """ assert tknq is not None and tknb is not None or pair is not None, ( @@ -2570,6 +2587,8 @@ def price_estimate( # return dict(curves=tuple(crvs), rcurves=tuple(rcrvs)) return tuple(acurves) data = tuple((r[1], sqrt(r[2])) for r in acurves) + if verbose: + print(f"[price_estimate] {tknq}/{tknb} {len(data)} curves") if not len(data) > 0: if raiseonerror: raise ValueError(f"no curves found for {tknq}/{tknb}") @@ -2620,13 +2639,13 @@ def price_estimates( tknqs = [t.strip() for t in tknqs.split(",")] if isinstance(tknbs, str): tknbs = [t.strip() for t in tknbs.split(",")] - # print(f"[price_estimates] tknqs [{len(tknqs)}], tknbs [{len(tknbs)}]") - # print(f"[price_estimates] tknqs [{len(tknqs)}] = {tknqs} , tknbs [{len(tknbs)}]] = {tknbs} ") + if verbose: + print(f"[price_estimates] tknqs [{len(tknqs)}] = {tknqs} , tknbs [{len(tknbs)}] = {tknbs} ") resulttp = self.PE_PAIR if pairs else None result = np.array( [ [ - self.price_estimate(tknb=b, tknq=q, raiseonerror=False, result=resulttp) + self.price_estimate(tknb=b, tknq=q, raiseonerror=False, result=resulttp, verbose=verbose) for b in tknbs ] for q in tknqs @@ -2696,12 +2715,7 @@ def price_estimates( } # print("[price_estimates] result", result) if not len(missing) == 0: - raise ValueError( - f"no price found for {len(missing)} pairs", - result, - missing, - len(missing), - ) + raise ValueError(f"no price found for {len(missing)} pairs", missing, result) #print(f"[price_estimates] DONE [{time.time()-start_time:.2f}s]") if unwrapsingle and len(tknqs) == 1: From 645b02c826b63baa61b63fde3631f213f26661fe Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Mon, 11 Dec 2023 19:34:13 +0000 Subject: [PATCH 05/17] MargPOptimizer 5.3-b1 Update margpoptimizer.py (cherry picked from commit ff03bd02bb84eefd7857011cde7390a7b177fd78) --- .../tools/optimizer/margpoptimizer.py | 427 ++++++++++-------- 1 file changed, 245 insertions(+), 182 deletions(-) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py b/fastlane_bot/tools/optimizer/margpoptimizer.py index eb207f7e9..a23ff409b 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -22,8 +22,8 @@ (c) Copyright Bprotocol foundation 2023. 
Licensed under MIT """ -__VERSION__ = "5.2" -__DATE__ = "15/Sep/2023" +__VERSION__ = "5.3-b1" +__DATE__ = "14/Dec/2023" from dataclasses import dataclass, field, fields, asdict, astuple, InitVar import pandas as pd @@ -52,26 +52,26 @@ def kind(self): return "margp" @classmethod - def jacobian(cls, func, x, *, eps=None): + def jacobian(cls, func, x, *, jach=None): """ computes the Jacobian of func at point x :func: a callable x=(x1..xn) -> (y1..ym), taking and returning np.arrays - must also take a quiet parameter, which if True suppresses output + must also take a `quiet` parameter, which, if True suppresses output :x: a vector x=(x1..xn) as np.array + :jach: the h value for the derivative (Jacobian) calculation (default: cls.MOJACH) """ - if eps is None: - eps = cls.JACEPS + h = cls.MOJACH if jach is None else jach n = len(x) y = func(x, quiet=True) jac = np.zeros((n, n)) for j in range(n): # through columns to allow for vector addition - Dxj = abs(x[j]) * eps if x[j] != 0 else eps + Dxj = abs(x[j]) * h if x[j] != 0 else h x_plus = [(xi if k != j else xi + Dxj) for k, xi in enumerate(x)] jac[:, j] = (func(x_plus, quiet=True) - y) / Dxj return jac J = jacobian - JACEPS = 1e-5 + MOJACH = 1e-5 MO_DEBUG = "debug" @@ -81,12 +81,35 @@ def jacobian(cls, func, x, *, eps=None): MO_MINIMAL = "minimal" MO_FULL = "full" - MOEPS = 1e-6 - MOMAXITER = 50 + MOCRITR = "rel" # relative convergence criterion used + MOCRITA = "abs" # ditto absolute + MOEPS = 1e-6 # relative convergence threshold + MOEPSAUNIT = "USD" # absolute convergence unit + MOEPSA = 1 # absolute convergence threshold (unit: MOCAUNIT) + MONORML1 = 1 # L1 norm (sum of absolute values) + MONORML2 = 2 # L2 norm (Euclidean distance) + MONORMLINF = np.inf # L-infinity norm (maximum absolute value) + + MOMAXITER = 50 class OptimizationError(Exception): pass class ConvergenceError(OptimizationError): pass class ParameterError(OptimizationError): pass + + @classmethod + def norml1_f(cls, x): + """the L1 norm of a vector x""" + return np.linalg.norm(x, ord=1) + + @classmethod + def norml2_f(cls, x): + """the L2 norm of a vector x""" + return np.linalg.norm(x, ord=2) + + @classmethod + def normlinf_f(cls, x): + """the Linf norm of a vector x""" + return np.linalg.norm(x, ord=np.inf) def optimize(self, sfc=None, result=None, *, params=None): """ @@ -120,7 +143,12 @@ def optimize(self, sfc=None, result=None, *, params=None): ================== ========================================================================= parameter meaning ================== ========================================================================= - eps precision parameter for accepting the result (default: 1e-6) + crit criterion: MOCRITR (relative; default) or MOCRITA (absolute) + norm norm for convergence crit (MONORML1, MONORML2, MONORMLINF) + eps relative convergence threshold (default: MOEPS) + epsa absolute convergence threshold (default: MOEPSA) + epsaunit unit for epsa (default: MOEPSAUNIT) + jach step size for calculating Jacobian (default: MOJACH) maxiter maximum number of iterations (default: 100) verbose if True, print some high level output progress if True, print some basic progress output @@ -129,11 +157,10 @@ def optimize(self, sfc=None, result=None, *, params=None): raiseonerror if True, raise an OptimizationError exception on error pstart starting price for optimization (3) ================== ========================================================================= - - + NOTE 1: this optimizer uses the marginal price method, ie it solves the equation - 
dx_i (p) = 0 for all i != targettkn, and the whole price vector + dx_i (p) = 0 for all i != targettkn, and the whole price vector p NOTE 2: at the moment only the trivial self-financing constraint is allowed, ie the one that only specifies the target token, and where all other constraints are zero; if sfc is @@ -143,6 +170,8 @@ def optimize(self, sfc=None, result=None, *, params=None): returned by MO_PSTART; excess tokens can be provided but all required tokens must be present """ + start_time = time.time() + # data conversion: string to SFC object; note that anything but pure arb not currently supported if isinstance(sfc, str): sfc = self.arb(targettkn=sfc) @@ -155,174 +184,191 @@ def optimize(self, sfc=None, result=None, *, params=None): dxdy_f = lambda r: (np.array(r[0:2])) # extract dx, dy from result tn = lambda t: t.split("-")[0] # token name, eg WETH-xxxx -> WETH - # initialisations + # epsilons and maxiter eps = P("eps") or self.MOEPS + epsa = P("epsa") or self.MOEPSA + epsaunit = P("epsaunit") or self.MOEPSAUNIT + jach = P("jach") or self.MOJACH maxiter = P("maxiter") or self.MOMAXITER - start_time = time.time() + + # curves, tokens and pairs curves_t = self.curve_container + if len (curves_t) == 0: + raise self.ParameterError("no curves found") + if len (curves_t) == 1: + raise self.ParameterError(f"can't run arbitrage on single curve {curves_t}") + alltokens_s = self.curve_container.tokens() + if not targettkn in alltokens_s: + raise self.ParameterError(f"targettkn {targettkn} not in {alltokens_s}") + tokens_t = tuple(t for t in alltokens_s if t != targettkn) # all _other_ tokens... tokens_ix = {t: i for i, t in enumerate(tokens_t)} # ...with index lookup pairs = self.curve_container.pairs(standardize=False) - curves_by_pair = { - pair: tuple(c for c in curves_t if c.pair == pair) for pair in pairs } + curves_by_pair = {pair: tuple(c for c in curves_t if c.pair == pair) for pair in pairs } pairs_t = tuple(tuple(p.split("/")) for p in pairs) + + # return the inner function if requested + # (this may need to move lower) + if result == self.MO_DTKNFROMPF: + return dtknfromp_f + + # return debug info if requested + if result == self.MO_DEBUG: + return dict( + tokens_t=tokens_t, + tokens_ix=tokens_ix, + pairs=pairs, + sfc=sfc, + targettkn=targettkn, + pairs_t=pairs_t, + crit=dict(crit=P("crit"), eps=eps, epsa=epsa, epsaunit=epsaunit, pstart=P("pstart")), + optimizer=self, + ) - try: - - # assertions - if len (curves_t) == 0: - raise self.ParameterError("no curves found") - if len (curves_t) == 1: - raise self.ParameterError(f"can't run arbitrage on single curve {curves_t}") - if not targettkn in alltokens_s: - raise self.ParameterError(f"targettkn {targettkn} not in {alltokens_s}") - - # calculating the start price for the iteration process - if not P("pstart") is None: - pstart = P("pstart") - if P("verbose") or P("debug"): - print(f"[margp_optimizer] using pstartd [{len(P('pstart'))} tokens]") - if isinstance(P("pstart"), pd.DataFrame): - try: - pstart = pstart.to_dict()[targettkn] - except Exception as e: - raise Exception( - f"error while converting dataframe pstart to dict: {e}", - pstart, - targettkn, - ) - assert isinstance( - pstart, dict - ), f"pstart must be a dict or a data frame [{pstart}]" - price_estimates_t = tuple(pstart[t] for t in tokens_t) - else: - if P("verbose") or P("debug"): - print("[margp_optimizer] calculating price estimates") + # pstart + pstart = P("pstart") + if not pstart is None: + if P("verbose") or P("debug"): + print(f"[margp_optimizer] using 
pstart [{len(P('pstart'))} tokens]") + if isinstance(pstart, pd.DataFrame): try: - price_estimates_t = self.price_estimates( - tknq=targettkn, - tknbs=tokens_t, - verbose=False, - triangulate=True, - ) + pstart = pstart.to_dict()[targettkn] except Exception as e: - if P("verbose") or P("debug"): - print(f"[margp_optimizer] error while calculating price estimates: [{e}]") - price_estimates_t = None + raise Exception(f"error while converting dataframe pstart to dict: {e}", pstart, targettkn) + assert isinstance(pstart, dict), f"pstart must be a dict or a data frame [{pstart}]" + price_estimates_t = tuple(pstart[t] for t in tokens_t) + else: + if P("verbose") or P("debug"): + print("[margp_optimizer] calculating price estimates") if P("debug"): - print("[margp_optimizer] pstart:", price_estimates_t) - if result == self.MO_PSTART: - df = pd.DataFrame(price_estimates_t, index=tokens_t, columns=[targettkn]) - df.index.name = "tknb" - return df + print(f"[margp_optimizer] tknq={targettkn}, tknbs={tokens_t}") + + try: + price_estimates_t = self.price_estimates( + tknq=targettkn, + tknbs=tokens_t, + verbose=P("debug"), + triangulate=True, + ) + except Exception as e: + if P("verbose") or P("debug"): + print(f"[margp_optimizer] error calculating price estimates: [{e}]") + price_estimates_t = None + raise + + if P("debug"): + print("[margp_optimizer] pstart:", price_estimates_t) + if result == self.MO_PSTART: + df = pd.DataFrame(price_estimates_t, index=tokens_t, columns=[targettkn]) + df.index.name = "tknb" + return df - ## INNER FUNCTION: CALCULATE THE TARGET FUNCTION - def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): - """ - calculates the aggregate change in token amounts for a given price vector - - :p: price vector, where prices use the reference token as quote token - this vector is an np.array, and the token order is the same as in tokens_t - :islog10: if True, p is interpreted as log10(p) - :asdct: if True, the result is returned as dict AND tuple, otherwise as np.array - :quiet: if overrides P("debug") etc, eg for calc of Jacobian - :returns: if asdct is False, a tuple of the same length as tokens_t detailing the - change in token amounts for each token except for the target token (ie the - quantity with target zero; if asdct is True, that same information is - returned as dict, including the target token. 
- """ - p = np.array(p, dtype=np.float64) - if islog10: - p = np.exp(p * np.log(10)) - assert len(p) == len(tokens_t), f"p and tokens_t have different lengths [{p}, {tokens_t}]" - if P("debug") and not quiet: - print(f"\n[dtknfromp_f] =====================>>>") - print(f"prices={p}") - print(f"tokens={tokens_t}") + # criterion and norm + crit = P("crit") or self.MOCRITR + assert crit in set((self.MOCRITR, self.MOCRITA)), "crit must be MOCRITR or MOCRITA" + if crit == self.MOCRITA: + assert not pstart is None, "pstart must be provided if crit is MOCRITA" + assert epsaunit in pstart, f"epsaunit {epsaunit} not in pstart {P('pstart')}" + p_targettkn_per_epsaunit = pstart[epsaunit]/pstart[targettkn] + if P("debug"): + print(f"[margp_optimizer] 1 epsaunit [{epsaunit}] = {p_targettkn_per_epsaunit:,.4f} target [{targettkn}]") + crit_is_relative = crit == self.MOCRITR + eps_used = eps if crit_is_relative else epsa + eps_unit = 1 if crit_is_relative else epsaunit + + norm = P("norm") or self.MONORML2 + assert norm in set((self.MONORML1, self.MONORML2, self.MONORMLINF)), f"norm must be MONORML1, MONORML2 or MONORMLINF [{norm}]" + normf = lambda x: np.linalg.norm(x, ord=norm) - # pvec is dict {tkn -> (log) price} for all tokens in p - pvec = {tkn: p_ for tkn, p_ in zip(tokens_t, p)} - pvec[targettkn] = 1 - if P("debug") and not quiet: - print(f"pvec={pvec}") + ## INNER FUNCTION: CALCULATE THE TARGET FUNCTION + def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): + """ + calculates the aggregate change in token amounts for a given price vector + + :p: price vector, where prices use the reference token as quote token + this vector is an np.array, and the token order is the same as in tokens_t + :islog10: if True, p is interpreted as log10(p) + :asdct: if True, the result is returned as dict AND tuple, otherwise as np.array + :quiet: if overrides P("debug") etc, eg for calc of Jacobian + :returns: if asdct is False, a tuple of the same length as tokens_t detailing the + change in token amounts for each token except for the target token (ie the + quantity with target zero; if asdct is True, that same information is + returned as dict, including the target token. 
+ """ + p = np.array(p, dtype=np.float64) + if islog10: + p = np.exp(p * np.log(10)) + assert len(p) == len(tokens_t), f"p and tokens_t have different lengths [{p}, {tokens_t}]" + if P("debug") and not quiet: + print(f"\n[dtknfromp_f] =====================>>>") + print(f"prices={p}") + print(f"tokens={tokens_t}") + + # pvec is dict {tkn -> (log) price} for all tokens in p + pvec = {tkn: p_ for tkn, p_ in zip(tokens_t, p)} + pvec[targettkn] = 1 + if P("debug") and not quiet: + print(f"pvec={pvec}") + + sum_by_tkn = {t: 0 for t in alltokens_s} + for pair, (tknb, tknq) in zip(pairs, pairs_t): + if get(p, tokens_ix.get(tknq)) > 0: + price = get(p, tokens_ix.get(tknb)) / get(p, tokens_ix.get(tknq)) + else: + #print(f"[dtknfromp_f] warning: price for {pair} is unknown, using 1 instead") + price = 1 + curves = curves_by_pair[pair] + c0 = curves[0] + #dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) + dxvecs = (c.dxvecfrompvec_f(pvec) for c in curves) - sum_by_tkn = {t: 0 for t in alltokens_s} - for pair, (tknb, tknq) in zip(pairs, pairs_t): - if get(p, tokens_ix.get(tknq)) > 0: - price = get(p, tokens_ix.get(tknb)) / get(p, tokens_ix.get(tknq)) - else: - #print(f"[dtknfromp_f] warning: price for {pair} is unknown, using 1 instead") - price = 1 - curves = curves_by_pair[pair] - c0 = curves[0] - #dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - dxvecs = (c.dxvecfrompvec_f(pvec) for c in curves) - - if P("debug2") and not quiet: - dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - # TODO: rewrite this using the dxvec - # there is no need to extract dy dx; just iterate over dict - # however not urgent because this is debug code - print(f"\n{c0.pairp} --->>") - print(f" price={price:,.4f}, 1/price={1/price:,.4f}") - for r, c in zip(dxdy, curves): - s = f" cid={c.cid:15}" - s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" - s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" - s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" - print(s) - print(f"<<--- {c0.pairp}") - - # old code from dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - # sumdx, sumdy = sum(dxdy) - # sum_by_tkn[tknq] += sumdy - # sum_by_tkn[tknb] += sumdx - for dxvec in dxvecs: - for tkn, dx_ in dxvec.items(): - sum_by_tkn[tkn] += dx_ - - # if P("debug") and not quiet: - # print(f"pair={c0.pairp}, {sumdy:,.4f} {tn(tknq)}, {sumdx:,.4f} {tn(tknb)}, price={price:,.4f} {tn(tknq)} per {tn(tknb)} [{len(curves)} funcs]") - - result = tuple(sum_by_tkn[t] for t in tokens_t) - if P("debug") and not quiet: - print(f"sum_by_tkn={sum_by_tkn}") - print(f"result={result}") - print(f"<<<===================== [dtknfromp_f]") - - if asdct: - return sum_by_tkn, np.array(result) - - return np.array(result) - ## END INNER FUNCTION - - # return the inner function if requested - if result == self.MO_DTKNFROMPF: - return dtknfromp_f - - # return debug info if requested - if result == self.MO_DEBUG: - return dict( - # price_estimates_all = price_estimates_all, - # price_estimates_d = price_estimates_d, - price_estimates_t=price_estimates_t, - tokens_t=tokens_t, - tokens_ix=tokens_ix, - pairs=pairs, - sfc=sfc, - targettkn=targettkn, - pairs_t=pairs_t, - dtknfromp_f=dtknfromp_f, - optimizer=self, - ) + if P("debug2") and not quiet: + dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) + # TODO: rewrite this using the dxvec + # there is no need to extract dy dx; just iterate over dict + # however not urgent because this is debug code + print(f"\n{c0.pairp} --->>") + print(f" price={price:,.4f}, 1/price={1/price:,.4f}") + for r, c in 
zip(dxdy, curves): + s = f" cid={c.cid:15}" + s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" + s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" + s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" + print(s) + print(f"<<--- {c0.pairp}") + + # old code from dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) + # sumdx, sumdy = sum(dxdy) + # sum_by_tkn[tknq] += sumdy + # sum_by_tkn[tknb] += sumdx + for dxvec in dxvecs: + for tkn, dx_ in dxvec.items(): + sum_by_tkn[tkn] += dx_ + + # if P("debug") and not quiet: + # print(f"pair={c0.pairp}, {sumdy:,.4f} {tn(tknq)}, {sumdx:,.4f} {tn(tknb)}, price={price:,.4f} {tn(tknq)} per {tn(tknb)} [{len(curves)} funcs]") + + result = tuple(sum_by_tkn[t] for t in tokens_t) + if P("debug") and not quiet: + print(f"sum_by_tkn={sum_by_tkn}") + print(f"result={result}") + print(f"<<<===================== [dtknfromp_f]") + + if asdct: + return sum_by_tkn, np.array(result) + + return np.array(result) + ## END INNER FUNCTION + try: + # setting up the optimization variables (note: we optimize in log space) if price_estimates_t is None: raise Exception(f"price estimates not found; try setting pstart") p = np.array(price_estimates_t, dtype=float) plog10 = np.log10(p) - if P("verbose"): + if P("verbose") or P("debug"): # dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) print("[margp_optimizer] pe ", p) print("[margp_optimizer] p ", ", ".join(f"{x:,.2f}" for x in p)) @@ -335,9 +381,7 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): for i in range(maxiter): if P("progress"): - print( - f"Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s" - ) + print(f"Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s") # calculate the change in token amounts (also as dict if requested) if P("tknd"): @@ -348,7 +392,7 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # calculate the Jacobian # if P("debug"): # print("\n[margp_optimizer] ============= JACOBIAN =============>>>") - J = self.J(dtknfromp_f, plog10) + J = self.J(dtknfromp_f, plog10, jach=jach) # ATTENTION: dtknfromp_f takes log10(p) as input if P("debug"): # print("==== J ====>") @@ -367,35 +411,55 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html - # update log prices, prices and determine the criterium... + # update log prices, prices... 
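# Newton-Raphson step in log-price space: dplog10 (from np.linalg.solve / lstsq above)
# solves J @ dplog10 = -dtkn, i.e. it is the increment in log10(p) that, to first order,
# drives the aggregate token imbalances dtkn towards zero; applying it below and then
# exponentiating keeps every price strictly positive, so each iteration adjusts the
# price vector p multiplicatively.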
p0log10 = [*plog10] plog10 += dplog10 p = np.exp(plog10 * np.log(10)) - criterium = np.linalg.norm(dplog10) + # determine the convergence criterium + if crit_is_relative: + criterium = normf(dplog10) + # the relative criterium is the norm of the change in log prices + # in other words, it is something like an "average percentage change" of prices + # this may not quite what we want though because if we have highly levered curves, + # then even small percentage changes in prices can be important + # eg for limit orders the whole liquidity is by default distributed + # over a small range that may only be minrw=1e-6 wide + + else: + p_in_epsaunit = p / p_targettkn_per_epsaunit + # p is denominated in targettkn + # p_in_epsaunit in epsaunit + criterium = normf(dtkn*p_in_epsaunit) + if P("debug"): + print(f"[margp_optimizer] tokens_t={tokens_t} [{targettkn}]") + print(f"[margp_optimizer] dtkn={dtkn}") + print(f"[margp_optimizer] p={p} {targettkn}") + print(f"[margp_optimizer] p={p_in_epsaunit} {epsaunit}") + if P("verbose") or P("debug"): + print(f"[margp_optimizer] crit=normf({dtkn*p_in_epsaunit}) = {criterium} {epsaunit}") + # ...print out some info if requested... if P("verbose"): print(f"\n[margp_optimizer] ========== cycle {i} =======>>>") - print("log p0", p0log10) - print("log dp", dplog10) - print("log p ", plog10) - print("p ", tuple(p)) - print("p ", ", ".join(f"{x:,.2f}" for x in p)) - print("1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - print("tokens_t", tokens_t) + print("log p0 ", p0log10) + print("log dp ", dplog10) + print("log p ", plog10) + print("p_t ", tuple(p), targettkn) + print("p ", ", ".join(f"{x:,.2f}" for x in p)) + print("1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) + print("tokens ", tokens_t) # print("dtkn", dtkn) - print("dtkn", ", ".join(f"{x:,.3f}" for x in dtkn)) - print( - f"[criterium={criterium:.2e}, eps={eps:.1e}, c/e={criterium/eps:,.0e}]" - ) + print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) + print(f"crit {criterium:.2e} [{eps_unit}; L{norm}], eps={eps_used}, c/e={criterium/eps_used:,.0e}]") if P("tknd"): - print("dtkn_d", dtkn_d) + print("dtkn_d ", dtkn_d) if P("J"): - print("J", J) + print("J ", J) print(f"<<<========== cycle {i} ======= [margp_optimizer]") # ...and finally check the criterium (percentage changes this step) for convergence - if criterium < eps: + if criterium < eps_used: if i != 0: # we don't break in the first iteration because we need this first iteration # to establish a common baseline price, therefore d logp ~ 0 is not good @@ -446,4 +510,3 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): errormsg=e, ) margp_optimizer = optimize # margp_optimizer is deprecated - From 8d687aae1b9de7f1d3f1584f13d9d367f21121ed Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Tue, 30 Apr 2024 21:49:09 +0100 Subject: [PATCH 06/17] MargPOptimzer v5.3-b2 This is manually merging the changes from c4a110c9297abff1f355b03155c1052dcd1bd2fa into v5.3-b1 --- .../tools/optimizer/margpoptimizer.py | 93 +++++++++++-------- 1 file changed, 55 insertions(+), 38 deletions(-) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py b/fastlane_bot/tools/optimizer/margpoptimizer.py index a23ff409b..3ca49c6e6 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -22,8 +22,10 @@ (c) Copyright Bprotocol foundation 2023. 
Licensed under MIT """ -__VERSION__ = "5.3-b1" -__DATE__ = "14/Dec/2023" +__VERSION__ = "5.3-b2" +# MERGING THE CHANGES FROM c4a110c9297abff1f355b03155c1052dcd1bd2fa + +__DATE__ = "30/Apr/2024" from dataclasses import dataclass, field, fields, asdict, astuple, InitVar import pandas as pd @@ -150,10 +152,12 @@ def optimize(self, sfc=None, result=None, *, params=None): epsaunit unit for epsa (default: MOEPSAUNIT) jach step size for calculating Jacobian (default: MOJACH) maxiter maximum number of iterations (default: 100) - verbose if True, print some high level output - progress if True, print some basic progress output - debug if True, print some debug output - debug2 more debug output + progress if True, print progress output + verbose ditto, high level output + debug ditto, basic debug output + debug_j ditto, additional debug output (Jacobian) + debug_dtkn ditto (d Token) + debug_dtkn2 ditto (more d Token; requires debug_dtkn) raiseonerror if True, raise an OptimizationError exception on error pstart starting price for optimization (3) ================== ========================================================================= @@ -300,16 +304,19 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): if islog10: p = np.exp(p * np.log(10)) assert len(p) == len(tokens_t), f"p and tokens_t have different lengths [{p}, {tokens_t}]" - if P("debug") and not quiet: - print(f"\n[dtknfromp_f] =====================>>>") - print(f"prices={p}") - print(f"tokens={tokens_t}") + if P("debug_dtkn") and not quiet: + print(f"\n[dtknfromp_f]\n=====================>>>") + #print(f"prices={p}") + #print(f"tokens={tokens_t}") + print( "p ", ", ".join(f"{x:,.2f}" for x in p)) + print( "1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) + print(f"{targettkn} <-", ", ".join(tokens_t)) # pvec is dict {tkn -> (log) price} for all tokens in p pvec = {tkn: p_ for tkn, p_ in zip(tokens_t, p)} pvec[targettkn] = 1 - if P("debug") and not quiet: - print(f"pvec={pvec}") + # if P("debug") and not quiet: + # print(f"pvec={pvec}") sum_by_tkn = {t: 0 for t in alltokens_s} for pair, (tknb, tknq) in zip(pairs, pairs_t): @@ -323,7 +330,7 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): #dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) dxvecs = (c.dxvecfrompvec_f(pvec) for c in curves) - if P("debug2") and not quiet: + if P("debug_dtkn2") and not quiet: dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) # TODO: rewrite this using the dxvec # there is no need to extract dy dx; just iterate over dict @@ -331,7 +338,7 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): print(f"\n{c0.pairp} --->>") print(f" price={price:,.4f}, 1/price={1/price:,.4f}") for r, c in zip(dxdy, curves): - s = f" cid={c.cid:15}" + s = f" cid={c.cid[2:6]}{c.cid[-2:]}" s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" @@ -350,10 +357,10 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # print(f"pair={c0.pairp}, {sumdy:,.4f} {tn(tknq)}, {sumdx:,.4f} {tn(tknb)}, price={price:,.4f} {tn(tknq)} per {tn(tknb)} [{len(curves)} funcs]") result = tuple(sum_by_tkn[t] for t in tokens_t) - if P("debug") and not quiet: - print(f"sum_by_tkn={sum_by_tkn}") + if P("debug_dtkn") and not quiet: + print(f"\nsum_by_tkn={sum_by_tkn}") print(f"result={result}") - print(f"<<<===================== [dtknfromp_f]") + print(f"<<<=====================") if asdct: return sum_by_tkn, np.array(result) @@ -370,9 +377,10 @@ def dtknfromp_f(p, 
*, islog10=True, asdct=False, quiet=False): plog10 = np.log10(p) if P("verbose") or P("debug"): # dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) - print("[margp_optimizer] pe ", p) - print("[margp_optimizer] p ", ", ".join(f"{x:,.2f}" for x in p)) - print("[margp_optimizer] 1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) + print(f"[margp_optimizer] {targettkn} <-", ", ".join(tokens_t)) + print( "[margp_optimizer] p_t ", p) + print( "[margp_optimizer] p ", ", ".join(f"{x:,.2f}" for x in p)) + print( "[margp_optimizer] 1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) # print("[margp_optimizer] dtkn", dtkn) # if P("tknd"): # print("[margp_optimizer] dtkn_d", dtkn_d) @@ -381,8 +389,9 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): for i in range(maxiter): if P("progress"): - print(f"Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s") - + print( + f"\n[margp_optimizer] Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s" + ) # calculate the change in token amounts (also as dict if requested) if P("tknd"): dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) @@ -391,30 +400,33 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # calculate the Jacobian # if P("debug"): - # print("\n[margp_optimizer] ============= JACOBIAN =============>>>") + # print("\n[margp_optimizer] calculating Jacobian") + J = self.J(dtknfromp_f, plog10, jach=jach) # ATTENTION: dtknfromp_f takes log10(p) as input - if P("debug"): - # print("==== J ====>") - print("\n============= JACOBIAN =============>>>") + + if P("debug_j"): + print("\n[margp_optimizer]\n============= JACOBIAN =============>>>") print(J) - # print("<=== J =====") print("<<<============= JACOBIAN =============\n") # Update p, dtkn using the Newton-Raphson formula try: dplog10 = np.linalg.solve(J, -dtkn) + except np.linalg.LinAlgError: + if P("verbose") or P("debug"): - print("[margp_optimizer] singular Jacobian, using lstsq instead") + print("\n[margp_optimizer] singular Jacobian, using lstsq instead") + dplog10 = np.linalg.lstsq(J, -dtkn, rcond=None)[0] # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html # update log prices, prices... - p0log10 = [*plog10] - plog10 += dplog10 - p = np.exp(plog10 * np.log(10)) + p0log10 = [*plog10] # keep current log prices (deep copy) + plog10 += dplog10 # update log prices + p = np.exp(plog10 * np.log(10)) # expand log to actual prices # determine the convergence criterium if crit_is_relative: @@ -441,22 +453,27 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # ...print out some info if requested... 
if P("verbose"): - print(f"\n[margp_optimizer] ========== cycle {i} =======>>>") - print("log p0 ", p0log10) - print("log dp ", dplog10) - print("log p ", plog10) + print(f"\n[margp_optimizer]\n========== cycle {i} =======>>>") + print(f"{targettkn} <-", ", ".join(tokens_t)) + print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) + print("log p0", p0log10) # previous log prices + print("d logp", dplog10) # change in log prices + print("log p ", plog10) # current log prices print("p_t ", tuple(p), targettkn) print("p ", ", ".join(f"{x:,.2f}" for x in p)) print("1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - print("tokens ", tokens_t) + #print("tokens ", tokens_t) # print("dtkn", dtkn) - print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) + #print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) print(f"crit {criterium:.2e} [{eps_unit}; L{norm}], eps={eps_used}, c/e={criterium/eps_used:,.0e}]") + + # TODO: DEAL WITH THOSE DEBUG FLAGS if P("tknd"): print("dtkn_d ", dtkn_d) if P("J"): print("J ", J) - print(f"<<<========== cycle {i} ======= [margp_optimizer]") + + print(f"<<<========== cycle {i} =======") # ...and finally check the criterium (percentage changes this step) for convergence if criterium < eps_used: From b150baf4194b4514f7b604e8dde505acd3b3629b Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Tue, 30 Apr 2024 21:52:40 +0100 Subject: [PATCH 07/17] MargPOptimzer v5.3-b3 --- .../tools/optimizer/margpoptimizer.py | 113 ++++++++---------- 1 file changed, 47 insertions(+), 66 deletions(-) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py b/fastlane_bot/tools/optimizer/margpoptimizer.py index 3ca49c6e6..dc90ce6a0 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -22,9 +22,7 @@ (c) Copyright Bprotocol foundation 2023. 
Licensed under MIT """ -__VERSION__ = "5.3-b2" -# MERGING THE CHANGES FROM c4a110c9297abff1f355b03155c1052dcd1bd2fa - +__VERSION__ = "5.3-b3" __DATE__ = "30/Apr/2024" from dataclasses import dataclass, field, fields, asdict, astuple, InitVar @@ -158,6 +156,7 @@ def optimize(self, sfc=None, result=None, *, params=None): debug_j ditto, additional debug output (Jacobian) debug_dtkn ditto (d Token) debug_dtkn2 ditto (more d Token; requires debug_dtkn) + debug_dtknd ditto, d Token as dict raiseonerror if True, raise an OptimizationError exception on error pstart starting price for optimization (3) ================== ========================================================================= @@ -188,6 +187,7 @@ def optimize(self, sfc=None, result=None, *, params=None): dxdy_f = lambda r: (np.array(r[0:2])) # extract dx, dy from result tn = lambda t: t.split("-")[0] # token name, eg WETH-xxxx -> WETH + # epsilons and maxiter eps = P("eps") or self.MOEPS epsa = P("epsa") or self.MOEPSA @@ -231,9 +231,12 @@ def optimize(self, sfc=None, result=None, *, params=None): ) # pstart + if P("verbose") or P("debug"): + print(f"[margp_optimizer] targettkn = {targettkn}") + pstart = P("pstart") if not pstart is None: - if P("verbose") or P("debug"): + if P("debug"): print(f"[margp_optimizer] using pstart [{len(P('pstart'))} tokens]") if isinstance(pstart, pd.DataFrame): try: @@ -243,9 +246,8 @@ def optimize(self, sfc=None, result=None, *, params=None): assert isinstance(pstart, dict), f"pstart must be a dict or a data frame [{pstart}]" price_estimates_t = tuple(pstart[t] for t in tokens_t) else: - if P("verbose") or P("debug"): - print("[margp_optimizer] calculating price estimates") if P("debug"): + print("[margp_optimizer] calculating price estimates") print(f"[margp_optimizer] tknq={targettkn}, tknbs={tokens_t}") try: @@ -262,7 +264,8 @@ def optimize(self, sfc=None, result=None, *, params=None): raise if P("debug"): - print("[margp_optimizer] pstart:", price_estimates_t) + print("[margp_optimizer] price estimates = ", price_estimates_t) + if result == self.MO_PSTART: df = pd.DataFrame(price_estimates_t, index=tokens_t, columns=[targettkn]) df.index.name = "tknb" @@ -270,7 +273,7 @@ def optimize(self, sfc=None, result=None, *, params=None): # criterion and norm crit = P("crit") or self.MOCRITR - assert crit in set((self.MOCRITR, self.MOCRITA)), "crit must be MOCRITR or MOCRITA" + assert crit in set((self.MOCRITR, self.MOCRITA)), f"crit must be {self.MOCRITR} or {self.MOCRITA}" if crit == self.MOCRITA: assert not pstart is None, "pstart must be provided if crit is MOCRITA" assert epsaunit in pstart, f"epsaunit {epsaunit} not in pstart {P('pstart')}" @@ -284,6 +287,9 @@ def optimize(self, sfc=None, result=None, *, params=None): norm = P("norm") or self.MONORML2 assert norm in set((self.MONORML1, self.MONORML2, self.MONORMLINF)), f"norm must be MONORML1, MONORML2 or MONORMLINF [{norm}]" normf = lambda x: np.linalg.norm(x, ord=norm) + + if P("verbose") or P("debug"): + print(f"[margp_optimizer] crit={crit} (eps={eps_used}, unit={eps_unit}, norm=L{norm})") ## INNER FUNCTION: CALCULATE THE TARGET FUNCTION def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): @@ -305,18 +311,14 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): p = np.exp(p * np.log(10)) assert len(p) == len(tokens_t), f"p and tokens_t have different lengths [{p}, {tokens_t}]" if P("debug_dtkn") and not quiet: - print(f"\n[dtknfromp_f]\n=====================>>>") - #print(f"prices={p}") - #print(f"tokens={tokens_t}") - print( 
"p ", ", ".join(f"{x:,.2f}" for x in p)) - print( "1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - print(f"{targettkn} <-", ", ".join(tokens_t)) + print(f"\n[dtknfromp_f]\ndtkn=================>>>") + print(f"{targettkn:6}", ", ".join(tokens_t)) + print( "p ", ", ".join(f"{x:,.2f}" for x in p)) + print( "1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) # pvec is dict {tkn -> (log) price} for all tokens in p pvec = {tkn: p_ for tkn, p_ in zip(tokens_t, p)} pvec[targettkn] = 1 - # if P("debug") and not quiet: - # print(f"pvec={pvec}") sum_by_tkn = {t: 0 for t in alltokens_s} for pair, (tknb, tknq) in zip(pairs, pairs_t): @@ -330,37 +332,31 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): #dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) dxvecs = (c.dxvecfrompvec_f(pvec) for c in curves) - if P("debug_dtkn2") and not quiet: - dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - # TODO: rewrite this using the dxvec - # there is no need to extract dy dx; just iterate over dict - # however not urgent because this is debug code - print(f"\n{c0.pairp} --->>") - print(f" price={price:,.4f}, 1/price={1/price:,.4f}") - for r, c in zip(dxdy, curves): - s = f" cid={c.cid[2:6]}{c.cid[-2:]}" - s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" - s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" - s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" - print(s) - print(f"<<--- {c0.pairp}") + # if P("debug_dtkn2") and not quiet: + # dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) + # # TODO: rewrite this using the dxvec + # # there is no need to extract dy dx; just iterate over dict + # # however not urgent because this is debug code + # print(f"\n{c0.pairp} --->>") + # print(f" price={price:,.4f}, 1/price={1/price:,.4f}") + # for r, c in zip(dxdy, curves): + # s = f" cid={c.cid[2:6]}{c.cid[-2:]}" + # s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" + # s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" + # s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" + # print(s) + # print(f"<<--- {c0.pairp}") - # old code from dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - # sumdx, sumdy = sum(dxdy) - # sum_by_tkn[tknq] += sumdy - # sum_by_tkn[tknb] += sumdx for dxvec in dxvecs: for tkn, dx_ in dxvec.items(): sum_by_tkn[tkn] += dx_ - # if P("debug") and not quiet: - # print(f"pair={c0.pairp}, {sumdy:,.4f} {tn(tknq)}, {sumdx:,.4f} {tn(tknb)}, price={price:,.4f} {tn(tknq)} per {tn(tknb)} [{len(curves)} funcs]") - result = tuple(sum_by_tkn[t] for t in tokens_t) if P("debug_dtkn") and not quiet: print(f"\nsum_by_tkn={sum_by_tkn}") print(f"result={result}") - print(f"<<<=====================") + print(" >", ", ".join(tokens_t)) + print(f"<<<=================dtkn") if asdct: return sum_by_tkn, np.array(result) @@ -369,41 +365,33 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): ## END INNER FUNCTION try: - + # setting up the optimization variables (note: we optimize in log space) if price_estimates_t is None: raise Exception(f"price estimates not found; try setting pstart") p = np.array(price_estimates_t, dtype=float) plog10 = np.log10(p) if P("verbose") or P("debug"): - # dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) - print(f"[margp_optimizer] {targettkn} <-", ", ".join(tokens_t)) - print( "[margp_optimizer] p_t ", p) + print(f"\n[margp_optimizer] {targettkn} <-", ", ".join(tokens_t)) print( "[margp_optimizer] p ", ", ".join(f"{x:,.2f}" for x in p)) print( "[margp_optimizer] 1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - # print("[margp_optimizer] dtkn", dtkn) - # if P("tknd"): - 
# print("[margp_optimizer] dtkn_d", dtkn_d) - + ## MAIN OPTIMIZATION LOOP for i in range(maxiter): if P("progress"): - print( - f"\n[margp_optimizer] Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s" - ) + print(f"\n[margp_optimizer] Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s") + # calculate the change in token amounts (also as dict if requested) - if P("tknd"): + if P("debug_dtknd"): dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) else: dtkn = dtknfromp_f(plog10, islog10=True, asdct=False) # calculate the Jacobian - # if P("debug"): - # print("\n[margp_optimizer] calculating Jacobian") J = self.J(dtknfromp_f, plog10, jach=jach) - # ATTENTION: dtknfromp_f takes log10(p) as input + # ATTENTION: dtknfromp_f takes log10(p) as input by default if P("debug_j"): print("\n[margp_optimizer]\n============= JACOBIAN =============>>>") @@ -420,8 +408,8 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): print("\n[margp_optimizer] singular Jacobian, using lstsq instead") dplog10 = np.linalg.lstsq(J, -dtkn, rcond=None)[0] - # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html - # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html + # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html + # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html # update log prices, prices... p0log10 = [*plog10] # keep current log prices (deep copy) @@ -448,11 +436,10 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): print(f"[margp_optimizer] dtkn={dtkn}") print(f"[margp_optimizer] p={p} {targettkn}") print(f"[margp_optimizer] p={p_in_epsaunit} {epsaunit}") - if P("verbose") or P("debug"): print(f"[margp_optimizer] crit=normf({dtkn*p_in_epsaunit}) = {criterium} {epsaunit}") - + # ...print out some info if requested... - if P("verbose"): + if P("verbose") or P("debug"): print(f"\n[margp_optimizer]\n========== cycle {i} =======>>>") print(f"{targettkn} <-", ", ".join(tokens_t)) print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) @@ -462,17 +449,11 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): print("p_t ", tuple(p), targettkn) print("p ", ", ".join(f"{x:,.2f}" for x in p)) print("1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - #print("tokens ", tokens_t) - # print("dtkn", dtkn) - #print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) print(f"crit {criterium:.2e} [{eps_unit}; L{norm}], eps={eps_used}, c/e={criterium/eps_used:,.0e}]") - # TODO: DEAL WITH THOSE DEBUG FLAGS - if P("tknd"): + if P("debug_dtknd"): print("dtkn_d ", dtkn_d) - if P("J"): - print("J ", J) - + print(f"<<<========== cycle {i} =======") # ...and finally check the criterium (percentage changes this step) for convergence From 0ffb36ab3842265e66809ead0df42b582bd6b341 Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Wed, 1 May 2024 07:11:21 +0100 Subject: [PATCH 08/17] MargPOptimizer v5.3 --- .../tools/optimizer/margpoptimizer.py | 53 +++++++++++-------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py b/fastlane_bot/tools/optimizer/margpoptimizer.py index dc90ce6a0..abf4645fc 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -22,8 +22,8 @@ (c) Copyright Bprotocol foundation 2023. 
Licensed under MIT """ -__VERSION__ = "5.3-b3" -__DATE__ = "30/Apr/2024" +__VERSION__ = "5.3" +__DATE__ = "01/May/2024" from dataclasses import dataclass, field, fields, asdict, astuple, InitVar import pandas as pd @@ -212,24 +212,6 @@ def optimize(self, sfc=None, result=None, *, params=None): curves_by_pair = {pair: tuple(c for c in curves_t if c.pair == pair) for pair in pairs } pairs_t = tuple(tuple(p.split("/")) for p in pairs) - # return the inner function if requested - # (this may need to move lower) - if result == self.MO_DTKNFROMPF: - return dtknfromp_f - - # return debug info if requested - if result == self.MO_DEBUG: - return dict( - tokens_t=tokens_t, - tokens_ix=tokens_ix, - pairs=pairs, - sfc=sfc, - targettkn=targettkn, - pairs_t=pairs_t, - crit=dict(crit=P("crit"), eps=eps, epsa=epsa, epsaunit=epsaunit, pstart=P("pstart")), - optimizer=self, - ) - # pstart if P("verbose") or P("debug"): print(f"[margp_optimizer] targettkn = {targettkn}") @@ -363,7 +345,27 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): return np.array(result) ## END INNER FUNCTION - + + # return debug info if requested + if result == self.MO_DEBUG: + return dict( + tokens_t=tokens_t, + tokens_ix=tokens_ix, + price_estimates_t = price_estimates_t, + pairs=pairs, + sfc=sfc, + targettkn=targettkn, + pairs_t=pairs_t, + crit=dict(crit=P("crit"), eps=eps, epsa=epsa, epsaunit=epsaunit, pstart=P("pstart")), + dtknfromp_f = dtknfromp_f, + optimizer=self, + ) + + # return the inner function if requested + if result == self.MO_DTKNFROMPF: + return dtknfromp_f + + try: # setting up the optimization variables (note: we optimize in log space) @@ -411,6 +413,15 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html + # #### TODO: EXPERIMENTAL: ADD A DAMPING FACTOR TO THE JACOBIAN + + # #dplog10 = np.clip(dplog10, -0.1, 0.1) + # nrm = normf(dplog10) + # if nrm > 0.1: + # dplog10 /= nrm + + # #### END EXPERIMENTAL + # update log prices, prices... p0log10 = [*plog10] # keep current log prices (deep copy) plog10 += dplog10 # update log prices From 5f911132fa948ef1b6ebee8ca9d8b9d6b8b70987 Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Tue, 30 Apr 2024 18:41:02 +0100 Subject: [PATCH 09/17] Simplepair: adding spacing to token index (trivial) --- fastlane_bot/tools/simplepair.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fastlane_bot/tools/simplepair.py b/fastlane_bot/tools/simplepair.py index 65dd27ba5..f425abaa0 100644 --- a/fastlane_bot/tools/simplepair.py +++ b/fastlane_bot/tools/simplepair.py @@ -5,8 +5,8 @@ (c) Copyright Bprotocol foundation 2023. 
Licensed under MIT """ -__VERSION__ = "2.1" -__DATE__ = "18/May/2023" +__VERSION__ = "2.2" +__DATE__ = "30/Apr/2024" from dataclasses import dataclass, field, asdict, InitVar @@ -83,7 +83,7 @@ def tkny(self): return self.tknq NUMERAIRE_TOKENS = { - tkn: i + tkn: i*10 for i, tkn in enumerate( [ "USDC", From 025c65607d2ac2ac65bbbf3698a15e9a3b4ab173 Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Wed, 1 May 2024 15:37:47 +0100 Subject: [PATCH 10/17] NBTest 002, 003 --- .../NBTest/NBTest_002_CPCandOptimizer.ipynb | 12 ++ .../NBTest/NBTest_002_CPCandOptimizer.py | 4 + .../NBTest/NBTest_003_Serialization.ipynb | 163 ++++++++++++------ resources/NBTest/NBTest_003_Serialization.py | 22 ++- 4 files changed, 141 insertions(+), 60 deletions(-) diff --git a/resources/NBTest/NBTest_002_CPCandOptimizer.ipynb b/resources/NBTest/NBTest_002_CPCandOptimizer.ipynb index 099e10b48..0f16cffb0 100644 --- a/resources/NBTest/NBTest_002_CPCandOptimizer.ipynb +++ b/resources/NBTest/NBTest_002_CPCandOptimizer.ipynb @@ -2732,6 +2732,8 @@ "r = O.margp_optimizer(\"WETH\", result=O.MO_DEBUG)\n", "assert isinstance(r, dict)\n", "prices0 = r[\"price_estimates_t\"]\n", + "dtknfromp_f = r[\"dtknfromp_f\"]\n", + "assert np.linalg.norm(dtknfromp_f(np.log10(prices0))) < 1e-6\n", "assert not prices0 is None, f\"prices0 must not be None [{prices0}]\"\n", "r1 = O.arb(\"WETH\")\n", "r2 = O.SelfFinancingConstraints.arb(\"WETH\")\n", @@ -2792,6 +2794,16 @@ "prices0" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "30424c63", + "metadata": {}, + "outputs": [], + "source": [ + "dtknfromp_f(np.log10(prices0))" + ] + }, { "cell_type": "code", "execution_count": 115, diff --git a/resources/NBTest/NBTest_002_CPCandOptimizer.py b/resources/NBTest/NBTest_002_CPCandOptimizer.py index b985dacf7..4e34b9a1e 100644 --- a/resources/NBTest/NBTest_002_CPCandOptimizer.py +++ b/resources/NBTest/NBTest_002_CPCandOptimizer.py @@ -1278,6 +1278,8 @@ r = O.margp_optimizer("WETH", result=O.MO_DEBUG) assert isinstance(r, dict) prices0 = r["price_estimates_t"] +dtknfromp_f = r["dtknfromp_f"] +assert np.linalg.norm(dtknfromp_f(np.log10(prices0))) < 1e-6 assert not prices0 is None, f"prices0 must not be None [{prices0}]" r1 = O.arb("WETH") r2 = O.SelfFinancingConstraints.arb("WETH") @@ -1291,6 +1293,8 @@ prices0 +dtknfromp_f(np.log10(prices0)) + f = O.optimize("WETH", result=O.MO_DTKNFROMPF, params=dict(verbose=True, debug=False)) r3 = f(prices0, islog10=False) assert np.all(r3 == (0,0)) diff --git a/resources/NBTest/NBTest_003_Serialization.ipynb b/resources/NBTest/NBTest_003_Serialization.ipynb index d5b5680f6..fb042d8c0 100644 --- a/resources/NBTest/NBTest_003_Serialization.ipynb +++ b/resources/NBTest/NBTest_003_Serialization.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 58, "id": "be65f3d2-769a-449f-90cd-2633a11478d0", "metadata": {}, "outputs": [ @@ -10,8 +10,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "imported m, np, pd, plt, os, sys, decimal; defined iseq, raises, require, Timer\n", - "ConstantProductCurve v3.4 (23/Jan/2024)\n", + "ConstantProductCurve v3.5 (22/Apr/2023)\n", "CPCArbOptimizer v5.1 (15/Sep/2023)\n" ] } @@ -21,7 +20,6 @@ " from fastlane_bot.tools.cpc import ConstantProductCurve as CPC, CPCContainer\n", " from fastlane_bot.tools.optimizer import CPCArbOptimizer, cp, time\n", " from fastlane_bot.testing import *\n", - "\n", "except:\n", " from tools.cpc import ConstantProductCurve as CPC, CPCContainer\n", 
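Side note on the Simplepair change above (illustrative, with an abbreviated and partly hypothetical token list): multiplying the enumeration index by 10 keeps the existing numeraire ordering but leaves gaps, so additional numeraire tokens can later be ranked between existing ones without renumbering the whole table.

    tokens = ["USDC", "USDT", "DAI", "WETH"]          # abbreviated example list
    NUMERAIRE_TOKENS = {tkn: i * 10 for i, tkn in enumerate(tokens)}
    # before the change: {"USDC": 0, "USDT": 1, "DAI": 2, "WETH": 3}
    # after the change:  {"USDC": 0, "USDT": 10, "DAI": 20, "WETH": 30}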
" from tools.optimizer import CPCArbOptimizer, cp, time\n", @@ -55,7 +53,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 59, "id": "4030cea3-3e03-4e0f-8d80-7a2bcca05fcf", "metadata": {}, "outputs": [], @@ -65,7 +63,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 60, "id": "8cb4f9bc-2f31-4eae-b77f-533aa188e49b", "metadata": {}, "outputs": [], @@ -84,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 61, "id": "a5ed0075-5ee5-4592-a192-e06d2b5af454", "metadata": {}, "outputs": [], @@ -95,7 +93,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 62, "id": "1bf13d91-2bc0-4819-96b9-2712ef89b6f1", "metadata": {}, "outputs": [], @@ -105,7 +103,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 63, "id": "ce05c578-5060-498e-b4eb-f55617d10cdd", "metadata": {}, "outputs": [], @@ -140,7 +138,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 64, "id": "41a5cdfe-fb7b-4c8b-a270-1a52f0765e94", "metadata": {}, "outputs": [ @@ -150,7 +148,7 @@ "ConstantProductCurve(k=10000, x=100, x_act=100, y_act=100, alpha=0.5, pair='TKNB/TKNQ', cid='1', fee=0, descr='UniV2', constr='uv2', params={})" ] }, - "execution_count": 7, + "execution_count": 64, "metadata": {}, "output_type": "execute_result" } @@ -174,7 +172,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 65, "id": "ea3cdfbc-8edd-41f1-9703-0ae0d72fdb9a", "metadata": {}, "outputs": [ @@ -194,7 +192,7 @@ " 'params': {}}" ] }, - "execution_count": 8, + "execution_count": 65, "metadata": {}, "output_type": "execute_result" } @@ -205,7 +203,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 66, "id": "595de023-5c66-40fc-928f-eca5fe6a50c9", "metadata": {}, "outputs": [], @@ -227,7 +225,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 67, "id": "215b5105-08d9-4077-a51a-7658cafcffa9", "metadata": {}, "outputs": [], @@ -261,7 +259,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 68, "id": "0963034a-b36c-4cfb-84da-ccb3c88c4389", "metadata": {}, "outputs": [], @@ -279,7 +277,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 69, "id": "eb5dd380-dd90-4a3b-b88a-5a697bdbc3a0", "metadata": {}, "outputs": [], @@ -310,7 +308,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 70, "id": "624b80f1-c811-483b-ba24-b76c72fe3e0c", "metadata": {}, "outputs": [], @@ -325,7 +323,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 71, "id": "34d52402-18d6-4485-8e5c-6cb4f8af2ab2", "metadata": {}, "outputs": [ @@ -349,7 +347,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 72, "id": "85175836-0fa9-4f64-a42f-b5b787e622f0", "metadata": {}, "outputs": [], @@ -364,7 +362,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 73, "id": "9753798a-b154-4865-a845-a1f5f1eb8e4b", "metadata": {}, "outputs": [ @@ -388,17 +386,17 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 74, "id": "5f683913-1799-4f3a-9473-a663d803448a", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "ConstantProductCurve(k=0.01, x=0.0015438708879488485, x_act=0, y_act=1, alpha=0.5, pair='ETH/USDC', cid='4', fee=0, descr='Carbon', constr='carb', params={'y': 1, 'yint': 1, 'A': 10, 'B': 54.772255750516614, 'pa': 4195.445115010333, 'pb': 3000.0000000000005})" + "ConstantProductCurve(k=0.01, 
x=0.0015438708879488485, x_act=0, y_act=1, alpha=0.5, pair='ETH/USDC', cid='4', fee=0, descr='Carbon', constr='carb', params={'y': 1, 'yint': 1, 'A': 10, 'B': 54.772255750516614, 'pa': 4195.445115010333, 'pb': 3000.0000000000005, 'minrw': 1e-06})" ] }, - "execution_count": 17, + "execution_count": 74, "metadata": {}, "output_type": "execute_result" } @@ -412,7 +410,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 75, "id": "cffdcaa4-f221-4bd7-bf2d-5418a33e3592", "metadata": {}, "outputs": [], @@ -431,12 +429,35 @@ "assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, A=100, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)\n", "assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, B=100, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)\n", "assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, A=100, B=100, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)\n", - "assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)" + "#assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)" + ] + }, + { + "cell_type": "markdown", + "id": "6d4698a1-5df9-4c5d-a1c9-7e48fd9aa580", + "metadata": {}, + "source": [ + "TODO" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 76, + "id": "c1b70bbc-2531-458a-a507-24d89559bf41", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair=\"ETH/USDC\", tkny=\"ETH\", cid=\"1\", descr=\"Carbon\", isdydx=False)\n", + "#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, descr=\"Carbon\", isdydx=False)\n", + "#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", isdydx=False)\n", + "#assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 77, "id": "f66fc490-97e0-4c5e-958d-1e9014934d5c", "metadata": {}, "outputs": [], @@ -450,13 +471,31 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 78, "id": "465ff937-2382-4215-8e11-ec8096e1ea3d", "metadata": {}, "outputs": [], "source": [ "assert not raises(CPC.from_carbon, yint=1, y=1, pa=3100, pb=2900, pair=\"ETH/USDC\", tkny=\"USDC\", fee=0, cid=\"2\", descr=\"Carbon\", isdydx=True)\n", - "assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair=\"ETH/USDC\", tkny=\"USDC\", fee=0, cid=\"2\", descr=\"Carbon\", isdydx=True)" + "#assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair=\"ETH/USDC\", tkny=\"USDC\", fee=0, cid=\"2\", descr=\"Carbon\", isdydx=True)" + ] + }, + { + "cell_type": "markdown", + "id": "b0da3d2e-9b91-4c7a-89b8-8aa140901e32", + "metadata": {}, + "source": [ + "TODO" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "id": "d30a97ad-0188-4388-a3f8-3efa1151aa4a", + "metadata": {}, + "outputs": [], + "source": [ + "#assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair=\"ETH/USDC\", tkny=\"USDC\", fee=0, cid=\"2\", descr=\"Carbon\", isdydx=True)" ] }, { @@ -469,7 +508,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 80, "id": 
"c5c8d6c3-0d15-4c3d-8852-b2870a7b4caa", "metadata": {}, "outputs": [], @@ -485,7 +524,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 81, "id": "8296d087-d5a5-4b77-825a-dd53ed60d4bd", "metadata": {}, "outputs": [], @@ -503,7 +542,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 82, "id": "e72d0162-dd59-489c-8efb-dbb8327ff553", "metadata": {}, "outputs": [ @@ -567,7 +606,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 83, "id": "c2d5dc97-05e8-4eca-abc7-66eee6e7d706", "metadata": {}, "outputs": [], @@ -581,7 +620,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 84, "id": "9f467a32-370b-4634-bec8-3c28be84a0a0", "metadata": {}, "outputs": [], @@ -593,7 +632,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 85, "id": "d7563934-5381-476d-b9cb-99b909691049", "metadata": {}, "outputs": [ @@ -603,7 +642,7 @@ "CPCContainer(curves=[ConstantProductCurve(k=2000, x=1, x_act=1, y_act=2000, alpha=0.5, pair='ETH/USDC', cid='1', fee=0.001, descr='UniV2', constr='uv2', params={'meh': 1}), ConstantProductCurve(k=8040, x=2, x_act=2, y_act=4020, alpha=0.5, pair='ETH/USDC', cid='2', fee=0.001, descr='UniV2', constr='uv2', params={}), ConstantProductCurve(k=1970, x=1, x_act=1, y_act=1970, alpha=0.5, pair='ETH/USDC', cid='3', fee=0.001, descr='UniV2', constr='uv2', params={})])" ] }, - "execution_count": 26, + "execution_count": 85, "metadata": {}, "output_type": "execute_result" } @@ -621,7 +660,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 86, "id": "131928b8-f927-4799-97c6-ec50631c7959", "metadata": {}, "outputs": [ @@ -723,7 +762,7 @@ "3 1970 1 1 1970 0.5 ETH/USDC 0.001 UniV2 uv2 {}" ] }, - "execution_count": 27, + "execution_count": 86, "metadata": {}, "output_type": "execute_result" } @@ -750,7 +789,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 87, "id": "6cd062ae-c465-4102-a57c-587874023de5", "metadata": {}, "outputs": [], @@ -779,7 +818,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 88, "id": "8c046e70-ef8a-4de8-bd17-726afb617ea1", "metadata": {}, "outputs": [ @@ -788,7 +827,7 @@ "output_type": "stream", "text": [ "len 2355000\n", - "elapsed time: 0.29s\n" + "elapsed time: 0.34s\n" ] } ], @@ -814,7 +853,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 89, "id": "e892dc06-329d-477f-adcb-40a87eb7a009", "metadata": {}, "outputs": [ @@ -822,7 +861,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "elapsed time: 0.21s\n" + "elapsed time: 0.22s\n" ] }, { @@ -913,7 +952,7 @@ "2 3 1970 1 1 1970 0.5 ETH/USDC 0.001 UniV2 uv2 {}" ] }, - "execution_count": 30, + "execution_count": 89, "metadata": {}, "output_type": "execute_result" } @@ -939,7 +978,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 90, "id": "a2976017-2a84-4fba-885d-7680d9f61c3a", "metadata": {}, "outputs": [ @@ -947,7 +986,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "elapsed time: 0.17s\n" + "elapsed time: 0.16s\n" ] } ], @@ -971,7 +1010,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 91, "id": "ed5aaa2c-2f5a-4863-87cf-a77240826a85", "metadata": { "lines_to_next_cell": 2 @@ -981,7 +1020,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "elapsed time: 0.21s\n" + "elapsed time: 0.16s\n" ] } ], @@ -1005,7 +1044,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 92, "id": 
"f1507cc7-96ba-4342-bf1e-955b248bd8b4", "metadata": {}, "outputs": [], @@ -1030,7 +1069,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 93, "id": "a1c75dfe-ce14-4840-9c62-39a8d5cfc3ad", "metadata": {}, "outputs": [ @@ -1139,7 +1178,7 @@ "3 1970 1 1 1970 0.5 ETH/USDC 0.001 UniV2 uv2 {}" ] }, - "execution_count": 34, + "execution_count": 93, "metadata": {}, "output_type": "execute_result" } @@ -1156,7 +1195,9 @@ { "cell_type": "markdown", "id": "3cfc2ff5-bf9d-4684-9b8c-2aff57937a46", - "metadata": {}, + "metadata": { + "tags": [] + }, "source": [ "### Benchmarking\n", "\n", @@ -1174,7 +1215,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 94, "id": "c43b9431-603d-49af-b5fd-1975e9f59e2f", "metadata": {}, "outputs": [ @@ -1183,10 +1224,10 @@ "output_type": "stream", "text": [ " 2355000 .curves.json\n", - "-rw-r--r-- 1 skl staff 720055 1 May 07:51 .curves.csv\n", - "-rw-r--r-- 1 skl staff 2965 1 May 07:51 .curves.csv.gz\n", - "-rw-r--r-- 1 skl staff 961219 1 May 07:51 .curves.pkl\n", - "-rw-r--r-- 1 skl staff 720055 1 May 07:51 .curves.tsv\n" + "-rw-r--r-- 1 skl staff 720055 1 May 15:40 .curves.csv\n", + "-rw-r--r-- 1 skl staff 2965 1 May 15:40 .curves.csv.gz\n", + "-rw-r--r-- 1 skl staff 961219 1 May 15:40 .curves.pkl\n", + "-rw-r--r-- 1 skl staff 720055 1 May 15:40 .curves.tsv\n" ] } ], @@ -1227,6 +1268,14 @@ "metadata": {}, "outputs": [], "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73a341c5-36e5-47c2-9fb0-0a63b589b98b", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/resources/NBTest/NBTest_003_Serialization.py b/resources/NBTest/NBTest_003_Serialization.py index 95f7a43db..d1530d580 100644 --- a/resources/NBTest/NBTest_003_Serialization.py +++ b/resources/NBTest/NBTest_003_Serialization.py @@ -19,7 +19,6 @@ from fastlane_bot.tools.cpc import ConstantProductCurve as CPC, CPCContainer from fastlane_bot.tools.optimizer import CPCArbOptimizer, cp, time from fastlane_bot.testing import * - except: from tools.cpc import ConstantProductCurve as CPC, CPCContainer from tools.optimizer import CPCArbOptimizer, cp, time @@ -205,7 +204,16 @@ assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, A=100, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, B=100, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, A=100, B=100, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) -assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) +#assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) + +# TODO + +# + +#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair="ETH/USDC", tkny="ETH", cid="1", descr="Carbon", isdydx=False) +#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair="ETH/USDC", tkny="ETH", fee=0, descr="Carbon", isdydx=False) +#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", isdydx=False) +#assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) +# - assert not raises(CPC.from_carbon, yint=1, y=1, A=1/10, B=m.sqrt(1/2000), pair="ETH/USDC", 
tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) assert raises(CPC.from_carbon, yint=1, y=1, A=1/10, B=m.sqrt(1/2000), pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=False) @@ -214,7 +222,13 @@ assert raises(CPC.from_carbon, yint=1, y=1, A=-1/10, B=m.sqrt(1/2000), pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) assert not raises(CPC.from_carbon, yint=1, y=1, pa=3100, pb=2900, pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) -assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) +#assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) + +# TODO + +# + +#assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) +# - # ## Charts [NOTEST] @@ -386,3 +400,5 @@ + + From 9421057908579ae9e0c3be76a067ee0b670c85b5 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Thu, 2 May 2024 19:56:12 +1000 Subject: [PATCH 11/17] use the regular multicall address --- fastlane_bot/config/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastlane_bot/config/network.py b/fastlane_bot/config/network.py index cfec97aa3..7dac0b6bc 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -794,7 +794,7 @@ class _ConfigNetworkSei(ConfigNetwork): network_df = get_multichain_addresses(network=NETWORK_NAME) FASTLANE_CONTRACT_ADDRESS = "0xC7Dd38e64822108446872c5C2105308058c5C55C" #TODO - UPDATE WITH Mainnet - MULTICALL_CONTRACT_ADDRESS = "0x1E05037b9c4fEFaF3c45CD6F4F2C3197e6A43cD8" # previously 0xcA11bde05977b3631167028862bE2a173976CA11 + MULTICALL_CONTRACT_ADDRESS = "0xcA11bde05977b3631167028862bE2a173976CA11" CARBON_CONTROLLER_ADDRESS = "0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA" #TODO - UPDATE WITH Mainnet CARBON_CONTROLLER_VOUCHER = "0xe4816658ad10bF215053C533cceAe3f59e1f1087" #TODO - UPDATE WITH Mainnet From 19d474e935f8581bacf2b2f702b2903cff02fe4a Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Fri, 3 May 2024 08:39:27 +1000 Subject: [PATCH 12/17] update default flashloan tokens --- fastlane_bot/config/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastlane_bot/config/network.py b/fastlane_bot/config/network.py index 7dac0b6bc..087ee9d77 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -810,7 +810,7 @@ class _ConfigNetworkSei(ConfigNetwork): BALANCER_VAULT_ADDRESS = "0x7ccBebeb88696f9c8b061f1112Bb970158e29cA5" # # TODO Jellyswap on devnet CHAIN_FLASHLOAN_TOKENS = { - "0x26841a0A5D958B128209F4ea9a1DD7E61558c330": "WSEI", #TODO confirm for Mainnet + "0x57eE725BEeB991c70c53f9642f36755EC6eb2139": "WSEI", #TODO confirm for Mainnet "0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C": "USDC", #TODO confirm for Mainnet } # Add any exchanges unique to the chain here From 1d3324c72093a71e747a37770dbc4d59b1eb6cd4 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Fri, 3 May 2024 11:03:35 +1000 Subject: [PATCH 13/17] other static data for testing --- fastlane_bot/data/blockchain_data/sei/static_pool_data.csv | 2 ++ fastlane_bot/data/blockchain_data/sei/tokens.csv | 1 + .../data/blockchain_data/sei/uniswap_v2_event_mappings.csv | 4 +++- fastlane_bot/data/multichain_addresses.csv | 4 +++- 4 
files changed, 9 insertions(+), 2 deletions(-) diff --git a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv index 09177afa2..90d9f351a 100644 --- a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv +++ b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv @@ -1,3 +1,5 @@ cid,strategy_id,last_updated,last_updated_block,descr,pair_name,exchange_name,fee,fee_float,address,anchor,tkn0_address,tkn1_address,tkn0_decimals,tkn1_decimals,exchange_id,tkn0_symbol,tkn1_symbol,timestamp,tkn0_balance,tkn1_balance,liquidity,sqrt_price_q96,tick,tick_spacing,exchange,pool_type,tkn0_weight,tkn1_weight,tkn2_address,tkn2_decimals,tkn2_symbol,tkn2_balance,tkn2_weight,tkn3_address,tkn3_decimals,tkn3_symbol,tkn3_balance,tkn3_weight,tkn4_address,tkn4_decimals,tkn4_symbol,tkn4_balance,tkn4_weight,tkn5_address,tkn5_decimals,tkn5_symbol,tkn5_balance,tkn5_weight,tkn6_address,tkn6_decimals,tkn6_symbol,tkn6_balance,tkn6_weight,tkn7_address,tkn7_decimals,tkn7_symbol,tkn7_balance,tkn7_weight 0x1422169ab760ea6994358267b7d3783e8e7fa55c6a74b365b3fd3d17cbf4c6f1,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,dragonswap,0.003,0.003,0x01A34Dfa104F020FEE739268679338169945D5B1,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0x7b75109369ACb528d9fa989E227812a6589712b9,18,18,3,WSEI,DSWAP,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 0xbfd9612b2cb8035908dff18c040f64de75999cefd1020b5ce8a2e533c2ecd5dc,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,dragonswap,0.003,0.003,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,18,6,3,WSEI,USDC,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +0xe3aead757d877a15316e4896d5c5ab7639adbcba1ff76e3434b4e0af90f6225e,0,,2354,dragonswap 0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C/0xF983afa393199D6902a1Dd04f8E93465915ffD8B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C/0xF983afa393199D6902a1Dd04f8E93465915ffD8B,dragonswap,0.003,0.003,0x72A788B0A83e18ce1757171321E82c03e4351498,,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0xF983afa393199D6902a1Dd04f8E93465915ffD8B,6,6,3,USDC,USDT,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, + diff --git a/fastlane_bot/data/blockchain_data/sei/tokens.csv b/fastlane_bot/data/blockchain_data/sei/tokens.csv index af2dc29ce..e79bf6c3b 100644 --- a/fastlane_bot/data/blockchain_data/sei/tokens.csv +++ b/fastlane_bot/data/blockchain_data/sei/tokens.csv @@ -6,3 +6,4 @@ address,decimals,symbol 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE,18,SEI 0x9e7A8e558Ce582511f4104465a886b7bEfBC146b,18,JLY 0x57eE725BEeB991c70c53f9642f36755EC6eb2139,18,WSEI +0xF983afa393199D6902a1Dd04f8E93465915ffD8B,6,USDT diff --git a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv index f0165604a..1989d2011 100644 --- a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv +++ b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv @@ -1,3 +1,5 @@ exchange,address dragonswap,0x01A34Dfa104F020FEE739268679338169945D5B1 -dragonswap,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732 \ No newline at end of file 
+dragonswap,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732 +dragonswap,0x38BcEBb9A3fbF05B0Ab7ce9b485c9669578409fE +dragonswap,0x72A788B0A83e18ce1757171321E82c03e4351498 \ No newline at end of file diff --git a/fastlane_bot/data/multichain_addresses.csv b/fastlane_bot/data/multichain_addresses.csv index 235346cb8..0650447ea 100644 --- a/fastlane_bot/data/multichain_addresses.csv +++ b/fastlane_bot/data/multichain_addresses.csv @@ -136,4 +136,6 @@ pancakeswap_v3,zkevm,uniswap_v3,0x0BFbCF9fa4f9C56B0F40a671Ad40E0805A091865,0x1b8 pancakeswap_v3,zksync,uniswap_v3,0x1BB72E0CbbEA93c08f535fc7856E0338D7F7a8aB,0xD70C70AD87aa8D45b8D59600342FB3AEe76E3c68,,, xfai_v0,linea,solidly_v2,0xa5136eAd459F0E61C99Cec70fe8F5C24cF3ecA26,0xD538be6e9026C13D130C9e17d509E69C8Bb0eF33,,222864, carbon_v1,sei,carbon_v1,0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA,0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA,,17658678, -dragonswap,sei,uniswap_v2,0x5D370a6189F89603FaB67e9C68383e63F7B6A262,0x2346d3A6fb18Ff3ae590Ea31d9e41E6AB8c9f5EB,,1008775, +dragonswap,sei,uniswap_v2,0x5D370a6189F89603FaB67e9C68383e63F7B6A262,0x2346d3A6fb18Ff3ae590Ea31d9e41E6AB8c9f5EB,0.003,1008775, +jellyswap,sei,balancer,BALANCER_VAULT_ADDRESS,0x7ccBebeb88696f9c8b061f1112Bb970158e29cA5,0,222832, +uniswap_v3,sei,uniswap_v3,0x0000000000000000000000000000000000000000,0x0000000000000000000000000000000000000000,,1, From d2ec816ac8f427be96fe2508fd66df9750d37f93 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Fri, 3 May 2024 11:04:23 +1000 Subject: [PATCH 14/17] add basics to terraformer --- run_blockchain_terraformer.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/run_blockchain_terraformer.py b/run_blockchain_terraformer.py index b9f661e7d..6102f4f88 100644 --- a/run_blockchain_terraformer.py +++ b/run_blockchain_terraformer.py @@ -50,6 +50,7 @@ async def gather(): FANTOM = "fantom" MANTLE = "mantle" LINEA = "linea" +SEI = "sei" coingecko_network_map = { "ethereum": "ethereum", @@ -68,18 +69,20 @@ async def gather(): "cosmos": "cosmos", "kava": "kava", "mantle": "mantle", + "sei": "sei", } BLOCK_CHUNK_SIZE_MAP = { - "ethereum": 0, - "polygon": 0, - "polygon_zkevm": 0, - "arbitrum_one": 0, - "optimism": 0, - "coinbase_base": 0, - "fantom": 5000, - "mantle": 0, - "linea": 0 + "ethereum": 50000, + "polygon": 250000, + "polygon_zkevm": 500000, + "arbitrum_one": 500000, + "optimism": 500000, + "coinbase_base": 250000, + "fantom": 2000, + "mantle": 10000000, + "linea": 1000000, + "sei": 10000, #TODO untested this could be more } ALCHEMY_KEY_DICT = { @@ -92,6 +95,7 @@ async def gather(): "fantom": "WEB3_FANTOM", "mantle": "WEB3_MANTLE", "linea": "WEB3_LINEA", + "sei": "WEB3_SEI", } ALCHEMY_RPC_LIST = { @@ -104,6 +108,7 @@ async def gather(): "fantom": "https://fantom.blockpi.network/v1/rpc/", "mantle": "https://rpc.mantle.xyz/", "linea": "https://rpc.linea.build/", + "sei": "https://evm-rpc.arctic-1.seinetwork.io/", # TODO update with mainnet } BALANCER_SUBGRAPH_CHAIN_URL = { @@ -115,6 +120,7 @@ async def gather(): "coinbase_base": "https://api.studio.thegraph.com/query/24660/balancer-base-v2/version/latest", "avalanche": "https://api.thegraph.com/subgraphs/name/balancer-labs/balancer-avalanche-v2", "fantom": "https://api.thegraph.com/subgraphs/name/beethovenxfi/beethovenx", + "sei": "" # TODO add Jellyswap } @@ -1216,6 +1222,7 @@ def terraform_blockchain(network_name: str): file_desc.writelines(list(dict.fromkeys(lines))) file_desc.close() +#terraform_blockchain(network_name="sei", 
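Illustrative sketch (not part of the patch): BLOCK_CHUNK_SIZE_MAP above presumably bounds how many blocks a single log query may span on each chain, with 0 read as "query the whole range at once". A minimal chunking helper under that assumption; the commented usage lines are hypothetical and only indicate the intended shape of a web3-style call.

    def iter_block_ranges(start_block, end_block, chunk_size):
        # chunk_size <= 0 is taken to mean "no chunking"
        if chunk_size <= 0:
            yield (start_block, end_block)
            return
        for lo in range(start_block, end_block + 1, chunk_size):
            yield (lo, min(lo + chunk_size - 1, end_block))

    # Hypothetical usage:
    # for lo, hi in iter_block_ranges(deploy_block, latest_block, BLOCK_CHUNK_SIZE_MAP["sei"]):
    #     events = factory_contract.events.PairCreated.get_logs(fromBlock=lo, toBlock=hi)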
save_tokens=True) #terraform_blockchain(network_name=ETHEREUM) #terraform_blockchain(network_name=BASE) From 87139984d357bfc4201b5da0629a1bf609444273 Mon Sep 17 00:00:00 2001 From: Platon Floria Date: Fri, 3 May 2024 12:05:44 +0200 Subject: [PATCH 15/17] fix: BLOCK_CHUNK_SIZE_MAP --- run_blockchain_terraformer.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/run_blockchain_terraformer.py b/run_blockchain_terraformer.py index 6102f4f88..68d06658c 100644 --- a/run_blockchain_terraformer.py +++ b/run_blockchain_terraformer.py @@ -73,16 +73,15 @@ async def gather(): } BLOCK_CHUNK_SIZE_MAP = { - "ethereum": 50000, - "polygon": 250000, - "polygon_zkevm": 500000, - "arbitrum_one": 500000, - "optimism": 500000, - "coinbase_base": 250000, - "fantom": 2000, - "mantle": 10000000, - "linea": 1000000, - "sei": 10000, #TODO untested this could be more + "ethereum": 0, + "polygon": 0, + "polygon_zkevm": 0, + "arbitrum_one": 0, + "optimism": 0, + "coinbase_base": 0, + "fantom": 5000, + "mantle": 0, + "linea": 0 } ALCHEMY_KEY_DICT = { From 8fd74596e28d643e0bca497b007ec0efd385fb35 Mon Sep 17 00:00:00 2001 From: barak manos <> Date: Fri, 3 May 2024 13:13:06 +0300 Subject: [PATCH 16/17] Fix the terraformer script and update data files for sei --- fastlane_bot/data/blockchain_data/sei/static_pool_data.csv | 1 - .../data/blockchain_data/sei/uniswap_v2_event_mappings.csv | 2 +- run_blockchain_terraformer.py | 5 +++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv index 90d9f351a..54e89f03e 100644 --- a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv +++ b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv @@ -2,4 +2,3 @@ cid,strategy_id,last_updated,last_updated_block,descr,pair_name,exchange_name,fe 0x1422169ab760ea6994358267b7d3783e8e7fa55c6a74b365b3fd3d17cbf4c6f1,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,dragonswap,0.003,0.003,0x01A34Dfa104F020FEE739268679338169945D5B1,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0x7b75109369ACb528d9fa989E227812a6589712b9,18,18,3,WSEI,DSWAP,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 0xbfd9612b2cb8035908dff18c040f64de75999cefd1020b5ce8a2e533c2ecd5dc,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,dragonswap,0.003,0.003,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,18,6,3,WSEI,USDC,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 0xe3aead757d877a15316e4896d5c5ab7639adbcba1ff76e3434b4e0af90f6225e,0,,2354,dragonswap 0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C/0xF983afa393199D6902a1Dd04f8E93465915ffD8B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C/0xF983afa393199D6902a1Dd04f8E93465915ffD8B,dragonswap,0.003,0.003,0x72A788B0A83e18ce1757171321E82c03e4351498,,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0xF983afa393199D6902a1Dd04f8E93465915ffD8B,6,6,3,USDC,USDT,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, - diff --git a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv index 1989d2011..c23f7b0da 100644 --- 
a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv +++ b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv @@ -2,4 +2,4 @@ exchange,address dragonswap,0x01A34Dfa104F020FEE739268679338169945D5B1 dragonswap,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732 dragonswap,0x38BcEBb9A3fbF05B0Ab7ce9b485c9669578409fE -dragonswap,0x72A788B0A83e18ce1757171321E82c03e4351498 \ No newline at end of file +dragonswap,0x72A788B0A83e18ce1757171321E82c03e4351498 diff --git a/run_blockchain_terraformer.py b/run_blockchain_terraformer.py index 68d06658c..5804e0092 100644 --- a/run_blockchain_terraformer.py +++ b/run_blockchain_terraformer.py @@ -81,7 +81,8 @@ async def gather(): "coinbase_base": 0, "fantom": 5000, "mantle": 0, - "linea": 0 + "linea": 0, + "sei": 0, } ALCHEMY_KEY_DICT = { @@ -1221,10 +1222,10 @@ def terraform_blockchain(network_name: str): file_desc.writelines(list(dict.fromkeys(lines))) file_desc.close() -#terraform_blockchain(network_name="sei", save_tokens=True) #terraform_blockchain(network_name=ETHEREUM) #terraform_blockchain(network_name=BASE) #terraform_blockchain(network_name=FANTOM) #terraform_blockchain(network_name=MANTLE) #terraform_blockchain(network_name=LINEA) +#terraform_blockchain(network_name=SEI) From a1915bd8135fecb533f0ce0f154c50379384f05a Mon Sep 17 00:00:00 2001 From: barak manos <> Date: Fri, 3 May 2024 13:29:37 +0300 Subject: [PATCH 17/17] Remove entries in multichain_addresses.csv of univ2 forks with no fee --- fastlane_bot/data/multichain_addresses.csv | 8 -------- 1 file changed, 8 deletions(-) diff --git a/fastlane_bot/data/multichain_addresses.csv b/fastlane_bot/data/multichain_addresses.csv index 0650447ea..b2269ce67 100644 --- a/fastlane_bot/data/multichain_addresses.csv +++ b/fastlane_bot/data/multichain_addresses.csv @@ -1,7 +1,6 @@ exchange_name,chain,fork,factory_address,router_address,fee,start_block,active sushiswap_v2,arbitrum_nova,uniswap_v2,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506,0.003,0, sushiswap_v3,arbitrum_nova,uniswap_v3,0xaa26771d497814E81D305c511Efbb3ceD90BF5bd,0xc14Ee6B248787847527e11b8d7Cf257b212f7a9F,NA,4242300, -arbswap_v2,arbitrum_one,uniswap_v2,0xd394E9CC20f43d2651293756F8D320668E850F1b,0xD01319f4b65b79124549dE409D36F25e04B3e551,,3692134, arbswap_v3,arbitrum_one,solidly_v2,0x3a52e9200Ed7403D9d21664fDee540C2d02c099d,0x6947A425453D04305520E612F0Cb2952E4D07d62,,78527770,FALSE balancer,arbitrum_one,balancer,BALANCER_VAULT_ADDRESS,0xBA12222222228d8Ba445958a75a0704d566BF2C8,0,222832, camelot_v2,arbitrum_one,uniswap_v2,0x6EcCab422D763aC031210895C81787E87B43A652,0xc873fEcbd354f5A56E00E710B90EF4201db2448d,0.002,35061163, @@ -11,15 +10,12 @@ pancakeswap_v2,arbitrum_one,uniswap_v2,0x02a84c1b3BBD7401a5f7fa98a384EBC70bB5749 pancakeswap_v3,arbitrum_one,uniswap_v3,0x0BFbCF9fa4f9C56B0F40a671Ad40E0805A091865,0x1b81D678ffb9C0263b24A97847620C99d213eB14,NA,101028949, ramses_v1,arbitrum_one,solidly_v2,0xAAA20D08e59F6561f242b08513D36266C5A29415,0xAAA87963EFeB6f7E0a2711F397663105Acb1805e,,69820005,FALSE ramses_v2,arbitrum_one,uniswap_v3,0xAA2cd7477c451E703f3B9Ba5663334914763edF8,0xAA273216Cc9201A1e4285CA623f584BADc736944,,90593047, -smardex_v2,arbitrum_one,uniswap_v2,0x41A00e3FbE7F479A99bA6822704d9c5dEB611F22,0xDA3970a20cdc2B1269fc96C4E8D300E0fdDB7b3D,,112072483, -spartadex_v2,arbitrum_one,uniswap_v2,0xFe8EC10Fe07A6a6f4A2584f8cD9FE232930eAF55,0x89AE36E3B567b914a5E97E6488C6EB5b9C5d0231,,117753271, 
sushiswap_v2,arbitrum_one,uniswap_v2,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506,0.003,70, sushiswap_v3,arbitrum_one,uniswap_v3,0x1af415a1EbA07a4986a52B6f2e7dE7003D82231e,0x8A21F6768C1f8075791D08546Dadf6daA0bE820c,NA,75998697, traderjoe_v2,arbitrum_one,OTHER,0x1886D09C9Ade0c5DB822D85D21678Db67B6c2982,0x7BFd7192E76D950832c77BB412aaE841049D8D9B,,47891979, traderjoe_v3,arbitrum_one,OTHER,0x8e42f2F4101563bF679975178e880FD87d3eFd4e,0xb4315e873dBcf96Ffd0acd8EA43f689D8c20fB30,,77473199, uniswap_v2,arbitrum_one,uniswap_v2,0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f,0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D,0.003,10000835, uniswap_v3,arbitrum_one,uniswap_v3,0x1F98431c8aD98523631AE4a59f267346ea31F984,0xE592427A0AEce92De3Edee1F18E0157C05861564,NA,165, -zyberswap_v2,arbitrum_one,uniswap_v2,0xaC2ee06A14c52570Ef3B9812Ed240BCe359772e7,0x16e71B13fE6079B4312063F7E81F76d165Ad32Ad,,54768263, zyberswap_v3,arbitrum_one,OTHER,0x9C2ABD632771b433E5E7507BcaA41cA3b25D8544,0xFa58b8024B49836772180f2Df902f231ba712F72,,62714800,FALSE balancer,avalanche,balancer,BALANCER_VAULT_ADDRESS,0xBA12222222228d8Ba445958a75a0704d566BF2C8,0,0, sushiswap_v2,avalanche,uniswap_v2,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506,0.003,506190, @@ -27,7 +23,6 @@ sushiswap_v3,avalanche,uniswap_v3,0x3e603C14aF37EBdaD31709C4f848Fc6aD5BEc715,N/A carbon_v1,bera_artio_testnet,carbon_v1,0xeA4F91d96e317F0F88002b251b5E9614f0644D67,0xeA4F91d96e317F0F88002b251b5E9614f0644D67,,, pancakeswap_v2,binance_smart_chain,uniswap_v2,0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73,0x10ED43C718714eb63d5aA57B78B54704E256024E,0.0025,6809737, pancakeswap_v3,binance_smart_chain,uniswap_v3,0x0BFbCF9fa4f9C56B0F40a671Ad40E0805A091865,0x1b81D678ffb9C0263b24A97847620C99d213eB14,NA,26956207, -smardex_v2,binance_smart_chain,uniswap_v2,0xA8EF6FEa013034E62E2C4A9Ec1CDb059fE23Af33,0xaB3699B71e89a53c529eC037C3389B5A2Caf545A,,30036610, sushiswap_v2,binance_smart_chain,uniswap_v2,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506,0.003,5205069, sushiswap_v3,binance_smart_chain,uniswap_v3,0x126555dd55a39328F69400d6aE4F782Bd4C34ABb,N/A,NA,26976538, uniswap_v3,binance_smart_chain,uniswap_v3,0xdB1d10011AD0Ff90774D0C6Bb92e5C5c8b4461F7,0xB971eF87ede563556b2ED4b1C0b0019111Dd85d2,NA,26324014, @@ -49,7 +44,6 @@ horizon_v3,coinbase_base,uniswap_v3,0x07AceD5690e09935b1c0e6E88B772d9440F64718,0 pancakeswap_v2,coinbase_base,uniswap_v2,0x02a84c1b3BBD7401a5f7fa98a384EBC70bB5749E,0x8cFe327CEc66d1C090Dd72bd0FF11d690C33a2Eb,0.0025,2910387, pancakeswap_v3,coinbase_base,uniswap_v3,0x0BFbCF9fa4f9C56B0F40a671Ad40E0805A091865,0x1b81D678ffb9C0263b24A97847620C99d213eB14,NA,2912007, scale_v2,coinbase_base,solidly_v2,0xEd8db60aCc29e14bC867a497D94ca6e3CeB5eC04,0x2F87Bf58D5A9b2eFadE55Cdbd46153a0902be6FA,0.0002,4332209, -smardex_v2,coinbase_base,uniswap_v2,0xdd4536dD9636564D891c919416880a3e250f975A,0x5C622Dcc96b6D96ac6c154f99CF081815094CBC9,TBD,2918553, sushiswap_v2,coinbase_base,uniswap_v2,0x71524B4f93c58fcbF659783284E38825f0622859,0x6BDED42c6DA8FBf0d2bA55B2fa120C5e0c8D7891,0.003,2631214, sushiswap_v3,coinbase_base,uniswap_v3,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0xFB7eF66a7e61224DD6FcD0D7d9C3be5C8B049b9f,NA,1759510, swapbased_v2,coinbase_base,uniswap_v2,0x04C9f118d21e8B767D2e50C946f0cC9F6C367300,0xaaa3b1F1bd7BCc97fD1917c18ADE665C5D31F066,0.003,1972292, @@ -61,7 +55,6 @@ balancer,ethereum,balancer,BALANCER_VAULT_ADDRESS,0xBA12222222228d8Ba445958a75a0 
carbon_v1,ethereum,carbon_v1,0xC537e898CD774e2dCBa3B14Ea6f34C93d5eA45e1,0xC537e898CD774e2dCBa3B14Ea6f34C93d5eA45e1,NA,17087375, pancakeswap_v2,ethereum,uniswap_v2,0x1097053Fd2ea711dad45caCcc45EfF7548fCB362,0xEfF92A263d31888d860bD50809A8D171709b7b1c,0.0025,15614590, pancakeswap_v3,ethereum,uniswap_v3,0x0BFbCF9fa4f9C56B0F40a671Ad40E0805A091865,0x1b81D678ffb9C0263b24A97847620C99d213eB14,NA,16950686, -smardex_v2,ethereum,uniswap_v2,0xB878DC600550367e14220d4916Ff678fB284214F,0xC33984ABcAe20f47a754eF78f6526FeF266c0C6F,,18519341, sushiswap_v2,ethereum,uniswap_v2,0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac,0xd9e1cE17f2641f24aE83637ab66a2cca9C378B9F,0.003,10794229, sushiswap_v3,ethereum,uniswap_v3,0xbACEB8eC6b9355Dfc0269C18bac9d6E2Bdc29C4F,0x2E6cd2d30aa43f40aa81619ff4b6E0a41479B13F,NA,16955547, uniswap_v2,ethereum,uniswap_v2,0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f,0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D,0.003,10000835, @@ -122,7 +115,6 @@ uniswap_v2,optimism,uniswap_v2,0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f,0x7a25 uniswap_v3,optimism,uniswap_v3,0x1F98431c8aD98523631AE4a59f267346ea31F984,0xE592427A0AEce92De3Edee1F18E0157C05861564,NA,0, sushiswap_v2,palm,uniswap_v2,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506,0.003,0, balancer,polygon,balancer,BALANCER_VAULT_ADDRESS,0xBA12222222228d8Ba445958a75a0704d566BF2C8,0,0, -smardex_v2,polygon,uniswap_v2,0x9A1e1681f6D59Ca051776410465AfAda6384398f,0xedD758D17175Dc9131992ebd02F55Cc4ebeb7B7c,,45180001, sushiswap_v2,polygon,uniswap_v2,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506,0.003,11333218, sushiswap_v3,polygon,uniswap_v3,0x917933899c6a5F8E37F31E19f92CdBFF7e8FF0e2,N/A,NA,41024971, uniswap_v2,polygon,uniswap_v2,0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f,0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D,0.003,10000835,
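For reference only (not part of the patch): the deletions in this commit remove uniswap_v2-fork rows whose fee column carries no usable value. The same filter expressed over the CSV, as a sketch that assumes only the header row shown above (exchange_name, chain, fork, factory_address, router_address, fee, start_block, active):

    import pandas as pd

    df = pd.read_csv("fastlane_bot/data/multichain_addresses.csv")
    fee = pd.to_numeric(df["fee"], errors="coerce")   # empty or non-numeric fees become NaN
    df = df[~((df["fork"] == "uniswap_v2") & fee.isna())]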