From 0657c55d9c49e211ce84258c3cc70315b15a854a Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Mon, 29 Apr 2024 08:57:56 +1000 Subject: [PATCH 01/24] add_sei (devnet) initial commit --- .env.example | 1 + fastlane_bot/config/network.py | 41 ++++++++++++++++++- fastlane_bot/config/selectors.py | 1 + .../sei/solidly_v2_event_mappings.csv | 1 + .../blockchain_data/sei/static_pool_data.csv | 3 ++ .../data/blockchain_data/sei/tokens.csv | 5 +++ .../sei/uniswap_v2_event_mappings.csv | 3 ++ .../sei/uniswap_v3_event_mappings.csv | 1 + fastlane_bot/data/multichain_addresses.csv | 2 + main.py | 2 +- 10 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 fastlane_bot/data/blockchain_data/sei/solidly_v2_event_mappings.csv create mode 100644 fastlane_bot/data/blockchain_data/sei/static_pool_data.csv create mode 100644 fastlane_bot/data/blockchain_data/sei/tokens.csv create mode 100644 fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv create mode 100644 fastlane_bot/data/blockchain_data/sei/uniswap_v3_event_mappings.csv diff --git a/.env.example b/.env.example index 5f1f4a5ae..5cb8f2b48 100644 --- a/.env.example +++ b/.env.example @@ -3,6 +3,7 @@ export ETH_PRIVATE_KEY_BE_CAREFUL="0x123-USE-YOUR-OWN-PRIVATE-KEY-HERE" export WEB3_FANTOM="FANTOM-API-KEY-HERE" // "public" can be used in place of a paid API key export WEB3_MANTLE="MANTLE-API-KEY-HERE" export WEB3_LINEA="LINEA-API-KEY-HERE" // +export WEB3_SEI="SEI-API-KEY-HERE" // #******** For Development - not required to run bot ********# export ETHERSCAN_TOKEN="ONLY_REQUIRED_IN_DEV" diff --git a/fastlane_bot/config/network.py b/fastlane_bot/config/network.py index 0bed6b542..be8952671 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -276,6 +276,7 @@ class ConfigNetwork(ConfigBase): NETWORK_FANTOM = S.NETWORK_FANTOM NETWORK_MANTLE = S.NETWORK_MANTLE NETWORK_LINEA = S.NETWORK_LINEA + NETWORK_SEI = S.NETWORK_SEI # FLAGS 
####################################################################################### @@ -317,7 +318,9 @@ def new(cls, network=None): elif network == cls.NETWORK_MANTLE: return _ConfigNetworkMantle(_direct=False) elif network == cls.NETWORK_LINEA: - return _ConfigNetworkLinea(_direct=False) + return _ConfigNetworkLinea(_direct=False) + elif network == cls.NETWORK_SEI: + return _ConfigNetworkSei(_direct=False) elif network == cls.NETWORK_TENDERLY: return _ConfigNetworkTenderly(_direct=False) else: @@ -777,6 +780,42 @@ class _ConfigNetworkLinea(ConfigNetwork): # Add any exchanges unique to the chain here CHAIN_SPECIFIC_EXCHANGES = [] +class _ConfigNetworkSei(ConfigNetwork): + """ + Fastlane bot config -- network [Sei] + """ + + NETWORK = S.NETWORK_SEI + NETWORK_ID = "1" # TODO + NETWORK_NAME = "sei" + DEFAULT_PROVIDER = S.PROVIDER_ALCHEMY + RPC_ENDPOINT = "https://evm-rpc.arctic-1.seinetwork.io/" # TODO currently Sei devnet + WEB3_ALCHEMY_PROJECT_ID = os.environ.get("WEB3_SEI") + + network_df = get_multichain_addresses(network=NETWORK_NAME) + FASTLANE_CONTRACT_ADDRESS = "0xC7Dd38e64822108446872c5C2105308058c5C55C" #TODO - UPDATE WITH Mainnet + MULTICALL_CONTRACT_ADDRESS = "0x1E05037b9c4fEFaF3c45CD6F4F2C3197e6A43cD8" # previously 0xcA11bde05977b3631167028862bE2a173976CA11 + + CARBON_CONTROLLER_ADDRESS = "0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA" #TODO - UPDATE WITH Mainnet + CARBON_CONTROLLER_VOUCHER = "0xe4816658ad10bF215053C533cceAe3f59e1f1087" #TODO - UPDATE WITH Mainnet + + NATIVE_GAS_TOKEN_ADDRESS = "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE" + WRAPPED_GAS_TOKEN_ADDRESS = "0x26841a0A5D958B128209F4ea9a1DD7E61558c330" # TODO confirm for Mainnet + NATIVE_GAS_TOKEN_SYMBOL = "SEI" + WRAPPED_GAS_TOKEN_SYMBOL = "WSEI" + STABLECOIN_ADDRESS = "0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C" #TODO USDC on devnet + + IS_INJECT_POA_MIDDLEWARE = False + # Balancer + BALANCER_VAULT_ADDRESS = "0x7ccBebeb88696f9c8b061f1112Bb970158e29cA5" # TODO Jellyswap on devnet + 
+ CHAIN_FLASHLOAN_TOKENS = { + "0x26841a0A5D958B128209F4ea9a1DD7E61558c330": "WSEI", #TODO confirm for Mainnet + "0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C": "USDC", #TODO confirm for Mainnet + } + # Add any exchanges unique to the chain here + CHAIN_SPECIFIC_EXCHANGES = [] + class _ConfigNetworkTenderly(ConfigNetwork): """ Fastlane bot config -- network [Ethereum Tenderly] diff --git a/fastlane_bot/config/selectors.py b/fastlane_bot/config/selectors.py index d910f52be..791810fea 100644 --- a/fastlane_bot/config/selectors.py +++ b/fastlane_bot/config/selectors.py @@ -18,6 +18,7 @@ NETWORK_CANTO = "canto" NETWORK_FANTOM = "fantom" NETWORK_LINEA = "linea" +NETWORK_SEI = "sei" NETWORK_MANTLE = "mantle" NETWORK_SCROLL = "scroll" NETWORK_BSC = "binance_smart_chain" diff --git a/fastlane_bot/data/blockchain_data/sei/solidly_v2_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/solidly_v2_event_mappings.csv new file mode 100644 index 000000000..2785f2805 --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/solidly_v2_event_mappings.csv @@ -0,0 +1 @@ +exchange,address diff --git a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv new file mode 100644 index 000000000..09177afa2 --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv @@ -0,0 +1,3 @@ 
+cid,strategy_id,last_updated,last_updated_block,descr,pair_name,exchange_name,fee,fee_float,address,anchor,tkn0_address,tkn1_address,tkn0_decimals,tkn1_decimals,exchange_id,tkn0_symbol,tkn1_symbol,timestamp,tkn0_balance,tkn1_balance,liquidity,sqrt_price_q96,tick,tick_spacing,exchange,pool_type,tkn0_weight,tkn1_weight,tkn2_address,tkn2_decimals,tkn2_symbol,tkn2_balance,tkn2_weight,tkn3_address,tkn3_decimals,tkn3_symbol,tkn3_balance,tkn3_weight,tkn4_address,tkn4_decimals,tkn4_symbol,tkn4_balance,tkn4_weight,tkn5_address,tkn5_decimals,tkn5_symbol,tkn5_balance,tkn5_weight,tkn6_address,tkn6_decimals,tkn6_symbol,tkn6_balance,tkn6_weight,tkn7_address,tkn7_decimals,tkn7_symbol,tkn7_balance,tkn7_weight +0x1422169ab760ea6994358267b7d3783e8e7fa55c6a74b365b3fd3d17cbf4c6f1,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,dragonswap,0.003,0.003,0x01A34Dfa104F020FEE739268679338169945D5B1,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0x7b75109369ACb528d9fa989E227812a6589712b9,18,18,3,WSEI,DSWAP,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +0xbfd9612b2cb8035908dff18c040f64de75999cefd1020b5ce8a2e533c2ecd5dc,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,dragonswap,0.003,0.003,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,18,6,3,WSEI,USDC,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, diff --git a/fastlane_bot/data/blockchain_data/sei/tokens.csv b/fastlane_bot/data/blockchain_data/sei/tokens.csv new file mode 100644 index 000000000..d831bd4a1 --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/tokens.csv @@ -0,0 +1,5 @@ +address,decimals,symbol +0x26841a0A5D958B128209F4ea9a1DD7E61558c330,18,WSEI 
+0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,6,USDC +0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,18,WSEI +0x7b75109369ACb528d9fa989E227812a6589712b9,18,DSWAP diff --git a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv new file mode 100644 index 000000000..f0165604a --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv @@ -0,0 +1,3 @@ +exchange,address +dragonswap,0x01A34Dfa104F020FEE739268679338169945D5B1 +dragonswap,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732 \ No newline at end of file diff --git a/fastlane_bot/data/blockchain_data/sei/uniswap_v3_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/uniswap_v3_event_mappings.csv new file mode 100644 index 000000000..2785f2805 --- /dev/null +++ b/fastlane_bot/data/blockchain_data/sei/uniswap_v3_event_mappings.csv @@ -0,0 +1 @@ +exchange,address diff --git a/fastlane_bot/data/multichain_addresses.csv b/fastlane_bot/data/multichain_addresses.csv index c1ba30702..235346cb8 100644 --- a/fastlane_bot/data/multichain_addresses.csv +++ b/fastlane_bot/data/multichain_addresses.csv @@ -135,3 +135,5 @@ sushiswap_v3,thundercore,uniswap_v3,0xc35DADB65012eC5796536bD9864eD8773aBc74C4,0 pancakeswap_v3,zkevm,uniswap_v3,0x0BFbCF9fa4f9C56B0F40a671Ad40E0805A091865,0x1b81D678ffb9C0263b24A97847620C99d213eB14,,, pancakeswap_v3,zksync,uniswap_v3,0x1BB72E0CbbEA93c08f535fc7856E0338D7F7a8aB,0xD70C70AD87aa8D45b8D59600342FB3AEe76E3c68,,, xfai_v0,linea,solidly_v2,0xa5136eAd459F0E61C99Cec70fe8F5C24cF3ecA26,0xD538be6e9026C13D130C9e17d509E69C8Bb0eF33,,222864, +carbon_v1,sei,carbon_v1,0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA,0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA,,17658678, +dragonswap,sei,uniswap_v2,0x5D370a6189F89603FaB67e9C68383e63F7B6A262,0x2346d3A6fb18Ff3ae590Ea31d9e41E6AB8c9f5EB,,1008775, diff --git a/main.py b/main.py index cb61544f2..468909357 100644 --- a/main.py +++ b/main.py @@ -672,7 +672,7 @@ def 
run(mgr, args, tenderly_uri=None) -> None: "--blockchain", default="ethereum", help="A blockchain from the list. Blockchains not in this list do not have a deployed Fast Lane contract and are not supported.", - choices=["ethereum", "coinbase_base", "fantom", "mantle", "linea"], + choices=["ethereum", "coinbase_base", "fantom", "mantle", "linea", "sei"], ) parser.add_argument( "--pool_data_update_frequency", From c5047834375ff5c76a586e71673e1bebbf9d493f Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Wed, 1 May 2024 21:55:18 +1000 Subject: [PATCH 02/24] update wrapped address --- fastlane_bot/config/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastlane_bot/config/network.py b/fastlane_bot/config/network.py index be8952671..cfec97aa3 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -800,7 +800,7 @@ class _ConfigNetworkSei(ConfigNetwork): CARBON_CONTROLLER_VOUCHER = "0xe4816658ad10bF215053C533cceAe3f59e1f1087" #TODO - UPDATE WITH Mainnet NATIVE_GAS_TOKEN_ADDRESS = "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE" - WRAPPED_GAS_TOKEN_ADDRESS = "0x26841a0A5D958B128209F4ea9a1DD7E61558c330" # TODO confirm for Mainnet + WRAPPED_GAS_TOKEN_ADDRESS = "0x57eE725BEeB991c70c53f9642f36755EC6eb2139" # TODO confirm for Mainnet NATIVE_GAS_TOKEN_SYMBOL = "SEI" WRAPPED_GAS_TOKEN_SYMBOL = "WSEI" STABLECOIN_ADDRESS = "0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C" #TODO USDC on devnet From 37c5f80101c375da62080870642ad051abcdc1b5 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Wed, 1 May 2024 22:28:31 +1000 Subject: [PATCH 03/24] Update tokens.csv --- fastlane_bot/data/blockchain_data/sei/tokens.csv | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fastlane_bot/data/blockchain_data/sei/tokens.csv b/fastlane_bot/data/blockchain_data/sei/tokens.csv index d831bd4a1..af2dc29ce 100644 --- a/fastlane_bot/data/blockchain_data/sei/tokens.csv +++ 
b/fastlane_bot/data/blockchain_data/sei/tokens.csv @@ -3,3 +3,6 @@ address,decimals,symbol 0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,6,USDC 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,18,WSEI 0x7b75109369ACb528d9fa989E227812a6589712b9,18,DSWAP +0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE,18,SEI +0x9e7A8e558Ce582511f4104465a886b7bEfBC146b,18,JLY +0x57eE725BEeB991c70c53f9642f36755EC6eb2139,18,WSEI From d39f854118cba8c5f6be21f7113cd7cb7ad6fb00 Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Mon, 11 Dec 2023 17:20:19 +0000 Subject: [PATCH 04/24] CPC v3.5 (as_dicts, as_df, minrw, price estimate) (cherry picked from commit 825ff7c605becacc1e5be762d368f627fd8af63f) --- fastlane_bot/tools/cpc.py | 60 ++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/fastlane_bot/tools/cpc.py b/fastlane_bot/tools/cpc.py index 1e74c5eb5..29f18289e 100644 --- a/fastlane_bot/tools/cpc.py +++ b/fastlane_bot/tools/cpc.py @@ -8,8 +8,8 @@ NOTE: this class is not part of the API of the Carbon protocol, and you must expect breaking changes even in minor version updates. Use at your own risk. 
""" -__VERSION__ = "3.4" -__DATE__ = "23/Jan/2024" +__VERSION__ = "3.5" +__DATE__ = "22/Apr/2023" from dataclasses import dataclass, field, asdict, InitVar from .simplepair import SimplePair as Pair @@ -836,7 +836,7 @@ def from_univ3(cls, Pmarg, uniL, uniPa, uniPb, pair, cid, fee, descr, params=Non constr="uv3", params=params, ) - + SOLIDLY_PRICE_SPREAD = 0.06 # 0.06 gives pretty good results for m=2.6 @classmethod def from_solidly( @@ -982,6 +982,9 @@ def from_solidly( print("[cpc::from_solidly] returning curve directly is deprecated; prepare to accept a list of curves in the future") return result + # minimun range width (pa/pb-1) for carbon curves and sqrt thereof + CARBON_MIN_RANGEWIDTH = 1e-6 + @classmethod def from_carbon( cls, @@ -999,6 +1002,7 @@ def from_carbon( descr=None, params=None, isdydx=True, + minrw=None, ): """ constructor: from a single Carbon order (see class docstring for other parameters) (1) @@ -1011,6 +1015,7 @@ def from_carbon( :B: alternative to pa, pb: B = sqrt(pb) in dy/dy :tkny: token y :isdydx: if True prices in dy/dx, if False in quote direction of the pair + :minrw: minimum perc width (pa/pb-1) of range (default CARBON_MIN_RANGEWIDTH) NOTE 1: that ALL parameters are mandatory, except that EITHER pa, bp OR A, B must be given but not both; we do not correct for incorrect assignment of @@ -1028,7 +1033,10 @@ def from_carbon( # assert not fee is None, "fee must not be None" # assert not cid is None, "cid must not be None" # assert not descr is None, "descr must not be None" - + + if minrw is None: + minrw = cls.CARBON_MIN_RANGEWIDTH + # if yint is None: # yint = y assert y <= yint, "y must be <= yint" @@ -1067,15 +1075,21 @@ def from_carbon( if not tkny == tknq: pa, pb = 1 / pa, 1 / pb - # zero-width ranges are somewhat extended for numerical stability + # small and zero-width ranges are extended for numerical stability pa0, pb0 = pa, pb + if pa/pb-1 < minrw: + pa = pb = sqrt(pa*pb) + assert pa == pb, "just making sure" if pa == pb: - pa 
*= 1.0000001 - pb /= 1.0000001 + # pa *= 1.0000001 + # pb /= 1.0000001 + rw_multiplier = sqrt(1+minrw) + pa *= rw_multiplier + pb /= rw_multiplier # validation - if not pa > pb: - raise cls.CPCValidationError(f"pa > pb required ({pa}, {pb})") + if not pa/pb - 1 >= minrw*0.99: + raise cls.CPCValidationError(f"pa +> pb required ({pa}, {pb}, {pa/pb-1}, {minrw})") # finally set A, B A = sqrt(pa) - sqrt(pb) @@ -1094,7 +1108,7 @@ def from_carbon( yasym_times_A = yint * B kappa_times_A = yint**2 / A - params0 = dict(y=y, yint=yint, A=A0, B=B, pa=pa0, pb=pb0) + params0 = dict(y=y, yint=yint, A=A0, B=B, pa=pa0, pb=pb0, minrw=minrw) if params is None: params = AttrDict(params0) else: @@ -1805,13 +1819,15 @@ def scale(self, tkn): """returns the scale of tkn""" return self.tokenscale.scale(tkn) - def asdicts(self): + def as_dicts(self): """returns list of dictionaries representing the curves""" return [c.asdict() for c in self.curves] - - def asdf(self): + asdicts = as_dicts # legacy name + + def as_df(self): """returns pandas dataframe representing the curves""" return pd.DataFrame.from_dict(self.asdicts()).set_index("cid") + asdf = as_df # legacy name @classmethod def from_dicts(cls, dicts, *, tokenscale=None): @@ -2531,7 +2547,7 @@ def xystats(self, curves=None): PE_DATA = "data" def price_estimate( - self, *, tknq=None, tknb=None, pair=None, result=None, raiseonerror=True + self, *, tknq=None, tknb=None, pair=None, result=None, raiseonerror=True, verbose=False ): """ calculates price estimate in the reference token as base token @@ -2544,6 +2560,7 @@ def price_estimate( :PE_PAIR: slashpair :PE_CURVES: curves :PE_DATA: prices, weights + :verbose: whether to print some progress :returns: price (quote per base) """ assert tknq is not None and tknb is not None or pair is not None, ( @@ -2570,6 +2587,8 @@ def price_estimate( # return dict(curves=tuple(crvs), rcurves=tuple(rcrvs)) return tuple(acurves) data = tuple((r[1], sqrt(r[2])) for r in acurves) + if verbose: + 
print(f"[price_estimate] {tknq}/{tknb} {len(data)} curves") if not len(data) > 0: if raiseonerror: raise ValueError(f"no curves found for {tknq}/{tknb}") @@ -2620,13 +2639,13 @@ def price_estimates( tknqs = [t.strip() for t in tknqs.split(",")] if isinstance(tknbs, str): tknbs = [t.strip() for t in tknbs.split(",")] - # print(f"[price_estimates] tknqs [{len(tknqs)}], tknbs [{len(tknbs)}]") - # print(f"[price_estimates] tknqs [{len(tknqs)}] = {tknqs} , tknbs [{len(tknbs)}]] = {tknbs} ") + if verbose: + print(f"[price_estimates] tknqs [{len(tknqs)}] = {tknqs} , tknbs [{len(tknbs)}] = {tknbs} ") resulttp = self.PE_PAIR if pairs else None result = np.array( [ [ - self.price_estimate(tknb=b, tknq=q, raiseonerror=False, result=resulttp) + self.price_estimate(tknb=b, tknq=q, raiseonerror=False, result=resulttp, verbose=verbose) for b in tknbs ] for q in tknqs @@ -2696,12 +2715,7 @@ def price_estimates( } # print("[price_estimates] result", result) if not len(missing) == 0: - raise ValueError( - f"no price found for {len(missing)} pairs", - result, - missing, - len(missing), - ) + raise ValueError(f"no price found for {len(missing)} pairs", missing, result) #print(f"[price_estimates] DONE [{time.time()-start_time:.2f}s]") if unwrapsingle and len(tknqs) == 1: From f5c2a1e07284c0821dada5980383b3da430056cf Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Mon, 11 Dec 2023 19:34:13 +0000 Subject: [PATCH 05/24] MargPOptimizer 5.3-b1 Update margpoptimizer.py (cherry picked from commit ff03bd02bb84eefd7857011cde7390a7b177fd78) --- .../tools/optimizer/margpoptimizer.py | 427 ++++++++++-------- 1 file changed, 245 insertions(+), 182 deletions(-) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py b/fastlane_bot/tools/optimizer/margpoptimizer.py index eb207f7e9..a23ff409b 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -22,8 +22,8 @@ (c) Copyright Bprotocol 
foundation 2023. Licensed under MIT """ -__VERSION__ = "5.2" -__DATE__ = "15/Sep/2023" +__VERSION__ = "5.3-b1" +__DATE__ = "14/Dec/2023" from dataclasses import dataclass, field, fields, asdict, astuple, InitVar import pandas as pd @@ -52,26 +52,26 @@ def kind(self): return "margp" @classmethod - def jacobian(cls, func, x, *, eps=None): + def jacobian(cls, func, x, *, jach=None): """ computes the Jacobian of func at point x :func: a callable x=(x1..xn) -> (y1..ym), taking and returning np.arrays - must also take a quiet parameter, which if True suppresses output + must also take a `quiet` parameter, which, if True suppresses output :x: a vector x=(x1..xn) as np.array + :jach: the h value for the derivative (Jacobian) calculation (default: cls.MOJACH) """ - if eps is None: - eps = cls.JACEPS + h = cls.MOJACH if jach is None else jach n = len(x) y = func(x, quiet=True) jac = np.zeros((n, n)) for j in range(n): # through columns to allow for vector addition - Dxj = abs(x[j]) * eps if x[j] != 0 else eps + Dxj = abs(x[j]) * h if x[j] != 0 else h x_plus = [(xi if k != j else xi + Dxj) for k, xi in enumerate(x)] jac[:, j] = (func(x_plus, quiet=True) - y) / Dxj return jac J = jacobian - JACEPS = 1e-5 + MOJACH = 1e-5 MO_DEBUG = "debug" @@ -81,12 +81,35 @@ def jacobian(cls, func, x, *, eps=None): MO_MINIMAL = "minimal" MO_FULL = "full" - MOEPS = 1e-6 - MOMAXITER = 50 + MOCRITR = "rel" # relative convergence criterion used + MOCRITA = "abs" # ditto absolute + MOEPS = 1e-6 # relative convergence threshold + MOEPSAUNIT = "USD" # absolute convergence unit + MOEPSA = 1 # absolute convergence threshold (unit: MOCAUNIT) + MONORML1 = 1 # L1 norm (sum of absolute values) + MONORML2 = 2 # L2 norm (Euclidean distance) + MONORMLINF = np.inf # L-infinity norm (maximum absolute value) + + MOMAXITER = 50 class OptimizationError(Exception): pass class ConvergenceError(OptimizationError): pass class ParameterError(OptimizationError): pass + + @classmethod + def norml1_f(cls, x): + """the L1 
norm of a vector x""" + return np.linalg.norm(x, ord=1) + + @classmethod + def norml2_f(cls, x): + """the L2 norm of a vector x""" + return np.linalg.norm(x, ord=2) + + @classmethod + def normlinf_f(cls, x): + """the Linf norm of a vector x""" + return np.linalg.norm(x, ord=np.inf) def optimize(self, sfc=None, result=None, *, params=None): """ @@ -120,7 +143,12 @@ def optimize(self, sfc=None, result=None, *, params=None): ================== ========================================================================= parameter meaning ================== ========================================================================= - eps precision parameter for accepting the result (default: 1e-6) + crit criterion: MOCRITR (relative; default) or MOCRITA (absolute) + norm norm for convergence crit (MONORML1, MONORML2, MONORMLINF) + eps relative convergence threshold (default: MOEPS) + epsa absolute convergence threshold (default: MOEPSA) + epsaunit unit for epsa (default: MOEPSAUNIT) + jach step size for calculating Jacobian (default: MOJACH) maxiter maximum number of iterations (default: 100) verbose if True, print some high level output progress if True, print some basic progress output @@ -129,11 +157,10 @@ def optimize(self, sfc=None, result=None, *, params=None): raiseonerror if True, raise an OptimizationError exception on error pstart starting price for optimization (3) ================== ========================================================================= - - + NOTE 1: this optimizer uses the marginal price method, ie it solves the equation - dx_i (p) = 0 for all i != targettkn, and the whole price vector + dx_i (p) = 0 for all i != targettkn, and the whole price vector p NOTE 2: at the moment only the trivial self-financing constraint is allowed, ie the one that only specifies the target token, and where all other constraints are zero; if sfc is @@ -143,6 +170,8 @@ def optimize(self, sfc=None, result=None, *, params=None): returned by MO_PSTART; excess tokens 
can be provided but all required tokens must be present """ + start_time = time.time() + # data conversion: string to SFC object; note that anything but pure arb not currently supported if isinstance(sfc, str): sfc = self.arb(targettkn=sfc) @@ -155,174 +184,191 @@ def optimize(self, sfc=None, result=None, *, params=None): dxdy_f = lambda r: (np.array(r[0:2])) # extract dx, dy from result tn = lambda t: t.split("-")[0] # token name, eg WETH-xxxx -> WETH - # initialisations + # epsilons and maxiter eps = P("eps") or self.MOEPS + epsa = P("epsa") or self.MOEPSA + epsaunit = P("epsaunit") or self.MOEPSAUNIT + jach = P("jach") or self.MOJACH maxiter = P("maxiter") or self.MOMAXITER - start_time = time.time() + + # curves, tokens and pairs curves_t = self.curve_container + if len (curves_t) == 0: + raise self.ParameterError("no curves found") + if len (curves_t) == 1: + raise self.ParameterError(f"can't run arbitrage on single curve {curves_t}") + alltokens_s = self.curve_container.tokens() + if not targettkn in alltokens_s: + raise self.ParameterError(f"targettkn {targettkn} not in {alltokens_s}") + tokens_t = tuple(t for t in alltokens_s if t != targettkn) # all _other_ tokens... 
tokens_ix = {t: i for i, t in enumerate(tokens_t)} # ...with index lookup pairs = self.curve_container.pairs(standardize=False) - curves_by_pair = { - pair: tuple(c for c in curves_t if c.pair == pair) for pair in pairs } + curves_by_pair = {pair: tuple(c for c in curves_t if c.pair == pair) for pair in pairs } pairs_t = tuple(tuple(p.split("/")) for p in pairs) + + # return the inner function if requested + # (this may need to move lower) + if result == self.MO_DTKNFROMPF: + return dtknfromp_f + + # return debug info if requested + if result == self.MO_DEBUG: + return dict( + tokens_t=tokens_t, + tokens_ix=tokens_ix, + pairs=pairs, + sfc=sfc, + targettkn=targettkn, + pairs_t=pairs_t, + crit=dict(crit=P("crit"), eps=eps, epsa=epsa, epsaunit=epsaunit, pstart=P("pstart")), + optimizer=self, + ) - try: - - # assertions - if len (curves_t) == 0: - raise self.ParameterError("no curves found") - if len (curves_t) == 1: - raise self.ParameterError(f"can't run arbitrage on single curve {curves_t}") - if not targettkn in alltokens_s: - raise self.ParameterError(f"targettkn {targettkn} not in {alltokens_s}") - - # calculating the start price for the iteration process - if not P("pstart") is None: - pstart = P("pstart") - if P("verbose") or P("debug"): - print(f"[margp_optimizer] using pstartd [{len(P('pstart'))} tokens]") - if isinstance(P("pstart"), pd.DataFrame): - try: - pstart = pstart.to_dict()[targettkn] - except Exception as e: - raise Exception( - f"error while converting dataframe pstart to dict: {e}", - pstart, - targettkn, - ) - assert isinstance( - pstart, dict - ), f"pstart must be a dict or a data frame [{pstart}]" - price_estimates_t = tuple(pstart[t] for t in tokens_t) - else: - if P("verbose") or P("debug"): - print("[margp_optimizer] calculating price estimates") + # pstart + pstart = P("pstart") + if not pstart is None: + if P("verbose") or P("debug"): + print(f"[margp_optimizer] using pstart [{len(P('pstart'))} tokens]") + if isinstance(pstart, 
pd.DataFrame): try: - price_estimates_t = self.price_estimates( - tknq=targettkn, - tknbs=tokens_t, - verbose=False, - triangulate=True, - ) + pstart = pstart.to_dict()[targettkn] except Exception as e: - if P("verbose") or P("debug"): - print(f"[margp_optimizer] error while calculating price estimates: [{e}]") - price_estimates_t = None + raise Exception(f"error while converting dataframe pstart to dict: {e}", pstart, targettkn) + assert isinstance(pstart, dict), f"pstart must be a dict or a data frame [{pstart}]" + price_estimates_t = tuple(pstart[t] for t in tokens_t) + else: + if P("verbose") or P("debug"): + print("[margp_optimizer] calculating price estimates") if P("debug"): - print("[margp_optimizer] pstart:", price_estimates_t) - if result == self.MO_PSTART: - df = pd.DataFrame(price_estimates_t, index=tokens_t, columns=[targettkn]) - df.index.name = "tknb" - return df + print(f"[margp_optimizer] tknq={targettkn}, tknbs={tokens_t}") + + try: + price_estimates_t = self.price_estimates( + tknq=targettkn, + tknbs=tokens_t, + verbose=P("debug"), + triangulate=True, + ) + except Exception as e: + if P("verbose") or P("debug"): + print(f"[margp_optimizer] error calculating price estimates: [{e}]") + price_estimates_t = None + raise + + if P("debug"): + print("[margp_optimizer] pstart:", price_estimates_t) + if result == self.MO_PSTART: + df = pd.DataFrame(price_estimates_t, index=tokens_t, columns=[targettkn]) + df.index.name = "tknb" + return df - ## INNER FUNCTION: CALCULATE THE TARGET FUNCTION - def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): - """ - calculates the aggregate change in token amounts for a given price vector - - :p: price vector, where prices use the reference token as quote token - this vector is an np.array, and the token order is the same as in tokens_t - :islog10: if True, p is interpreted as log10(p) - :asdct: if True, the result is returned as dict AND tuple, otherwise as np.array - :quiet: if overrides P("debug") etc, eg 
for calc of Jacobian - :returns: if asdct is False, a tuple of the same length as tokens_t detailing the - change in token amounts for each token except for the target token (ie the - quantity with target zero; if asdct is True, that same information is - returned as dict, including the target token. - """ - p = np.array(p, dtype=np.float64) - if islog10: - p = np.exp(p * np.log(10)) - assert len(p) == len(tokens_t), f"p and tokens_t have different lengths [{p}, {tokens_t}]" - if P("debug") and not quiet: - print(f"\n[dtknfromp_f] =====================>>>") - print(f"prices={p}") - print(f"tokens={tokens_t}") + # criterion and norm + crit = P("crit") or self.MOCRITR + assert crit in set((self.MOCRITR, self.MOCRITA)), "crit must be MOCRITR or MOCRITA" + if crit == self.MOCRITA: + assert not pstart is None, "pstart must be provided if crit is MOCRITA" + assert epsaunit in pstart, f"epsaunit {epsaunit} not in pstart {P('pstart')}" + p_targettkn_per_epsaunit = pstart[epsaunit]/pstart[targettkn] + if P("debug"): + print(f"[margp_optimizer] 1 epsaunit [{epsaunit}] = {p_targettkn_per_epsaunit:,.4f} target [{targettkn}]") + crit_is_relative = crit == self.MOCRITR + eps_used = eps if crit_is_relative else epsa + eps_unit = 1 if crit_is_relative else epsaunit + + norm = P("norm") or self.MONORML2 + assert norm in set((self.MONORML1, self.MONORML2, self.MONORMLINF)), f"norm must be MONORML1, MONORML2 or MONORMLINF [{norm}]" + normf = lambda x: np.linalg.norm(x, ord=norm) - # pvec is dict {tkn -> (log) price} for all tokens in p - pvec = {tkn: p_ for tkn, p_ in zip(tokens_t, p)} - pvec[targettkn] = 1 - if P("debug") and not quiet: - print(f"pvec={pvec}") + ## INNER FUNCTION: CALCULATE THE TARGET FUNCTION + def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): + """ + calculates the aggregate change in token amounts for a given price vector + + :p: price vector, where prices use the reference token as quote token + this vector is an np.array, and the token order is the 
same as in tokens_t + :islog10: if True, p is interpreted as log10(p) + :asdct: if True, the result is returned as dict AND tuple, otherwise as np.array + :quiet: if overrides P("debug") etc, eg for calc of Jacobian + :returns: if asdct is False, a tuple of the same length as tokens_t detailing the + change in token amounts for each token except for the target token (ie the + quantity with target zero; if asdct is True, that same information is + returned as dict, including the target token. + """ + p = np.array(p, dtype=np.float64) + if islog10: + p = np.exp(p * np.log(10)) + assert len(p) == len(tokens_t), f"p and tokens_t have different lengths [{p}, {tokens_t}]" + if P("debug") and not quiet: + print(f"\n[dtknfromp_f] =====================>>>") + print(f"prices={p}") + print(f"tokens={tokens_t}") + + # pvec is dict {tkn -> (log) price} for all tokens in p + pvec = {tkn: p_ for tkn, p_ in zip(tokens_t, p)} + pvec[targettkn] = 1 + if P("debug") and not quiet: + print(f"pvec={pvec}") + + sum_by_tkn = {t: 0 for t in alltokens_s} + for pair, (tknb, tknq) in zip(pairs, pairs_t): + if get(p, tokens_ix.get(tknq)) > 0: + price = get(p, tokens_ix.get(tknb)) / get(p, tokens_ix.get(tknq)) + else: + #print(f"[dtknfromp_f] warning: price for {pair} is unknown, using 1 instead") + price = 1 + curves = curves_by_pair[pair] + c0 = curves[0] + #dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) + dxvecs = (c.dxvecfrompvec_f(pvec) for c in curves) - sum_by_tkn = {t: 0 for t in alltokens_s} - for pair, (tknb, tknq) in zip(pairs, pairs_t): - if get(p, tokens_ix.get(tknq)) > 0: - price = get(p, tokens_ix.get(tknb)) / get(p, tokens_ix.get(tknq)) - else: - #print(f"[dtknfromp_f] warning: price for {pair} is unknown, using 1 instead") - price = 1 - curves = curves_by_pair[pair] - c0 = curves[0] - #dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - dxvecs = (c.dxvecfrompvec_f(pvec) for c in curves) - - if P("debug2") and not quiet: - dxdy = 
tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - # TODO: rewrite this using the dxvec - # there is no need to extract dy dx; just iterate over dict - # however not urgent because this is debug code - print(f"\n{c0.pairp} --->>") - print(f" price={price:,.4f}, 1/price={1/price:,.4f}") - for r, c in zip(dxdy, curves): - s = f" cid={c.cid:15}" - s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" - s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" - s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" - print(s) - print(f"<<--- {c0.pairp}") - - # old code from dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - # sumdx, sumdy = sum(dxdy) - # sum_by_tkn[tknq] += sumdy - # sum_by_tkn[tknb] += sumdx - for dxvec in dxvecs: - for tkn, dx_ in dxvec.items(): - sum_by_tkn[tkn] += dx_ - - # if P("debug") and not quiet: - # print(f"pair={c0.pairp}, {sumdy:,.4f} {tn(tknq)}, {sumdx:,.4f} {tn(tknb)}, price={price:,.4f} {tn(tknq)} per {tn(tknb)} [{len(curves)} funcs]") - - result = tuple(sum_by_tkn[t] for t in tokens_t) - if P("debug") and not quiet: - print(f"sum_by_tkn={sum_by_tkn}") - print(f"result={result}") - print(f"<<<===================== [dtknfromp_f]") - - if asdct: - return sum_by_tkn, np.array(result) - - return np.array(result) - ## END INNER FUNCTION - - # return the inner function if requested - if result == self.MO_DTKNFROMPF: - return dtknfromp_f - - # return debug info if requested - if result == self.MO_DEBUG: - return dict( - # price_estimates_all = price_estimates_all, - # price_estimates_d = price_estimates_d, - price_estimates_t=price_estimates_t, - tokens_t=tokens_t, - tokens_ix=tokens_ix, - pairs=pairs, - sfc=sfc, - targettkn=targettkn, - pairs_t=pairs_t, - dtknfromp_f=dtknfromp_f, - optimizer=self, - ) + if P("debug2") and not quiet: + dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) + # TODO: rewrite this using the dxvec + # there is no need to extract dy dx; just iterate over dict + # however not urgent because this is debug code + print(f"\n{c0.pairp} 
--->>") + print(f" price={price:,.4f}, 1/price={1/price:,.4f}") + for r, c in zip(dxdy, curves): + s = f" cid={c.cid:15}" + s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" + s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" + s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" + print(s) + print(f"<<--- {c0.pairp}") + + # old code from dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) + # sumdx, sumdy = sum(dxdy) + # sum_by_tkn[tknq] += sumdy + # sum_by_tkn[tknb] += sumdx + for dxvec in dxvecs: + for tkn, dx_ in dxvec.items(): + sum_by_tkn[tkn] += dx_ + + # if P("debug") and not quiet: + # print(f"pair={c0.pairp}, {sumdy:,.4f} {tn(tknq)}, {sumdx:,.4f} {tn(tknb)}, price={price:,.4f} {tn(tknq)} per {tn(tknb)} [{len(curves)} funcs]") + + result = tuple(sum_by_tkn[t] for t in tokens_t) + if P("debug") and not quiet: + print(f"sum_by_tkn={sum_by_tkn}") + print(f"result={result}") + print(f"<<<===================== [dtknfromp_f]") + + if asdct: + return sum_by_tkn, np.array(result) + + return np.array(result) + ## END INNER FUNCTION + try: + # setting up the optimization variables (note: we optimize in log space) if price_estimates_t is None: raise Exception(f"price estimates not found; try setting pstart") p = np.array(price_estimates_t, dtype=float) plog10 = np.log10(p) - if P("verbose"): + if P("verbose") or P("debug"): # dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) print("[margp_optimizer] pe ", p) print("[margp_optimizer] p ", ", ".join(f"{x:,.2f}" for x in p)) @@ -335,9 +381,7 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): for i in range(maxiter): if P("progress"): - print( - f"Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s" - ) + print(f"Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s") # calculate the change in token amounts (also as dict if requested) if P("tknd"): @@ -348,7 +392,7 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # calculate the Jacobian # if P("debug"): # 
print("\n[margp_optimizer] ============= JACOBIAN =============>>>") - J = self.J(dtknfromp_f, plog10) + J = self.J(dtknfromp_f, plog10, jach=jach) # ATTENTION: dtknfromp_f takes log10(p) as input if P("debug"): # print("==== J ====>") @@ -367,35 +411,55 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html - # update log prices, prices and determine the criterium... + # update log prices, prices... p0log10 = [*plog10] plog10 += dplog10 p = np.exp(plog10 * np.log(10)) - criterium = np.linalg.norm(dplog10) + # determine the convergence criterium + if crit_is_relative: + criterium = normf(dplog10) + # the relative criterium is the norm of the change in log prices + # in other words, it is something like an "average percentage change" of prices + # this may not quite what we want though because if we have highly levered curves, + # then even small percentage changes in prices can be important + # eg for limit orders the whole liquidity is by default distributed + # over a small range that may only be minrw=1e-6 wide + + else: + p_in_epsaunit = p / p_targettkn_per_epsaunit + # p is denominated in targettkn + # p_in_epsaunit in epsaunit + criterium = normf(dtkn*p_in_epsaunit) + if P("debug"): + print(f"[margp_optimizer] tokens_t={tokens_t} [{targettkn}]") + print(f"[margp_optimizer] dtkn={dtkn}") + print(f"[margp_optimizer] p={p} {targettkn}") + print(f"[margp_optimizer] p={p_in_epsaunit} {epsaunit}") + if P("verbose") or P("debug"): + print(f"[margp_optimizer] crit=normf({dtkn*p_in_epsaunit}) = {criterium} {epsaunit}") + # ...print out some info if requested... 
if P("verbose"): print(f"\n[margp_optimizer] ========== cycle {i} =======>>>") - print("log p0", p0log10) - print("log dp", dplog10) - print("log p ", plog10) - print("p ", tuple(p)) - print("p ", ", ".join(f"{x:,.2f}" for x in p)) - print("1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - print("tokens_t", tokens_t) + print("log p0 ", p0log10) + print("log dp ", dplog10) + print("log p ", plog10) + print("p_t ", tuple(p), targettkn) + print("p ", ", ".join(f"{x:,.2f}" for x in p)) + print("1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) + print("tokens ", tokens_t) # print("dtkn", dtkn) - print("dtkn", ", ".join(f"{x:,.3f}" for x in dtkn)) - print( - f"[criterium={criterium:.2e}, eps={eps:.1e}, c/e={criterium/eps:,.0e}]" - ) + print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) + print(f"crit {criterium:.2e} [{eps_unit}; L{norm}], eps={eps_used}, c/e={criterium/eps_used:,.0e}]") if P("tknd"): - print("dtkn_d", dtkn_d) + print("dtkn_d ", dtkn_d) if P("J"): - print("J", J) + print("J ", J) print(f"<<<========== cycle {i} ======= [margp_optimizer]") # ...and finally check the criterium (percentage changes this step) for convergence - if criterium < eps: + if criterium < eps_used: if i != 0: # we don't break in the first iteration because we need this first iteration # to establish a common baseline price, therefore d logp ~ 0 is not good @@ -446,4 +510,3 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): errormsg=e, ) margp_optimizer = optimize # margp_optimizer is deprecated - From 51d9328b5d0df314c6c61abb8b129a4e7c3c9936 Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Tue, 30 Apr 2024 21:49:09 +0100 Subject: [PATCH 06/24] MargPOptimzer v5.3-b2 This is manually merging the changes from c4a110c9297abff1f355b03155c1052dcd1bd2fa into v5.3-b1 --- .../tools/optimizer/margpoptimizer.py | 93 +++++++++++-------- 1 file changed, 55 insertions(+), 38 deletions(-) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py 
b/fastlane_bot/tools/optimizer/margpoptimizer.py index a23ff409b..3ca49c6e6 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -22,8 +22,10 @@ (c) Copyright Bprotocol foundation 2023. Licensed under MIT """ -__VERSION__ = "5.3-b1" -__DATE__ = "14/Dec/2023" +__VERSION__ = "5.3-b2" +# MERGING THE CHANGES FROM c4a110c9297abff1f355b03155c1052dcd1bd2fa + +__DATE__ = "30/Apr/2024" from dataclasses import dataclass, field, fields, asdict, astuple, InitVar import pandas as pd @@ -150,10 +152,12 @@ def optimize(self, sfc=None, result=None, *, params=None): epsaunit unit for epsa (default: MOEPSAUNIT) jach step size for calculating Jacobian (default: MOJACH) maxiter maximum number of iterations (default: 100) - verbose if True, print some high level output - progress if True, print some basic progress output - debug if True, print some debug output - debug2 more debug output + progress if True, print progress output + verbose ditto, high level output + debug ditto, basic debug output + debug_j ditto, additional debug output (Jacobian) + debug_dtkn ditto (d Token) + debug_dtkn2 ditto (more d Token; requires debug_dtkn) raiseonerror if True, raise an OptimizationError exception on error pstart starting price for optimization (3) ================== ========================================================================= @@ -300,16 +304,19 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): if islog10: p = np.exp(p * np.log(10)) assert len(p) == len(tokens_t), f"p and tokens_t have different lengths [{p}, {tokens_t}]" - if P("debug") and not quiet: - print(f"\n[dtknfromp_f] =====================>>>") - print(f"prices={p}") - print(f"tokens={tokens_t}") + if P("debug_dtkn") and not quiet: + print(f"\n[dtknfromp_f]\n=====================>>>") + #print(f"prices={p}") + #print(f"tokens={tokens_t}") + print( "p ", ", ".join(f"{x:,.2f}" for x in p)) + print( "1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) + 
print(f"{targettkn} <-", ", ".join(tokens_t)) # pvec is dict {tkn -> (log) price} for all tokens in p pvec = {tkn: p_ for tkn, p_ in zip(tokens_t, p)} pvec[targettkn] = 1 - if P("debug") and not quiet: - print(f"pvec={pvec}") + # if P("debug") and not quiet: + # print(f"pvec={pvec}") sum_by_tkn = {t: 0 for t in alltokens_s} for pair, (tknb, tknq) in zip(pairs, pairs_t): @@ -323,7 +330,7 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): #dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) dxvecs = (c.dxvecfrompvec_f(pvec) for c in curves) - if P("debug2") and not quiet: + if P("debug_dtkn2") and not quiet: dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) # TODO: rewrite this using the dxvec # there is no need to extract dy dx; just iterate over dict @@ -331,7 +338,7 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): print(f"\n{c0.pairp} --->>") print(f" price={price:,.4f}, 1/price={1/price:,.4f}") for r, c in zip(dxdy, curves): - s = f" cid={c.cid:15}" + s = f" cid={c.cid[2:6]}{c.cid[-2:]}" s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" @@ -350,10 +357,10 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # print(f"pair={c0.pairp}, {sumdy:,.4f} {tn(tknq)}, {sumdx:,.4f} {tn(tknb)}, price={price:,.4f} {tn(tknq)} per {tn(tknb)} [{len(curves)} funcs]") result = tuple(sum_by_tkn[t] for t in tokens_t) - if P("debug") and not quiet: - print(f"sum_by_tkn={sum_by_tkn}") + if P("debug_dtkn") and not quiet: + print(f"\nsum_by_tkn={sum_by_tkn}") print(f"result={result}") - print(f"<<<===================== [dtknfromp_f]") + print(f"<<<=====================") if asdct: return sum_by_tkn, np.array(result) @@ -370,9 +377,10 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): plog10 = np.log10(p) if P("verbose") or P("debug"): # dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) - print("[margp_optimizer] pe ", p) 
- print("[margp_optimizer] p ", ", ".join(f"{x:,.2f}" for x in p)) - print("[margp_optimizer] 1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) + print(f"[margp_optimizer] {targettkn} <-", ", ".join(tokens_t)) + print( "[margp_optimizer] p_t ", p) + print( "[margp_optimizer] p ", ", ".join(f"{x:,.2f}" for x in p)) + print( "[margp_optimizer] 1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) # print("[margp_optimizer] dtkn", dtkn) # if P("tknd"): # print("[margp_optimizer] dtkn_d", dtkn_d) @@ -381,8 +389,9 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): for i in range(maxiter): if P("progress"): - print(f"Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s") - + print( + f"\n[margp_optimizer] Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s" + ) # calculate the change in token amounts (also as dict if requested) if P("tknd"): dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) @@ -391,30 +400,33 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # calculate the Jacobian # if P("debug"): - # print("\n[margp_optimizer] ============= JACOBIAN =============>>>") + # print("\n[margp_optimizer] calculating Jacobian") + J = self.J(dtknfromp_f, plog10, jach=jach) # ATTENTION: dtknfromp_f takes log10(p) as input - if P("debug"): - # print("==== J ====>") - print("\n============= JACOBIAN =============>>>") + + if P("debug_j"): + print("\n[margp_optimizer]\n============= JACOBIAN =============>>>") print(J) - # print("<=== J =====") print("<<<============= JACOBIAN =============\n") # Update p, dtkn using the Newton-Raphson formula try: dplog10 = np.linalg.solve(J, -dtkn) + except np.linalg.LinAlgError: + if P("verbose") or P("debug"): - print("[margp_optimizer] singular Jacobian, using lstsq instead") + print("\n[margp_optimizer] singular Jacobian, using lstsq instead") + dplog10 = np.linalg.lstsq(J, -dtkn, rcond=None)[0] # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html # 
https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html # update log prices, prices... - p0log10 = [*plog10] - plog10 += dplog10 - p = np.exp(plog10 * np.log(10)) + p0log10 = [*plog10] # keep current log prices (deep copy) + plog10 += dplog10 # update log prices + p = np.exp(plog10 * np.log(10)) # expand log to actual prices # determine the convergence criterium if crit_is_relative: @@ -441,22 +453,27 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # ...print out some info if requested... if P("verbose"): - print(f"\n[margp_optimizer] ========== cycle {i} =======>>>") - print("log p0 ", p0log10) - print("log dp ", dplog10) - print("log p ", plog10) + print(f"\n[margp_optimizer]\n========== cycle {i} =======>>>") + print(f"{targettkn} <-", ", ".join(tokens_t)) + print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) + print("log p0", p0log10) # previous log prices + print("d logp", dplog10) # change in log prices + print("log p ", plog10) # current log prices print("p_t ", tuple(p), targettkn) print("p ", ", ".join(f"{x:,.2f}" for x in p)) print("1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - print("tokens ", tokens_t) + #print("tokens ", tokens_t) # print("dtkn", dtkn) - print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) + #print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) print(f"crit {criterium:.2e} [{eps_unit}; L{norm}], eps={eps_used}, c/e={criterium/eps_used:,.0e}]") + + # TODO: DEAL WITH THOSE DEBUG FLAGS if P("tknd"): print("dtkn_d ", dtkn_d) if P("J"): print("J ", J) - print(f"<<<========== cycle {i} ======= [margp_optimizer]") + + print(f"<<<========== cycle {i} =======") # ...and finally check the criterium (percentage changes this step) for convergence if criterium < eps_used: From d0c4e7ea5581a152a6e2529d80841ea6ed8a3542 Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Tue, 30 Apr 2024 21:52:40 +0100 Subject: [PATCH 07/24] MargPOptimzer v5.3-b3 --- 
.../tools/optimizer/margpoptimizer.py | 113 ++++++++---------- 1 file changed, 47 insertions(+), 66 deletions(-) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py b/fastlane_bot/tools/optimizer/margpoptimizer.py index 3ca49c6e6..dc90ce6a0 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -22,9 +22,7 @@ (c) Copyright Bprotocol foundation 2023. Licensed under MIT """ -__VERSION__ = "5.3-b2" -# MERGING THE CHANGES FROM c4a110c9297abff1f355b03155c1052dcd1bd2fa - +__VERSION__ = "5.3-b3" __DATE__ = "30/Apr/2024" from dataclasses import dataclass, field, fields, asdict, astuple, InitVar @@ -158,6 +156,7 @@ def optimize(self, sfc=None, result=None, *, params=None): debug_j ditto, additional debug output (Jacobian) debug_dtkn ditto (d Token) debug_dtkn2 ditto (more d Token; requires debug_dtkn) + debug_dtknd ditto, d Token as dict raiseonerror if True, raise an OptimizationError exception on error pstart starting price for optimization (3) ================== ========================================================================= @@ -188,6 +187,7 @@ def optimize(self, sfc=None, result=None, *, params=None): dxdy_f = lambda r: (np.array(r[0:2])) # extract dx, dy from result tn = lambda t: t.split("-")[0] # token name, eg WETH-xxxx -> WETH + # epsilons and maxiter eps = P("eps") or self.MOEPS epsa = P("epsa") or self.MOEPSA @@ -231,9 +231,12 @@ def optimize(self, sfc=None, result=None, *, params=None): ) # pstart + if P("verbose") or P("debug"): + print(f"[margp_optimizer] targettkn = {targettkn}") + pstart = P("pstart") if not pstart is None: - if P("verbose") or P("debug"): + if P("debug"): print(f"[margp_optimizer] using pstart [{len(P('pstart'))} tokens]") if isinstance(pstart, pd.DataFrame): try: @@ -243,9 +246,8 @@ def optimize(self, sfc=None, result=None, *, params=None): assert isinstance(pstart, dict), f"pstart must be a dict or a data frame [{pstart}]" price_estimates_t = tuple(pstart[t] 
for t in tokens_t) else: - if P("verbose") or P("debug"): - print("[margp_optimizer] calculating price estimates") if P("debug"): + print("[margp_optimizer] calculating price estimates") print(f"[margp_optimizer] tknq={targettkn}, tknbs={tokens_t}") try: @@ -262,7 +264,8 @@ def optimize(self, sfc=None, result=None, *, params=None): raise if P("debug"): - print("[margp_optimizer] pstart:", price_estimates_t) + print("[margp_optimizer] price estimates = ", price_estimates_t) + if result == self.MO_PSTART: df = pd.DataFrame(price_estimates_t, index=tokens_t, columns=[targettkn]) df.index.name = "tknb" @@ -270,7 +273,7 @@ def optimize(self, sfc=None, result=None, *, params=None): # criterion and norm crit = P("crit") or self.MOCRITR - assert crit in set((self.MOCRITR, self.MOCRITA)), "crit must be MOCRITR or MOCRITA" + assert crit in set((self.MOCRITR, self.MOCRITA)), f"crit must be {self.MOCRITR} or {self.MOCRITA}" if crit == self.MOCRITA: assert not pstart is None, "pstart must be provided if crit is MOCRITA" assert epsaunit in pstart, f"epsaunit {epsaunit} not in pstart {P('pstart')}" @@ -284,6 +287,9 @@ def optimize(self, sfc=None, result=None, *, params=None): norm = P("norm") or self.MONORML2 assert norm in set((self.MONORML1, self.MONORML2, self.MONORMLINF)), f"norm must be MONORML1, MONORML2 or MONORMLINF [{norm}]" normf = lambda x: np.linalg.norm(x, ord=norm) + + if P("verbose") or P("debug"): + print(f"[margp_optimizer] crit={crit} (eps={eps_used}, unit={eps_unit}, norm=L{norm})") ## INNER FUNCTION: CALCULATE THE TARGET FUNCTION def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): @@ -305,18 +311,14 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): p = np.exp(p * np.log(10)) assert len(p) == len(tokens_t), f"p and tokens_t have different lengths [{p}, {tokens_t}]" if P("debug_dtkn") and not quiet: - print(f"\n[dtknfromp_f]\n=====================>>>") - #print(f"prices={p}") - #print(f"tokens={tokens_t}") - print( "p ", ", 
".join(f"{x:,.2f}" for x in p)) - print( "1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - print(f"{targettkn} <-", ", ".join(tokens_t)) + print(f"\n[dtknfromp_f]\ndtkn=================>>>") + print(f"{targettkn:6}", ", ".join(tokens_t)) + print( "p ", ", ".join(f"{x:,.2f}" for x in p)) + print( "1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) # pvec is dict {tkn -> (log) price} for all tokens in p pvec = {tkn: p_ for tkn, p_ in zip(tokens_t, p)} pvec[targettkn] = 1 - # if P("debug") and not quiet: - # print(f"pvec={pvec}") sum_by_tkn = {t: 0 for t in alltokens_s} for pair, (tknb, tknq) in zip(pairs, pairs_t): @@ -330,37 +332,31 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): #dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) dxvecs = (c.dxvecfrompvec_f(pvec) for c in curves) - if P("debug_dtkn2") and not quiet: - dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - # TODO: rewrite this using the dxvec - # there is no need to extract dy dx; just iterate over dict - # however not urgent because this is debug code - print(f"\n{c0.pairp} --->>") - print(f" price={price:,.4f}, 1/price={1/price:,.4f}") - for r, c in zip(dxdy, curves): - s = f" cid={c.cid[2:6]}{c.cid[-2:]}" - s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" - s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" - s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" - print(s) - print(f"<<--- {c0.pairp}") + # if P("debug_dtkn2") and not quiet: + # dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) + # # TODO: rewrite this using the dxvec + # # there is no need to extract dy dx; just iterate over dict + # # however not urgent because this is debug code + # print(f"\n{c0.pairp} --->>") + # print(f" price={price:,.4f}, 1/price={1/price:,.4f}") + # for r, c in zip(dxdy, curves): + # s = f" cid={c.cid[2:6]}{c.cid[-2:]}" + # s += f" dx={float(r[0]):15,.3f} {c.tknxp:>5}" + # s += f" dy={float(r[1]):15,.3f} {c.tknyp:>5}" + # s += f" p={c.p:,.2f} 1/p={1/c.p:,.2f}" + # print(s) + # print(f"<<--- 
{c0.pairp}") - # old code from dxdy = tuple(dxdy_f(c.dxdyfromp_f(price)) for c in curves) - # sumdx, sumdy = sum(dxdy) - # sum_by_tkn[tknq] += sumdy - # sum_by_tkn[tknb] += sumdx for dxvec in dxvecs: for tkn, dx_ in dxvec.items(): sum_by_tkn[tkn] += dx_ - # if P("debug") and not quiet: - # print(f"pair={c0.pairp}, {sumdy:,.4f} {tn(tknq)}, {sumdx:,.4f} {tn(tknb)}, price={price:,.4f} {tn(tknq)} per {tn(tknb)} [{len(curves)} funcs]") - result = tuple(sum_by_tkn[t] for t in tokens_t) if P("debug_dtkn") and not quiet: print(f"\nsum_by_tkn={sum_by_tkn}") print(f"result={result}") - print(f"<<<=====================") + print(" >", ", ".join(tokens_t)) + print(f"<<<=================dtkn") if asdct: return sum_by_tkn, np.array(result) @@ -369,41 +365,33 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): ## END INNER FUNCTION try: - + # setting up the optimization variables (note: we optimize in log space) if price_estimates_t is None: raise Exception(f"price estimates not found; try setting pstart") p = np.array(price_estimates_t, dtype=float) plog10 = np.log10(p) if P("verbose") or P("debug"): - # dtkn_d, dtkn = dtknfromp_f(plog10, islog10=True, asdct=True) - print(f"[margp_optimizer] {targettkn} <-", ", ".join(tokens_t)) - print( "[margp_optimizer] p_t ", p) + print(f"\n[margp_optimizer] {targettkn} <-", ", ".join(tokens_t)) print( "[margp_optimizer] p ", ", ".join(f"{x:,.2f}" for x in p)) print( "[margp_optimizer] 1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - # print("[margp_optimizer] dtkn", dtkn) - # if P("tknd"): - # print("[margp_optimizer] dtkn_d", dtkn_d) - + ## MAIN OPTIMIZATION LOOP for i in range(maxiter): if P("progress"): - print( - f"\n[margp_optimizer] Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s" - ) + print(f"\n[margp_optimizer] Iteration [{i:2.0f}]: time elapsed: {time.time()-start_time:.2f}s") + # calculate the change in token amounts (also as dict if requested) - if P("tknd"): + if P("debug_dtknd"): dtkn_d, dtkn = 
dtknfromp_f(plog10, islog10=True, asdct=True) else: dtkn = dtknfromp_f(plog10, islog10=True, asdct=False) # calculate the Jacobian - # if P("debug"): - # print("\n[margp_optimizer] calculating Jacobian") J = self.J(dtknfromp_f, plog10, jach=jach) - # ATTENTION: dtknfromp_f takes log10(p) as input + # ATTENTION: dtknfromp_f takes log10(p) as input by default if P("debug_j"): print("\n[margp_optimizer]\n============= JACOBIAN =============>>>") @@ -420,8 +408,8 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): print("\n[margp_optimizer] singular Jacobian, using lstsq instead") dplog10 = np.linalg.lstsq(J, -dtkn, rcond=None)[0] - # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html - # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html + # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html + # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html # update log prices, prices... p0log10 = [*plog10] # keep current log prices (deep copy) @@ -448,11 +436,10 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): print(f"[margp_optimizer] dtkn={dtkn}") print(f"[margp_optimizer] p={p} {targettkn}") print(f"[margp_optimizer] p={p_in_epsaunit} {epsaunit}") - if P("verbose") or P("debug"): print(f"[margp_optimizer] crit=normf({dtkn*p_in_epsaunit}) = {criterium} {epsaunit}") - + # ...print out some info if requested... 
- if P("verbose"): + if P("verbose") or P("debug"): print(f"\n[margp_optimizer]\n========== cycle {i} =======>>>") print(f"{targettkn} <-", ", ".join(tokens_t)) print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) @@ -462,17 +449,11 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): print("p_t ", tuple(p), targettkn) print("p ", ", ".join(f"{x:,.2f}" for x in p)) print("1/p ", ", ".join(f"{1/x:,.2f}" for x in p)) - #print("tokens ", tokens_t) - # print("dtkn", dtkn) - #print("dtkn ", ", ".join(f"{x:,.3f}" for x in dtkn)) print(f"crit {criterium:.2e} [{eps_unit}; L{norm}], eps={eps_used}, c/e={criterium/eps_used:,.0e}]") - # TODO: DEAL WITH THOSE DEBUG FLAGS - if P("tknd"): + if P("debug_dtknd"): print("dtkn_d ", dtkn_d) - if P("J"): - print("J ", J) - + print(f"<<<========== cycle {i} =======") # ...and finally check the criterium (percentage changes this step) for convergence From 29709cdfc56802a651f48e5ac8e06e2ecfd4d0ae Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Wed, 1 May 2024 07:11:21 +0100 Subject: [PATCH 08/24] MargPOptimizer v5.3 --- .../tools/optimizer/margpoptimizer.py | 53 +++++++++++-------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py b/fastlane_bot/tools/optimizer/margpoptimizer.py index dc90ce6a0..abf4645fc 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -22,8 +22,8 @@ (c) Copyright Bprotocol foundation 2023. 
Licensed under MIT """ -__VERSION__ = "5.3-b3" -__DATE__ = "30/Apr/2024" +__VERSION__ = "5.3" +__DATE__ = "01/May/2024" from dataclasses import dataclass, field, fields, asdict, astuple, InitVar import pandas as pd @@ -212,24 +212,6 @@ def optimize(self, sfc=None, result=None, *, params=None): curves_by_pair = {pair: tuple(c for c in curves_t if c.pair == pair) for pair in pairs } pairs_t = tuple(tuple(p.split("/")) for p in pairs) - # return the inner function if requested - # (this may need to move lower) - if result == self.MO_DTKNFROMPF: - return dtknfromp_f - - # return debug info if requested - if result == self.MO_DEBUG: - return dict( - tokens_t=tokens_t, - tokens_ix=tokens_ix, - pairs=pairs, - sfc=sfc, - targettkn=targettkn, - pairs_t=pairs_t, - crit=dict(crit=P("crit"), eps=eps, epsa=epsa, epsaunit=epsaunit, pstart=P("pstart")), - optimizer=self, - ) - # pstart if P("verbose") or P("debug"): print(f"[margp_optimizer] targettkn = {targettkn}") @@ -363,7 +345,27 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): return np.array(result) ## END INNER FUNCTION - + + # return debug info if requested + if result == self.MO_DEBUG: + return dict( + tokens_t=tokens_t, + tokens_ix=tokens_ix, + price_estimates_t = price_estimates_t, + pairs=pairs, + sfc=sfc, + targettkn=targettkn, + pairs_t=pairs_t, + crit=dict(crit=P("crit"), eps=eps, epsa=epsa, epsaunit=epsaunit, pstart=P("pstart")), + dtknfromp_f = dtknfromp_f, + optimizer=self, + ) + + # return the inner function if requested + if result == self.MO_DTKNFROMPF: + return dtknfromp_f + + try: # setting up the optimization variables (note: we optimize in log space) @@ -411,6 +413,15 @@ def dtknfromp_f(p, *, islog10=True, asdct=False, quiet=False): # https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html + # #### TODO: EXPERIMENTAL: ADD A DAMPING FACTOR TO THE JACOBIAN + + # #dplog10 = np.clip(dplog10, -0.1, 
0.1) + # nrm = normf(dplog10) + # if nrm > 0.1: + # dplog10 /= nrm + + # #### END EXPERIMENTAL + # update log prices, prices... p0log10 = [*plog10] # keep current log prices (deep copy) plog10 += dplog10 # update log prices From b5e8bbd8fb110bcec51fb186aec31c064d636ded Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Tue, 30 Apr 2024 18:41:02 +0100 Subject: [PATCH 09/24] Simplepair: adding spacing to token index (trivial) --- fastlane_bot/tools/simplepair.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fastlane_bot/tools/simplepair.py b/fastlane_bot/tools/simplepair.py index 65dd27ba5..f425abaa0 100644 --- a/fastlane_bot/tools/simplepair.py +++ b/fastlane_bot/tools/simplepair.py @@ -5,8 +5,8 @@ (c) Copyright Bprotocol foundation 2023. Licensed under MIT """ -__VERSION__ = "2.1" -__DATE__ = "18/May/2023" +__VERSION__ = "2.2" +__DATE__ = "30/Apr/2024" from dataclasses import dataclass, field, asdict, InitVar @@ -83,7 +83,7 @@ def tkny(self): return self.tknq NUMERAIRE_TOKENS = { - tkn: i + tkn: i*10 for i, tkn in enumerate( [ "USDC", From c54749ca1c41c5b2731807f2d8fceaa639fdd26a Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Wed, 1 May 2024 15:37:47 +0100 Subject: [PATCH 10/24] NBTest 002, 003 --- .../NBTest/NBTest_002_CPCandOptimizer.ipynb | 12 ++ .../NBTest/NBTest_002_CPCandOptimizer.py | 4 + .../NBTest/NBTest_003_Serialization.ipynb | 163 ++++++++++++------ resources/NBTest/NBTest_003_Serialization.py | 22 ++- 4 files changed, 141 insertions(+), 60 deletions(-) diff --git a/resources/NBTest/NBTest_002_CPCandOptimizer.ipynb b/resources/NBTest/NBTest_002_CPCandOptimizer.ipynb index 099e10b48..0f16cffb0 100644 --- a/resources/NBTest/NBTest_002_CPCandOptimizer.ipynb +++ b/resources/NBTest/NBTest_002_CPCandOptimizer.ipynb @@ -2732,6 +2732,8 @@ "r = O.margp_optimizer(\"WETH\", result=O.MO_DEBUG)\n", "assert isinstance(r, dict)\n", "prices0 = 
r[\"price_estimates_t\"]\n", + "dtknfromp_f = r[\"dtknfromp_f\"]\n", + "assert np.linalg.norm(dtknfromp_f(np.log10(prices0))) < 1e-6\n", "assert not prices0 is None, f\"prices0 must not be None [{prices0}]\"\n", "r1 = O.arb(\"WETH\")\n", "r2 = O.SelfFinancingConstraints.arb(\"WETH\")\n", @@ -2792,6 +2794,16 @@ "prices0" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "30424c63", + "metadata": {}, + "outputs": [], + "source": [ + "dtknfromp_f(np.log10(prices0))" + ] + }, { "cell_type": "code", "execution_count": 115, diff --git a/resources/NBTest/NBTest_002_CPCandOptimizer.py b/resources/NBTest/NBTest_002_CPCandOptimizer.py index b985dacf7..4e34b9a1e 100644 --- a/resources/NBTest/NBTest_002_CPCandOptimizer.py +++ b/resources/NBTest/NBTest_002_CPCandOptimizer.py @@ -1278,6 +1278,8 @@ r = O.margp_optimizer("WETH", result=O.MO_DEBUG) assert isinstance(r, dict) prices0 = r["price_estimates_t"] +dtknfromp_f = r["dtknfromp_f"] +assert np.linalg.norm(dtknfromp_f(np.log10(prices0))) < 1e-6 assert not prices0 is None, f"prices0 must not be None [{prices0}]" r1 = O.arb("WETH") r2 = O.SelfFinancingConstraints.arb("WETH") @@ -1291,6 +1293,8 @@ prices0 +dtknfromp_f(np.log10(prices0)) + f = O.optimize("WETH", result=O.MO_DTKNFROMPF, params=dict(verbose=True, debug=False)) r3 = f(prices0, islog10=False) assert np.all(r3 == (0,0)) diff --git a/resources/NBTest/NBTest_003_Serialization.ipynb b/resources/NBTest/NBTest_003_Serialization.ipynb index d5b5680f6..fb042d8c0 100644 --- a/resources/NBTest/NBTest_003_Serialization.ipynb +++ b/resources/NBTest/NBTest_003_Serialization.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 58, "id": "be65f3d2-769a-449f-90cd-2633a11478d0", "metadata": {}, "outputs": [ @@ -10,8 +10,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "imported m, np, pd, plt, os, sys, decimal; defined iseq, raises, require, Timer\n", - "ConstantProductCurve v3.4 (23/Jan/2024)\n", + 
"ConstantProductCurve v3.5 (22/Apr/2023)\n", "CPCArbOptimizer v5.1 (15/Sep/2023)\n" ] } @@ -21,7 +20,6 @@ " from fastlane_bot.tools.cpc import ConstantProductCurve as CPC, CPCContainer\n", " from fastlane_bot.tools.optimizer import CPCArbOptimizer, cp, time\n", " from fastlane_bot.testing import *\n", - "\n", "except:\n", " from tools.cpc import ConstantProductCurve as CPC, CPCContainer\n", " from tools.optimizer import CPCArbOptimizer, cp, time\n", @@ -55,7 +53,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 59, "id": "4030cea3-3e03-4e0f-8d80-7a2bcca05fcf", "metadata": {}, "outputs": [], @@ -65,7 +63,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 60, "id": "8cb4f9bc-2f31-4eae-b77f-533aa188e49b", "metadata": {}, "outputs": [], @@ -84,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 61, "id": "a5ed0075-5ee5-4592-a192-e06d2b5af454", "metadata": {}, "outputs": [], @@ -95,7 +93,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 62, "id": "1bf13d91-2bc0-4819-96b9-2712ef89b6f1", "metadata": {}, "outputs": [], @@ -105,7 +103,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 63, "id": "ce05c578-5060-498e-b4eb-f55617d10cdd", "metadata": {}, "outputs": [], @@ -140,7 +138,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 64, "id": "41a5cdfe-fb7b-4c8b-a270-1a52f0765e94", "metadata": {}, "outputs": [ @@ -150,7 +148,7 @@ "ConstantProductCurve(k=10000, x=100, x_act=100, y_act=100, alpha=0.5, pair='TKNB/TKNQ', cid='1', fee=0, descr='UniV2', constr='uv2', params={})" ] }, - "execution_count": 7, + "execution_count": 64, "metadata": {}, "output_type": "execute_result" } @@ -174,7 +172,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 65, "id": "ea3cdfbc-8edd-41f1-9703-0ae0d72fdb9a", "metadata": {}, "outputs": [ @@ -194,7 +192,7 @@ " 'params': {}}" ] }, - "execution_count": 8, + 
"execution_count": 65, "metadata": {}, "output_type": "execute_result" } @@ -205,7 +203,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 66, "id": "595de023-5c66-40fc-928f-eca5fe6a50c9", "metadata": {}, "outputs": [], @@ -227,7 +225,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 67, "id": "215b5105-08d9-4077-a51a-7658cafcffa9", "metadata": {}, "outputs": [], @@ -261,7 +259,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 68, "id": "0963034a-b36c-4cfb-84da-ccb3c88c4389", "metadata": {}, "outputs": [], @@ -279,7 +277,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 69, "id": "eb5dd380-dd90-4a3b-b88a-5a697bdbc3a0", "metadata": {}, "outputs": [], @@ -310,7 +308,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 70, "id": "624b80f1-c811-483b-ba24-b76c72fe3e0c", "metadata": {}, "outputs": [], @@ -325,7 +323,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 71, "id": "34d52402-18d6-4485-8e5c-6cb4f8af2ab2", "metadata": {}, "outputs": [ @@ -349,7 +347,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 72, "id": "85175836-0fa9-4f64-a42f-b5b787e622f0", "metadata": {}, "outputs": [], @@ -364,7 +362,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 73, "id": "9753798a-b154-4865-a845-a1f5f1eb8e4b", "metadata": {}, "outputs": [ @@ -388,17 +386,17 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 74, "id": "5f683913-1799-4f3a-9473-a663d803448a", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "ConstantProductCurve(k=0.01, x=0.0015438708879488485, x_act=0, y_act=1, alpha=0.5, pair='ETH/USDC', cid='4', fee=0, descr='Carbon', constr='carb', params={'y': 1, 'yint': 1, 'A': 10, 'B': 54.772255750516614, 'pa': 4195.445115010333, 'pb': 3000.0000000000005})" + "ConstantProductCurve(k=0.01, x=0.0015438708879488485, x_act=0, y_act=1, 
alpha=0.5, pair='ETH/USDC', cid='4', fee=0, descr='Carbon', constr='carb', params={'y': 1, 'yint': 1, 'A': 10, 'B': 54.772255750516614, 'pa': 4195.445115010333, 'pb': 3000.0000000000005, 'minrw': 1e-06})" ] }, - "execution_count": 17, + "execution_count": 74, "metadata": {}, "output_type": "execute_result" } @@ -412,7 +410,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 75, "id": "cffdcaa4-f221-4bd7-bf2d-5418a33e3592", "metadata": {}, "outputs": [], @@ -431,12 +429,35 @@ "assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, A=100, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)\n", "assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, B=100, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)\n", "assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, A=100, B=100, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)\n", - "assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)" + "#assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)" + ] + }, + { + "cell_type": "markdown", + "id": "6d4698a1-5df9-4c5d-a1c9-7e48fd9aa580", + "metadata": {}, + "source": [ + "TODO" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 76, + "id": "c1b70bbc-2531-458a-a507-24d89559bf41", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair=\"ETH/USDC\", tkny=\"ETH\", cid=\"1\", descr=\"Carbon\", isdydx=False)\n", + "#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, descr=\"Carbon\", isdydx=False)\n", + "#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, 
cid=\"1\", isdydx=False)\n", + "#assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair=\"ETH/USDC\", tkny=\"ETH\", fee=0, cid=\"1\", descr=\"Carbon\", isdydx=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 77, "id": "f66fc490-97e0-4c5e-958d-1e9014934d5c", "metadata": {}, "outputs": [], @@ -450,13 +471,31 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 78, "id": "465ff937-2382-4215-8e11-ec8096e1ea3d", "metadata": {}, "outputs": [], "source": [ "assert not raises(CPC.from_carbon, yint=1, y=1, pa=3100, pb=2900, pair=\"ETH/USDC\", tkny=\"USDC\", fee=0, cid=\"2\", descr=\"Carbon\", isdydx=True)\n", - "assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair=\"ETH/USDC\", tkny=\"USDC\", fee=0, cid=\"2\", descr=\"Carbon\", isdydx=True)" + "#assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair=\"ETH/USDC\", tkny=\"USDC\", fee=0, cid=\"2\", descr=\"Carbon\", isdydx=True)" + ] + }, + { + "cell_type": "markdown", + "id": "b0da3d2e-9b91-4c7a-89b8-8aa140901e32", + "metadata": {}, + "source": [ + "TODO" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "id": "d30a97ad-0188-4388-a3f8-3efa1151aa4a", + "metadata": {}, + "outputs": [], + "source": [ + "#assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair=\"ETH/USDC\", tkny=\"USDC\", fee=0, cid=\"2\", descr=\"Carbon\", isdydx=True)" ] }, { @@ -469,7 +508,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 80, "id": "c5c8d6c3-0d15-4c3d-8852-b2870a7b4caa", "metadata": {}, "outputs": [], @@ -485,7 +524,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 81, "id": "8296d087-d5a5-4b77-825a-dd53ed60d4bd", "metadata": {}, "outputs": [], @@ -503,7 +542,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 82, "id": "e72d0162-dd59-489c-8efb-dbb8327ff553", "metadata": {}, "outputs": [ @@ -567,7 +606,7 @@ }, { "cell_type": "code", - "execution_count": 24, + 
"execution_count": 83, "id": "c2d5dc97-05e8-4eca-abc7-66eee6e7d706", "metadata": {}, "outputs": [], @@ -581,7 +620,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 84, "id": "9f467a32-370b-4634-bec8-3c28be84a0a0", "metadata": {}, "outputs": [], @@ -593,7 +632,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 85, "id": "d7563934-5381-476d-b9cb-99b909691049", "metadata": {}, "outputs": [ @@ -603,7 +642,7 @@ "CPCContainer(curves=[ConstantProductCurve(k=2000, x=1, x_act=1, y_act=2000, alpha=0.5, pair='ETH/USDC', cid='1', fee=0.001, descr='UniV2', constr='uv2', params={'meh': 1}), ConstantProductCurve(k=8040, x=2, x_act=2, y_act=4020, alpha=0.5, pair='ETH/USDC', cid='2', fee=0.001, descr='UniV2', constr='uv2', params={}), ConstantProductCurve(k=1970, x=1, x_act=1, y_act=1970, alpha=0.5, pair='ETH/USDC', cid='3', fee=0.001, descr='UniV2', constr='uv2', params={})])" ] }, - "execution_count": 26, + "execution_count": 85, "metadata": {}, "output_type": "execute_result" } @@ -621,7 +660,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 86, "id": "131928b8-f927-4799-97c6-ec50631c7959", "metadata": {}, "outputs": [ @@ -723,7 +762,7 @@ "3 1970 1 1 1970 0.5 ETH/USDC 0.001 UniV2 uv2 {}" ] }, - "execution_count": 27, + "execution_count": 86, "metadata": {}, "output_type": "execute_result" } @@ -750,7 +789,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 87, "id": "6cd062ae-c465-4102-a57c-587874023de5", "metadata": {}, "outputs": [], @@ -779,7 +818,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 88, "id": "8c046e70-ef8a-4de8-bd17-726afb617ea1", "metadata": {}, "outputs": [ @@ -788,7 +827,7 @@ "output_type": "stream", "text": [ "len 2355000\n", - "elapsed time: 0.29s\n" + "elapsed time: 0.34s\n" ] } ], @@ -814,7 +853,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 89, "id": "e892dc06-329d-477f-adcb-40a87eb7a009", 
"metadata": {}, "outputs": [ @@ -822,7 +861,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "elapsed time: 0.21s\n" + "elapsed time: 0.22s\n" ] }, { @@ -913,7 +952,7 @@ "2 3 1970 1 1 1970 0.5 ETH/USDC 0.001 UniV2 uv2 {}" ] }, - "execution_count": 30, + "execution_count": 89, "metadata": {}, "output_type": "execute_result" } @@ -939,7 +978,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 90, "id": "a2976017-2a84-4fba-885d-7680d9f61c3a", "metadata": {}, "outputs": [ @@ -947,7 +986,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "elapsed time: 0.17s\n" + "elapsed time: 0.16s\n" ] } ], @@ -971,7 +1010,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 91, "id": "ed5aaa2c-2f5a-4863-87cf-a77240826a85", "metadata": { "lines_to_next_cell": 2 @@ -981,7 +1020,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "elapsed time: 0.21s\n" + "elapsed time: 0.16s\n" ] } ], @@ -1005,7 +1044,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 92, "id": "f1507cc7-96ba-4342-bf1e-955b248bd8b4", "metadata": {}, "outputs": [], @@ -1030,7 +1069,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 93, "id": "a1c75dfe-ce14-4840-9c62-39a8d5cfc3ad", "metadata": {}, "outputs": [ @@ -1139,7 +1178,7 @@ "3 1970 1 1 1970 0.5 ETH/USDC 0.001 UniV2 uv2 {}" ] }, - "execution_count": 34, + "execution_count": 93, "metadata": {}, "output_type": "execute_result" } @@ -1156,7 +1195,9 @@ { "cell_type": "markdown", "id": "3cfc2ff5-bf9d-4684-9b8c-2aff57937a46", - "metadata": {}, + "metadata": { + "tags": [] + }, "source": [ "### Benchmarking\n", "\n", @@ -1174,7 +1215,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 94, "id": "c43b9431-603d-49af-b5fd-1975e9f59e2f", "metadata": {}, "outputs": [ @@ -1183,10 +1224,10 @@ "output_type": "stream", "text": [ " 2355000 .curves.json\n", - "-rw-r--r-- 1 skl staff 720055 1 May 07:51 .curves.csv\n", - 
"-rw-r--r-- 1 skl staff 2965 1 May 07:51 .curves.csv.gz\n", - "-rw-r--r-- 1 skl staff 961219 1 May 07:51 .curves.pkl\n", - "-rw-r--r-- 1 skl staff 720055 1 May 07:51 .curves.tsv\n" + "-rw-r--r-- 1 skl staff 720055 1 May 15:40 .curves.csv\n", + "-rw-r--r-- 1 skl staff 2965 1 May 15:40 .curves.csv.gz\n", + "-rw-r--r-- 1 skl staff 961219 1 May 15:40 .curves.pkl\n", + "-rw-r--r-- 1 skl staff 720055 1 May 15:40 .curves.tsv\n" ] } ], @@ -1227,6 +1268,14 @@ "metadata": {}, "outputs": [], "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73a341c5-36e5-47c2-9fb0-0a63b589b98b", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/resources/NBTest/NBTest_003_Serialization.py b/resources/NBTest/NBTest_003_Serialization.py index 95f7a43db..d1530d580 100644 --- a/resources/NBTest/NBTest_003_Serialization.py +++ b/resources/NBTest/NBTest_003_Serialization.py @@ -19,7 +19,6 @@ from fastlane_bot.tools.cpc import ConstantProductCurve as CPC, CPCContainer from fastlane_bot.tools.optimizer import CPCArbOptimizer, cp, time from fastlane_bot.testing import * - except: from tools.cpc import ConstantProductCurve as CPC, CPCContainer from tools.optimizer import CPCArbOptimizer, cp, time @@ -205,7 +204,16 @@ assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, A=100, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, B=100, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, A=100, B=100, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) -assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) +#assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) + +# TODO + +# + 
+#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair="ETH/USDC", tkny="ETH", cid="1", descr="Carbon", isdydx=False) +#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair="ETH/USDC", tkny="ETH", fee=0, descr="Carbon", isdydx=False) +#assert raises(CPC.from_carbon, yint=1, y=1, pa=1800, pb=2200, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", isdydx=False) +#assert raises(CPC.from_carbon, yint=1, y=1, pb=1800, pa=2200, pair="ETH/USDC", tkny="ETH", fee=0, cid="1", descr="Carbon", isdydx=False) +# - assert not raises(CPC.from_carbon, yint=1, y=1, A=1/10, B=m.sqrt(1/2000), pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) assert raises(CPC.from_carbon, yint=1, y=1, A=1/10, B=m.sqrt(1/2000), pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=False) @@ -214,7 +222,13 @@ assert raises(CPC.from_carbon, yint=1, y=1, A=-1/10, B=m.sqrt(1/2000), pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) assert not raises(CPC.from_carbon, yint=1, y=1, pa=3100, pb=2900, pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) -assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) +#assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) + +# TODO + +# + +#assert raises(CPC.from_carbon, yint=1, y=1, pb=3100, pa=2900, pair="ETH/USDC", tkny="USDC", fee=0, cid="2", descr="Carbon", isdydx=True) +# - # ## Charts [NOTEST] @@ -386,3 +400,5 @@ + + From f7b458702fd21497aa756a7f163a42a12d3cba83 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Thu, 2 May 2024 19:56:12 +1000 Subject: [PATCH 11/24] use the regular multicall address --- fastlane_bot/config/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastlane_bot/config/network.py 
b/fastlane_bot/config/network.py index cfec97aa3..7dac0b6bc 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -794,7 +794,7 @@ class _ConfigNetworkSei(ConfigNetwork): network_df = get_multichain_addresses(network=NETWORK_NAME) FASTLANE_CONTRACT_ADDRESS = "0xC7Dd38e64822108446872c5C2105308058c5C55C" #TODO - UPDATE WITH Mainnet - MULTICALL_CONTRACT_ADDRESS = "0x1E05037b9c4fEFaF3c45CD6F4F2C3197e6A43cD8" # previously 0xcA11bde05977b3631167028862bE2a173976CA11 + MULTICALL_CONTRACT_ADDRESS = "0xcA11bde05977b3631167028862bE2a173976CA11" CARBON_CONTROLLER_ADDRESS = "0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA" #TODO - UPDATE WITH Mainnet CARBON_CONTROLLER_VOUCHER = "0xe4816658ad10bF215053C533cceAe3f59e1f1087" #TODO - UPDATE WITH Mainnet From ff962eb347deeee98ae313c4bb4eeedeca1a4f79 Mon Sep 17 00:00:00 2001 From: barak manos <> Date: Thu, 2 May 2024 17:09:26 +0300 Subject: [PATCH 12/24] Update the Multicall contract used on Ethereum from Multicall2 to `Multicall3` --- fastlane_bot/config/multicaller.py | 2 +- fastlane_bot/config/network.py | 4 ++-- fastlane_bot/data/abi.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fastlane_bot/config/multicaller.py b/fastlane_bot/config/multicaller.py index 9687db79c..c4a5fdd9a 100644 --- a/fastlane_bot/config/multicaller.py +++ b/fastlane_bot/config/multicaller.py @@ -113,7 +113,7 @@ class MultiCaller(ContextManager): def __init__(self, contract: web3.contract.Contract, web3: Web3, - block_identifier: Any = 'latest', multicall_address = "0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696"): + block_identifier: Any = 'latest', multicall_address = "0xcA11bde05977b3631167028862bE2a173976CA11"): self._contract_calls: List[Callable] = [] self.contract = contract self.block_identifier = block_identifier diff --git a/fastlane_bot/config/network.py b/fastlane_bot/config/network.py index 7dac0b6bc..9e73f9339 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -429,7 
+429,7 @@ class _ConfigNetworkMainnet(ConfigNetwork): RPC_ENDPOINT = "https://eth-mainnet.alchemyapi.io/v2/" WEB3_ALCHEMY_PROJECT_ID = os.environ.get("WEB3_ALCHEMY_PROJECT_ID") - MULTICALL_CONTRACT_ADDRESS = "0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696" + MULTICALL_CONTRACT_ADDRESS = "0xcA11bde05977b3631167028862bE2a173976CA11" # NATIVE_GAS_TOKEN_KEY = "ETH-EEeE" # WRAPPED_GAS_TOKEN_KEY = "WETH-6Cc2" # STABLECOIN_KEY = "USDC-eB48" @@ -849,7 +849,7 @@ class _ConfigNetworkTenderly(ConfigNetwork): FASTLANE_CONTRACT_ADDRESS = "0x41Eeba3355d7D6FF628B7982F3F9D055c39488cB" CARBON_CONTROLLER_ADDRESS = "0xC537e898CD774e2dCBa3B14Ea6f34C93d5eA45e1" CARBON_CONTROLLER_VOUCHER = "0x3660F04B79751e31128f6378eAC70807e38f554E" - MULTICALL_CONTRACT_ADDRESS = "0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696" + MULTICALL_CONTRACT_ADDRESS = "0xcA11bde05977b3631167028862bE2a173976CA11" # Uniswap UNISWAP_V2_ROUTER_ADDRESS = "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D" diff --git a/fastlane_bot/data/abi.py b/fastlane_bot/data/abi.py index 4d0d9f788..e79b157a2 100644 --- a/fastlane_bot/data/abi.py +++ b/fastlane_bot/data/abi.py @@ -678,7 +678,7 @@ "type": "function", "name": "aggregate", "stateMutability": "view", - "inputs": [{"components": [{"internalType": "address", "name": "target", "type": "address"}, {"internalType": "bytes", "name": "callData", "type": "bytes"}], "internalType": "struct Multicall2.Call[]", "name": "calls", "type": "tuple[]"}], + "inputs": [{"components": [{"internalType": "address", "name": "target", "type": "address"}, {"internalType": "bytes", "name": "callData", "type": "bytes"}], "internalType": "struct Multicall3.Call[]", "name": "calls", "type": "tuple[]"}], "outputs": [{"internalType": "uint256", "name": "blockNumber", "type": "uint256"}, {"internalType": "bytes[]", "name": "returnData", "type": "bytes[]"}] } ] From 420991172f97a300b1af3321bac3e52ef9bb7da0 Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Tue, 30 Apr 2024 
21:49:09 +0100 Subject: [PATCH 13/24] MargPOptimzer v5.3-b2 This is manually merging the changes from c4a110c9297abff1f355b03155c1052dcd1bd2fa into v5.3-b1 --- fastlane_bot/tools/optimizer/margpoptimizer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fastlane_bot/tools/optimizer/margpoptimizer.py b/fastlane_bot/tools/optimizer/margpoptimizer.py index abf4645fc..c6eba3685 100644 --- a/fastlane_bot/tools/optimizer/margpoptimizer.py +++ b/fastlane_bot/tools/optimizer/margpoptimizer.py @@ -25,6 +25,8 @@ __VERSION__ = "5.3" __DATE__ = "01/May/2024" +__DATE__ = "30/Apr/2024" + from dataclasses import dataclass, field, fields, asdict, astuple, InitVar import pandas as pd import numpy as np From bcd46feca5a422603e0c39ec5ac19c6c85912104 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Fri, 3 May 2024 08:39:27 +1000 Subject: [PATCH 14/24] update default flashloan tokens --- fastlane_bot/config/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastlane_bot/config/network.py b/fastlane_bot/config/network.py index 9e73f9339..70b4e562a 100644 --- a/fastlane_bot/config/network.py +++ b/fastlane_bot/config/network.py @@ -810,7 +810,7 @@ class _ConfigNetworkSei(ConfigNetwork): BALANCER_VAULT_ADDRESS = "0x7ccBebeb88696f9c8b061f1112Bb970158e29cA5" # # TODO Jellyswap on devnet CHAIN_FLASHLOAN_TOKENS = { - "0x26841a0A5D958B128209F4ea9a1DD7E61558c330": "WSEI", #TODO confirm for Mainnet + "0x57eE725BEeB991c70c53f9642f36755EC6eb2139": "WSEI", #TODO confirm for Mainnet "0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C": "USDC", #TODO confirm for Mainnet } # Add any exchanges unique to the chain here From 3ed719809cb19ae59de45028d414ba47f58dd29a Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Fri, 3 May 2024 11:03:35 +1000 Subject: [PATCH 15/24] other static data for testing --- fastlane_bot/data/blockchain_data/sei/static_pool_data.csv | 2 ++ 
fastlane_bot/data/blockchain_data/sei/tokens.csv | 1 + .../data/blockchain_data/sei/uniswap_v2_event_mappings.csv | 4 +++- fastlane_bot/data/multichain_addresses.csv | 4 +++- 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv index 09177afa2..90d9f351a 100644 --- a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv +++ b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv @@ -1,3 +1,5 @@ cid,strategy_id,last_updated,last_updated_block,descr,pair_name,exchange_name,fee,fee_float,address,anchor,tkn0_address,tkn1_address,tkn0_decimals,tkn1_decimals,exchange_id,tkn0_symbol,tkn1_symbol,timestamp,tkn0_balance,tkn1_balance,liquidity,sqrt_price_q96,tick,tick_spacing,exchange,pool_type,tkn0_weight,tkn1_weight,tkn2_address,tkn2_decimals,tkn2_symbol,tkn2_balance,tkn2_weight,tkn3_address,tkn3_decimals,tkn3_symbol,tkn3_balance,tkn3_weight,tkn4_address,tkn4_decimals,tkn4_symbol,tkn4_balance,tkn4_weight,tkn5_address,tkn5_decimals,tkn5_symbol,tkn5_balance,tkn5_weight,tkn6_address,tkn6_decimals,tkn6_symbol,tkn6_balance,tkn6_weight,tkn7_address,tkn7_decimals,tkn7_symbol,tkn7_balance,tkn7_weight 0x1422169ab760ea6994358267b7d3783e8e7fa55c6a74b365b3fd3d17cbf4c6f1,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,dragonswap,0.003,0.003,0x01A34Dfa104F020FEE739268679338169945D5B1,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0x7b75109369ACb528d9fa989E227812a6589712b9,18,18,3,WSEI,DSWAP,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 0xbfd9612b2cb8035908dff18c040f64de75999cefd1020b5ce8a2e533c2ecd5dc,0,,2354,dragonswap 
0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,dragonswap,0.003,0.003,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,18,6,3,WSEI,USDC,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +0xe3aead757d877a15316e4896d5c5ab7639adbcba1ff76e3434b4e0af90f6225e,0,,2354,dragonswap 0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C/0xF983afa393199D6902a1Dd04f8E93465915ffD8B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C/0xF983afa393199D6902a1Dd04f8E93465915ffD8B,dragonswap,0.003,0.003,0x72A788B0A83e18ce1757171321E82c03e4351498,,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0xF983afa393199D6902a1Dd04f8E93465915ffD8B,6,6,3,USDC,USDT,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, + diff --git a/fastlane_bot/data/blockchain_data/sei/tokens.csv b/fastlane_bot/data/blockchain_data/sei/tokens.csv index af2dc29ce..e79bf6c3b 100644 --- a/fastlane_bot/data/blockchain_data/sei/tokens.csv +++ b/fastlane_bot/data/blockchain_data/sei/tokens.csv @@ -6,3 +6,4 @@ address,decimals,symbol 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE,18,SEI 0x9e7A8e558Ce582511f4104465a886b7bEfBC146b,18,JLY 0x57eE725BEeB991c70c53f9642f36755EC6eb2139,18,WSEI +0xF983afa393199D6902a1Dd04f8E93465915ffD8B,6,USDT diff --git a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv index f0165604a..1989d2011 100644 --- a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv +++ b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv @@ -1,3 +1,5 @@ exchange,address dragonswap,0x01A34Dfa104F020FEE739268679338169945D5B1 -dragonswap,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732 \ No newline at end of file +dragonswap,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732 +dragonswap,0x38BcEBb9A3fbF05B0Ab7ce9b485c9669578409fE 
+dragonswap,0x72A788B0A83e18ce1757171321E82c03e4351498 \ No newline at end of file diff --git a/fastlane_bot/data/multichain_addresses.csv b/fastlane_bot/data/multichain_addresses.csv index 235346cb8..0650447ea 100644 --- a/fastlane_bot/data/multichain_addresses.csv +++ b/fastlane_bot/data/multichain_addresses.csv @@ -136,4 +136,6 @@ pancakeswap_v3,zkevm,uniswap_v3,0x0BFbCF9fa4f9C56B0F40a671Ad40E0805A091865,0x1b8 pancakeswap_v3,zksync,uniswap_v3,0x1BB72E0CbbEA93c08f535fc7856E0338D7F7a8aB,0xD70C70AD87aa8D45b8D59600342FB3AEe76E3c68,,, xfai_v0,linea,solidly_v2,0xa5136eAd459F0E61C99Cec70fe8F5C24cF3ecA26,0xD538be6e9026C13D130C9e17d509E69C8Bb0eF33,,222864, carbon_v1,sei,carbon_v1,0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA,0x59f21012B2E9BA67ce6a7605E74F945D0D4C84EA,,17658678, -dragonswap,sei,uniswap_v2,0x5D370a6189F89603FaB67e9C68383e63F7B6A262,0x2346d3A6fb18Ff3ae590Ea31d9e41E6AB8c9f5EB,,1008775, +dragonswap,sei,uniswap_v2,0x5D370a6189F89603FaB67e9C68383e63F7B6A262,0x2346d3A6fb18Ff3ae590Ea31d9e41E6AB8c9f5EB,0.003,1008775, +jellyswap,sei,balancer,BALANCER_VAULT_ADDRESS,0x7ccBebeb88696f9c8b061f1112Bb970158e29cA5,0,222832, +uniswap_v3,sei,uniswap_v3,0x0000000000000000000000000000000000000000,0x0000000000000000000000000000000000000000,,1, From e34ff93899195cd54579b8a350ad0341727010cb Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Fri, 3 May 2024 11:04:23 +1000 Subject: [PATCH 16/24] add basics to terraformer --- run_blockchain_terraformer.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/run_blockchain_terraformer.py b/run_blockchain_terraformer.py index 62d7f2840..4419946fa 100644 --- a/run_blockchain_terraformer.py +++ b/run_blockchain_terraformer.py @@ -49,6 +49,7 @@ async def gather(): FANTOM = "fantom" MANTLE = "mantle" LINEA = "linea" +SEI = "sei" coingecko_network_map = { "ethereum": "ethereum", @@ -67,18 +68,20 @@ async def gather(): "cosmos": "cosmos", "kava": "kava", "mantle": 
"mantle", + "sei": "sei", } BLOCK_CHUNK_SIZE_MAP = { - "ethereum": 0, - "polygon": 0, - "polygon_zkevm": 0, - "arbitrum_one": 0, - "optimism": 0, - "coinbase_base": 0, - "fantom": 5000, - "mantle": 0, - "linea": 0 + "ethereum": 50000, + "polygon": 250000, + "polygon_zkevm": 500000, + "arbitrum_one": 500000, + "optimism": 500000, + "coinbase_base": 250000, + "fantom": 2000, + "mantle": 10000000, + "linea": 1000000, + "sei": 10000, #TODO untested this could be more } ALCHEMY_KEY_DICT = { @@ -91,6 +94,7 @@ async def gather(): "fantom": "WEB3_FANTOM", "mantle": "WEB3_MANTLE", "linea": "WEB3_LINEA", + "sei": "WEB3_SEI", } ALCHEMY_RPC_LIST = { @@ -103,6 +107,7 @@ async def gather(): "fantom": "https://fantom.blockpi.network/v1/rpc/", "mantle": "https://rpc.mantle.xyz/", "linea": "https://rpc.linea.build/", + "sei": "https://evm-rpc.arctic-1.seinetwork.io/", # TODO update with mainnet } BALANCER_SUBGRAPH_CHAIN_URL = { @@ -114,6 +119,7 @@ async def gather(): "coinbase_base": "https://api.studio.thegraph.com/query/24660/balancer-base-v2/version/latest", "avalanche": "https://api.thegraph.com/subgraphs/name/balancer-labs/balancer-avalanche-v2", "fantom": "https://api.thegraph.com/subgraphs/name/beethovenxfi/beethovenx", + "sei": "" # TODO add Jellyswap } @@ -1067,6 +1073,7 @@ def terraform_blockchain(network_name: str): file_desc.writelines(list(dict.fromkeys(lines))) file_desc.close() +#terraform_blockchain(network_name="sei", save_tokens=True) #terraform_blockchain(network_name=ETHEREUM) #terraform_blockchain(network_name=BASE) From 63df73e354443c0299e1f04939ad491e7f43162c Mon Sep 17 00:00:00 2001 From: Platon Floria Date: Fri, 3 May 2024 12:05:44 +0200 Subject: [PATCH 17/24] fix: BLOCK_CHUNK_SIZE_MAP --- run_blockchain_terraformer.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/run_blockchain_terraformer.py b/run_blockchain_terraformer.py index 4419946fa..e6912d34a 100644 --- a/run_blockchain_terraformer.py +++ 
b/run_blockchain_terraformer.py @@ -72,16 +72,15 @@ async def gather(): } BLOCK_CHUNK_SIZE_MAP = { - "ethereum": 50000, - "polygon": 250000, - "polygon_zkevm": 500000, - "arbitrum_one": 500000, - "optimism": 500000, - "coinbase_base": 250000, - "fantom": 2000, - "mantle": 10000000, - "linea": 1000000, - "sei": 10000, #TODO untested this could be more + "ethereum": 0, + "polygon": 0, + "polygon_zkevm": 0, + "arbitrum_one": 0, + "optimism": 0, + "coinbase_base": 0, + "fantom": 5000, + "mantle": 0, + "linea": 0 } ALCHEMY_KEY_DICT = { From 68741095afae80f71bf4e351590f617ec5519cb8 Mon Sep 17 00:00:00 2001 From: barak manos <> Date: Fri, 3 May 2024 13:13:06 +0300 Subject: [PATCH 18/24] Fix the terraformer script and update data files for sei --- fastlane_bot/data/blockchain_data/sei/static_pool_data.csv | 1 - .../data/blockchain_data/sei/uniswap_v2_event_mappings.csv | 2 +- run_blockchain_terraformer.py | 5 +++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv index 90d9f351a..54e89f03e 100644 --- a/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv +++ b/fastlane_bot/data/blockchain_data/sei/static_pool_data.csv @@ -2,4 +2,3 @@ cid,strategy_id,last_updated,last_updated_block,descr,pair_name,exchange_name,fe 0x1422169ab760ea6994358267b7d3783e8e7fa55c6a74b365b3fd3d17cbf4c6f1,0,,2354,dragonswap 0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0x7b75109369ACb528d9fa989E227812a6589712b9,dragonswap,0.003,0.003,0x01A34Dfa104F020FEE739268679338169945D5B1,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0x7b75109369ACb528d9fa989E227812a6589712b9,18,18,3,WSEI,DSWAP,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 0xbfd9612b2cb8035908dff18c040f64de75999cefd1020b5ce8a2e533c2ecd5dc,0,,2354,dragonswap 
0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B/0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,dragonswap,0.003,0.003,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732,,0x027D2E627209f1cebA52ADc8A5aFE9318459b44B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,18,6,3,WSEI,USDC,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 0xe3aead757d877a15316e4896d5c5ab7639adbcba1ff76e3434b4e0af90f6225e,0,,2354,dragonswap 0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C/0xF983afa393199D6902a1Dd04f8E93465915ffD8B,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C/0xF983afa393199D6902a1Dd04f8E93465915ffD8B,dragonswap,0.003,0.003,0x72A788B0A83e18ce1757171321E82c03e4351498,,0xace5f7Ea93439Af39b46d2748fA1aC19951c8d7C,0xF983afa393199D6902a1Dd04f8E93465915ffD8B,6,6,3,USDC,USDT,0,0,0,,,,,dragonswap,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, - diff --git a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv index 1989d2011..c23f7b0da 100644 --- a/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv +++ b/fastlane_bot/data/blockchain_data/sei/uniswap_v2_event_mappings.csv @@ -2,4 +2,4 @@ exchange,address dragonswap,0x01A34Dfa104F020FEE739268679338169945D5B1 dragonswap,0x85CB6BFd781e1f42f4E79Efb6bf1F1fEfE4E9732 dragonswap,0x38BcEBb9A3fbF05B0Ab7ce9b485c9669578409fE -dragonswap,0x72A788B0A83e18ce1757171321E82c03e4351498 \ No newline at end of file +dragonswap,0x72A788B0A83e18ce1757171321E82c03e4351498 diff --git a/run_blockchain_terraformer.py b/run_blockchain_terraformer.py index e6912d34a..654fcedba 100644 --- a/run_blockchain_terraformer.py +++ b/run_blockchain_terraformer.py @@ -80,7 +80,8 @@ async def gather(): "coinbase_base": 0, "fantom": 5000, "mantle": 0, - "linea": 0 + "linea": 0, + "sei": 0, } ALCHEMY_KEY_DICT = { @@ -1072,10 +1073,10 @@ def terraform_blockchain(network_name: str): file_desc.writelines(list(dict.fromkeys(lines))) 
file_desc.close() -#terraform_blockchain(network_name="sei", save_tokens=True) #terraform_blockchain(network_name=ETHEREUM) #terraform_blockchain(network_name=BASE) #terraform_blockchain(network_name=FANTOM) #terraform_blockchain(network_name=MANTLE) #terraform_blockchain(network_name=LINEA) +#terraform_blockchain(network_name=SEI) From 8c005870f5f21cb2062691ef784eabfa3105b22e Mon Sep 17 00:00:00 2001 From: sklbancor <109073706+sklbancor@users.noreply.github.com> Date: Mon, 6 May 2024 08:24:41 +0100 Subject: [PATCH 19/24] Test39: asserting all curves are being converted (cherry picked from commit a480254cc672095f879594f2df28040a8f499ef1) --- fastlane_bot/tests/test_039_TestMultiMode.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fastlane_bot/tests/test_039_TestMultiMode.py b/fastlane_bot/tests/test_039_TestMultiMode.py index dc47761d1..b3c875d70 100644 --- a/fastlane_bot/tests/test_039_TestMultiMode.py +++ b/fastlane_bot/tests/test_039_TestMultiMode.py @@ -140,6 +140,7 @@ def test_test_tax_tokens(): # ------------------------------------------------------------ assert any(token.address in cfg.TAX_TOKENS for token in tokens), f"[TestMultiMode], DB does not include any tax tokens" + assert len(CCm) == 516, f"[NBTest 039 TestMultiMode] Expected 516 curves, found {len(CCm)}" for curve in CCm: for token in cfg.TAX_TOKENS: @@ -178,6 +179,7 @@ def test_test_combos_and_tokens(): # ------------------------------------------------------------ # + + assert len(CCm) == 516, f"[NBTest 039 TestMultiMode] Expected 516 curves, found {len(CCm)}" arb_finder = bot._get_arb_finder("multi") finder = arb_finder( flashloan_tokens=flashloan_tokens, @@ -205,6 +207,7 @@ def test_test_expected_output(): # ------------------------------------------------------------ # + + assert len(CCm) == 516, f"[NBTest 039 TestMultiMode] Expected 516 curves, found {len(CCm)}" arb_finder = bot._get_arb_finder("multi") finder = arb_finder( flashloan_tokens=flashloan_tokens, From 
48eac0ba61c84d2c8c80fa5a5c5eab08e0616121 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Mon, 6 May 2024 23:29:08 +1000 Subject: [PATCH 20/24] add balancer-fork jelly subgraph --- run_blockchain_terraformer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_blockchain_terraformer.py b/run_blockchain_terraformer.py index 654fcedba..96269107f 100644 --- a/run_blockchain_terraformer.py +++ b/run_blockchain_terraformer.py @@ -119,7 +119,7 @@ async def gather(): "coinbase_base": "https://api.studio.thegraph.com/query/24660/balancer-base-v2/version/latest", "avalanche": "https://api.thegraph.com/subgraphs/name/balancer-labs/balancer-avalanche-v2", "fantom": "https://api.thegraph.com/subgraphs/name/beethovenxfi/beethovenx", - "sei": "" # TODO add Jellyswap + "sei": "https://thegraph.dev.mvpworkshop.co/subgraphs/name/jelly" # TODO verify this for mainnet } From ed4d1e0cc532b56487941567a0abe28bdd9214b6 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Thu, 2 May 2024 15:37:59 +1000 Subject: [PATCH 21/24] overlapping order modifier --- fastlane_bot/helpers/poolandtokens.py | 70 +++++++++++++++++++++++++++ fastlane_bot/utils.py | 28 ++++++++--- 2 files changed, 92 insertions(+), 6 deletions(-) diff --git a/fastlane_bot/helpers/poolandtokens.py b/fastlane_bot/helpers/poolandtokens.py index c6ac1f61d..4eac11a4c 100644 --- a/fastlane_bot/helpers/poolandtokens.py +++ b/fastlane_bot/helpers/poolandtokens.py @@ -407,6 +407,7 @@ def _carbon_to_cpc(self) -> ConstantProductCurve: # if idx == 0, use the first curve, otherwise use the second curve. 
change the numerical values to Decimal lst = [] errors = [] + strategy_typed_args = [] for i in [0, 1]: S = Decimal(self.A_1) if i == 0 else Decimal(self.A_0) @@ -450,6 +451,7 @@ def decimal_converter(idx): decimal_converter = decimal_converter(i) p_start = Decimal(encoded_order.p_start) * decimal_converter + p_marg = Decimal(encoded_order.p_marg) * decimal_converter p_end = Decimal(encoded_order.p_end) * decimal_converter yint = Decimal(yint) / ( Decimal("10") ** [self.tkn1_decimals, self.tkn0_decimals][i] @@ -457,6 +459,11 @@ def decimal_converter(idx): y = Decimal(y) / ( Decimal("10") ** [self.tkn1_decimals, self.tkn0_decimals][i] ) + is_limit_order = p_start==p_end + + # if (p_marg!=p_start) and (p_marg!=p_end): + # self.ConfigObj.logger.debug(f"[poolandtokens.py, _carbon_to_cpc] p_start, p_marg, p_end:, {p_start, p_marg, p_end}") + # assert (round(p_start,6)<=round(p_marg,6)<=round(p_end,6)) or (round(p_start,6)>=round(p_marg,6)>=round(p_end,6)), f"WARNING {p_start, p_marg, p_end}" tkny = 1 if i == 0 else 0 typed_args = { @@ -466,7 +473,9 @@ def decimal_converter(idx): "yint": yint, "y": y, "pb": p_end, + "p_marg": p_marg, # deleted later since not supported by from_carbon() "pa": p_start, + "is_limit_order": is_limit_order, # deleted later since not supported by from_carbon() "tkny": self.pair_name.split("/")[tkny].replace( self.ConfigObj.NATIVE_GAS_TOKEN_ADDRESS, self.ConfigObj.WRAPPED_GAS_TOKEN_ADDRESS ), @@ -476,6 +485,67 @@ def decimal_converter(idx): "descr": self.descr, "params": self._params, } + + strategy_typed_args += [typed_args] + + #### MODIFICATION LOGIC HERE >>>>> + # Only overlapping strategies are selected for modification + if len(strategy_typed_args) == 2: + + is_overlapping = False + pmarg_threshold = Decimal("0.01") # 1% # WARNING using this condition alone can included stable/stable pairs incidently + + # evaluate that the marginal prices are within the pmarg_threshold + pmarg0, pmarg1 = [x['p_marg'] for x in strategy_typed_args] + 
pmarg0_inv = 1/pmarg0 # one of the orders must be flipped since prices are always dy/dx - but must flip same geomean_pmarg later + percent_component = pmarg_threshold * max(pmarg0_inv, pmarg1) + percent_component_met = abs(pmarg0_inv - pmarg1) <= percent_component + + # overlapping strategies by defintion cannot have A=0 i.e. there must be no limit orders + no_limit_orders = (strategy_typed_args[0]['is_limit_order'] == False) and (strategy_typed_args[1]['is_limit_order'] == False) + + # evaluate if the price boundaries pa/pb overlap at one end # TODO check logic and remove duplicate logic if necessary + prices_overlap = (strategy_typed_args[1]['pa']>(1/strategy_typed_args[0]['pa'])>strategy_typed_args[1]['pb']) # or (1/strategy_typed_args[0]['pa']<(strategy_typed_args[1]['pb'])) + + # if (percent_component_met and no_limit_orders) and not prices_overlap: + # print(percent_component_met, no_limit_orders, prices_overlap) + # print(strategy_typed_args) + + # if the threshold is met and neither is a limit order and prices overlap then likely to be overlapping + is_overlapping = percent_component_met and no_limit_orders and prices_overlap + + if is_overlapping: + # print(strategy_typed_args) + # calculate the geometric mean + geomean_p_marg = Decimal.sqrt(pmarg0_inv * pmarg1) + self.ConfigObj.logger.debug(f"[poolandtokens.py, _carbon_to_cpc] These cids are identified as overlapping: {[x['cid'] for x in strategy_typed_args]}") + self.ConfigObj.logger.debug(f"[poolandtokens.py, _carbon_to_cpc] pmarg0_inv, pmarg1, geomean_p_marg: {pmarg0_inv, pmarg1, geomean_p_marg}") + + # modify the y_int based on the new geomean to the limit of y #TODO check that this math is correct + typed_args0 = strategy_typed_args[0] + new_yint0 = typed_args0['y'] * (typed_args0['pa'] - typed_args0['pb']) / ((1/geomean_p_marg) - typed_args0['pb']) #this geomean is flipped because we flippend the pmarg from order 0 + if new_yint0 < typed_args0['y']: + new_yint0 = typed_args0['y'] + 
self.ConfigObj.logger.debug(f"[poolandtokens.py, _carbon_to_cpc] First order: typed_args0['yint'], new_yint0, , typed_args0['y']: {typed_args0['yint'], new_yint0, typed_args0['y']}") + typed_args0['yint'] = new_yint0 + + typed_args1 = strategy_typed_args[1] + new_yint1 = typed_args1['y'] * (typed_args1['pa'] - typed_args1['pb']) / (geomean_p_marg - typed_args1['pb']) + if new_yint1 < typed_args1['y']: + new_yint1 = typed_args1['y'] + self.ConfigObj.logger.debug(f"[poolandtokens.py, _carbon_to_cpc] Second order: typed_args1['yint'], new_yint1, typed_args1['y']: {typed_args1['yint'], new_yint1, typed_args1['y']} \n") + typed_args1['yint'] = new_yint1 + + # repack the strateg_typed_args + strategy_typed_args = [typed_args0, typed_args1] + + + #### <<<<< MODIFICATION LOGIC HERE + + for typed_args in strategy_typed_args: + # delete new args that arent supported by from_carbon() + del typed_args["p_marg"] + del typed_args["is_limit_order"] try: if typed_args["y"] > 0: lst.append( diff --git a/fastlane_bot/utils.py b/fastlane_bot/utils.py index e1692d086..715f95bdc 100644 --- a/fastlane_bot/utils.py +++ b/fastlane_bot/utils.py @@ -68,6 +68,10 @@ def __getitem__(self, item): def decodeFloat(cls, value): """undoes the mantisse/exponent encoding in A,B""" return (value % cls.ONE) << (value // cls.ONE) + + @classmethod + def decodeRate(cls, value): + return (value / cls.ONE) ** 2 @classmethod def decode(cls, value): @@ -179,15 +183,27 @@ def p_start(self): @property def p_marg(self): if self.y == self.z: + # try: + new_method = self.decodeRate(self.decodeFloat(int(self.B)) + self.decodeFloat(int(self.A))) + # except: + # print(self.B, self.A) + # print(type(self.B), type(self.A)) + # print(self.decodeFloat(int(self.B)) + self.decodeFloat(int(self.A))) + # print(new_method, self.p_start) + assert new_method == self.p_start, f"{new_method}, {self.p_start} **************************************" return self.p_start elif self.y == 0: return self.p_end - raise 
NotImplementedError("p_marg not implemented for non-full / empty orders") - A = self.decodeFloat(self.A) - B = self.decodeFloat(self.B) - return self.decode(B + A * self.y / self.z) ** 2 - # https://github.com/bancorprotocol/carbon-simulator/blob/beta/benchmark/core/trade/impl.py - # 'marginalRate' : decodeRate(B + A if y == z else B + A * y / z), + # return 0 + else: + return self.decodeRate(self.decodeFloat(int(self.B)) + (self.decodeFloat(int(self.A)) * self.y/self.z)) + + # raise NotImplementedError("p_marg not implemented for non-full / empty orders") + # A = self.decodeFloat(self.A) + # B = self.decodeFloat(self.B) + # return self.decode(B + A * self.y / self.z) ** 2 + # # https://github.com/bancorprotocol/carbon-simulator/blob/beta/benchmark/core/trade/impl.py + # # 'marginalRate' : decodeRate(B + A if y == z else B + A * y / z), def find_latest_timestamped_folder(logging_path=None): From 5c3a92b330fa56ba9e310a22b6fac3248af0567f Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Thu, 2 May 2024 21:22:06 +1000 Subject: [PATCH 22/24] fix calculations for overlapping order modifier --- fastlane_bot/helpers/poolandtokens.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/fastlane_bot/helpers/poolandtokens.py b/fastlane_bot/helpers/poolandtokens.py index 4eac11a4c..b718d3404 100644 --- a/fastlane_bot/helpers/poolandtokens.py +++ b/fastlane_bot/helpers/poolandtokens.py @@ -403,6 +403,14 @@ def _carbon_to_cpc(self) -> ConstantProductCurve: allow to omit yint (in which case it is set to y, but this does not make a difference for the result) """ + def calculate_parameters(y: Decimal, pa: Decimal, pb: Decimal, pm: Decimal, n: Decimal): + H = pa.sqrt() ** n + L = pb.sqrt() ** n + M = pm.sqrt() ** n + A = H - L + B = L + z = y * (H - L) / (M - L) if M > L else y + return z # if idx == 0, use the first curve, otherwise use the second curve. 
change the numerical values to Decimal lst = [] @@ -523,14 +531,14 @@ def decimal_converter(idx): # modify the y_int based on the new geomean to the limit of y #TODO check that this math is correct typed_args0 = strategy_typed_args[0] - new_yint0 = typed_args0['y'] * (typed_args0['pa'] - typed_args0['pb']) / ((1/geomean_p_marg) - typed_args0['pb']) #this geomean is flipped because we flippend the pmarg from order 0 + new_yint0 = calculate_parameters(y=typed_args0['y'], pa=typed_args0['pa'], pb=typed_args0['pb'], pm=(1/geomean_p_marg), n=1) if new_yint0 < typed_args0['y']: new_yint0 = typed_args0['y'] - self.ConfigObj.logger.debug(f"[poolandtokens.py, _carbon_to_cpc] First order: typed_args0['yint'], new_yint0, , typed_args0['y']: {typed_args0['yint'], new_yint0, typed_args0['y']}") + self.ConfigObj.logger.debug(f"[poolandtokens.py, _carbon_to_cpc] First order: typed_args0['yint'], new_yint0, typed_args0['y']: {typed_args0['yint'], new_yint0, typed_args0['y']}") typed_args0['yint'] = new_yint0 typed_args1 = strategy_typed_args[1] - new_yint1 = typed_args1['y'] * (typed_args1['pa'] - typed_args1['pb']) / (geomean_p_marg - typed_args1['pb']) + new_yint1 = calculate_parameters(y=typed_args1['y'], pa=typed_args1['pa'], pb=typed_args1['pb'], pm=(geomean_p_marg), n=1) if new_yint1 < typed_args1['y']: new_yint1 = typed_args1['y'] self.ConfigObj.logger.debug(f"[poolandtokens.py, _carbon_to_cpc] Second order: typed_args1['yint'], new_yint1, typed_args1['y']: {typed_args1['yint'], new_yint1, typed_args1['y']} \n") From cb149decdea5ec1e63e9aa1fea58115d4c2304bd Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Fri, 3 May 2024 13:26:18 +1000 Subject: [PATCH 23/24] cleaner method for checking overlapping strategies --- fastlane_bot/helpers/poolandtokens.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fastlane_bot/helpers/poolandtokens.py b/fastlane_bot/helpers/poolandtokens.py index b718d3404..1ae7c257e 100644 --- 
a/fastlane_bot/helpers/poolandtokens.py +++ b/fastlane_bot/helpers/poolandtokens.py @@ -411,6 +411,12 @@ def calculate_parameters(y: Decimal, pa: Decimal, pb: Decimal, pm: Decimal, n: D B = L z = y * (H - L) / (M - L) if M > L else y return z + + def check_overlap(pa0, pb0, pa1, pb1): + min0, max0 = sorted([pa0, pb0]) + min1, max1 = sorted([1 / pa1, 1 / pb1]) + prices_overlap = max(min0, min1) < min(max0, max1) + return prices_overlap # if idx == 0, use the first curve, otherwise use the second curve. change the numerical values to Decimal lst = [] @@ -513,8 +519,7 @@ def decimal_converter(idx): no_limit_orders = (strategy_typed_args[0]['is_limit_order'] == False) and (strategy_typed_args[1]['is_limit_order'] == False) # evaluate if the price boundaries pa/pb overlap at one end # TODO check logic and remove duplicate logic if necessary - prices_overlap = (strategy_typed_args[1]['pa']>(1/strategy_typed_args[0]['pa'])>strategy_typed_args[1]['pb']) # or (1/strategy_typed_args[0]['pa']<(strategy_typed_args[1]['pb'])) - + prices_overlap = check_overlap(strategy_typed_args[0]['pa'], strategy_typed_args[0]['pb'], strategy_typed_args[1]['pa'], strategy_typed_args[1]['pb']) # if (percent_component_met and no_limit_orders) and not prices_overlap: # print(percent_component_met, no_limit_orders, prices_overlap) # print(strategy_typed_args) From 3ad438a6c6e8aa598b21ce6a737d46121b89daa7 Mon Sep 17 00:00:00 2001 From: NIXBNT <88088888+NIXBNT@users.noreply.github.com> Date: Tue, 7 May 2024 09:57:33 +1000 Subject: [PATCH 24/24] correct handling of p_marg and cleanup --- fastlane_bot/utils.py | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/fastlane_bot/utils.py b/fastlane_bot/utils.py index 715f95bdc..042ed7a19 100644 --- a/fastlane_bot/utils.py +++ b/fastlane_bot/utils.py @@ -182,29 +182,12 @@ def p_start(self): @property def p_marg(self): + A = self.decodeFloat(int(self.A)) + B = self.decodeFloat(int(self.B)) if self.y == self.z: - # 
try: - new_method = self.decodeRate(self.decodeFloat(int(self.B)) + self.decodeFloat(int(self.A))) - # except: - # print(self.B, self.A) - # print(type(self.B), type(self.A)) - # print(self.decodeFloat(int(self.B)) + self.decodeFloat(int(self.A))) - # print(new_method, self.p_start) - assert new_method == self.p_start, f"{new_method}, {self.p_start} **************************************" - return self.p_start - elif self.y == 0: - return self.p_end - # return 0 + return self.decodeRate(B + A) else: - return self.decodeRate(self.decodeFloat(int(self.B)) + (self.decodeFloat(int(self.A)) * self.y/self.z)) - - # raise NotImplementedError("p_marg not implemented for non-full / empty orders") - # A = self.decodeFloat(self.A) - # B = self.decodeFloat(self.B) - # return self.decode(B + A * self.y / self.z) ** 2 - # # https://github.com/bancorprotocol/carbon-simulator/blob/beta/benchmark/core/trade/impl.py - # # 'marginalRate' : decodeRate(B + A if y == z else B + A * y / z), - + return self.decodeRate(B + A * self.y/self.z) def find_latest_timestamped_folder(logging_path=None): """